From 90bf01d8b107375d45aee9de79c1760a0363b189 Mon Sep 17 00:00:00 2001 From: Neil Alexander Date: Fri, 15 Jul 2022 16:25:26 +0100 Subject: [PATCH 01/18] Use sync API database in `filterSharedUsers` (#2572) * Add function to the sync API storage package for filtering shared users * Use the database instead of asking the RS API * Fix unit tests * Fix map handling in `filterSharedUsers` --- syncapi/internal/keychange.go | 33 +++++++++--------- syncapi/internal/keychange_test.go | 31 +++++++++++++---- syncapi/storage/interface.go | 7 ++++ .../postgres/current_room_state_table.go | 30 ++++++++++++++++ syncapi/storage/shared/syncserver.go | 4 +++ .../sqlite3/current_room_state_table.go | 34 ++++++++++++++++++- syncapi/storage/tables/interface.go | 2 ++ syncapi/streams/stream_devicelist.go | 2 +- syncapi/sync/requestpool.go | 2 +- 9 files changed, 118 insertions(+), 27 deletions(-) diff --git a/syncapi/internal/keychange.go b/syncapi/internal/keychange.go index d96718d20..03df9285c 100644 --- a/syncapi/internal/keychange.go +++ b/syncapi/internal/keychange.go @@ -21,6 +21,7 @@ import ( keyapi "github.com/matrix-org/dendrite/keyserver/api" keytypes "github.com/matrix-org/dendrite/keyserver/types" roomserverAPI "github.com/matrix-org/dendrite/roomserver/api" + "github.com/matrix-org/dendrite/syncapi/storage" "github.com/matrix-org/dendrite/syncapi/types" "github.com/matrix-org/gomatrixserverlib" "github.com/matrix-org/util" @@ -46,7 +47,7 @@ func DeviceOTKCounts(ctx context.Context, keyAPI keyapi.SyncKeyAPI, userID, devi // was filled in, else false if there are no new device list changes because there is nothing to catch up on. The response MUST // be already filled in with join/leave information. func DeviceListCatchup( - ctx context.Context, keyAPI keyapi.SyncKeyAPI, rsAPI roomserverAPI.SyncRoomserverAPI, + ctx context.Context, db storage.SharedUsers, keyAPI keyapi.SyncKeyAPI, rsAPI roomserverAPI.SyncRoomserverAPI, userID string, res *types.Response, from, to types.StreamPosition, ) (newPos types.StreamPosition, hasNew bool, err error) { @@ -93,7 +94,7 @@ func DeviceListCatchup( queryRes.UserIDs = append(queryRes.UserIDs, leaveUserIDs...) queryRes.UserIDs = util.UniqueStrings(queryRes.UserIDs) var sharedUsersMap map[string]int - sharedUsersMap, queryRes.UserIDs = filterSharedUsers(ctx, rsAPI, userID, queryRes.UserIDs) + sharedUsersMap, queryRes.UserIDs = filterSharedUsers(ctx, db, userID, queryRes.UserIDs) util.GetLogger(ctx).Debugf( "QueryKeyChanges request off=%d,to=%d response off=%d uids=%v", offset, toOffset, queryRes.Offset, queryRes.UserIDs, @@ -215,30 +216,28 @@ func TrackChangedUsers( return changed, left, nil } +// filterSharedUsers takes a list of remote users whose keys have changed and filters +// it down to include only users who the requesting user shares a room with. 
func filterSharedUsers( - ctx context.Context, rsAPI roomserverAPI.SyncRoomserverAPI, userID string, usersWithChangedKeys []string, + ctx context.Context, db storage.SharedUsers, userID string, usersWithChangedKeys []string, ) (map[string]int, []string) { - var result []string - var sharedUsersRes roomserverAPI.QuerySharedUsersResponse - err := rsAPI.QuerySharedUsers(ctx, &roomserverAPI.QuerySharedUsersRequest{ - UserID: userID, - OtherUserIDs: usersWithChangedKeys, - }, &sharedUsersRes) + sharedUsersMap := make(map[string]int, len(usersWithChangedKeys)) + for _, userID := range usersWithChangedKeys { + sharedUsersMap[userID] = 0 + } + sharedUsers, err := db.SharedUsers(ctx, userID, usersWithChangedKeys) if err != nil { // default to all users so we do needless queries rather than miss some important device update return nil, usersWithChangedKeys } + for _, userID := range sharedUsers { + sharedUsersMap[userID]++ + } // We forcibly put ourselves in this list because we should be notified about our own device updates // and if we are in 0 rooms then we don't technically share any room with ourselves so we wouldn't // be notified about key changes. - sharedUsersRes.UserIDsToCount[userID] = 1 - - for _, uid := range usersWithChangedKeys { - if sharedUsersRes.UserIDsToCount[uid] > 0 { - result = append(result, uid) - } - } - return sharedUsersRes.UserIDsToCount, result + sharedUsersMap[userID] = 1 + return sharedUsersMap, sharedUsers } func joinedRooms(res *types.Response, userID string) []string { diff --git a/syncapi/internal/keychange_test.go b/syncapi/internal/keychange_test.go index 219b35e2c..79ed440e7 100644 --- a/syncapi/internal/keychange_test.go +++ b/syncapi/internal/keychange_test.go @@ -11,6 +11,7 @@ import ( "github.com/matrix-org/dendrite/syncapi/types" userapi "github.com/matrix-org/dendrite/userapi/api" "github.com/matrix-org/gomatrixserverlib" + "github.com/matrix-org/util" ) var ( @@ -105,6 +106,22 @@ func (s *mockRoomserverAPI) QuerySharedUsers(ctx context.Context, req *api.Query return nil } +// This is actually a database function, but seeing as we track the state inside the +// *mockRoomserverAPI, we'll just comply with the interface here instead. 
+func (s *mockRoomserverAPI) SharedUsers(ctx context.Context, userID string, otherUserIDs []string) ([]string, error) { + commonUsers := []string{} + for _, members := range s.roomIDToJoinedMembers { + for _, member := range members { + for _, userID := range otherUserIDs { + if member == userID { + commonUsers = append(commonUsers, userID) + } + } + } + } + return util.UniqueStrings(commonUsers), nil +} + type wantCatchup struct { hasNew bool changed []string @@ -178,7 +195,7 @@ func TestKeyChangeCatchupOnJoinShareNewUser(t *testing.T) { "!another:room": {syncingUser}, }, } - _, hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, emptyToken) + _, hasNew, err := DeviceListCatchup(context.Background(), rsAPI, &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, emptyToken) if err != nil { t.Fatalf("DeviceListCatchup returned an error: %s", err) } @@ -201,7 +218,7 @@ func TestKeyChangeCatchupOnLeaveShareLeftUser(t *testing.T) { "!another:room": {syncingUser}, }, } - _, hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, emptyToken) + _, hasNew, err := DeviceListCatchup(context.Background(), rsAPI, &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, emptyToken) if err != nil { t.Fatalf("DeviceListCatchup returned an error: %s", err) } @@ -224,7 +241,7 @@ func TestKeyChangeCatchupOnJoinShareNoNewUsers(t *testing.T) { "!another:room": {syncingUser, existingUser}, }, } - _, hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, emptyToken) + _, hasNew, err := DeviceListCatchup(context.Background(), rsAPI, &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, emptyToken) if err != nil { t.Fatalf("Catchup returned an error: %s", err) } @@ -246,7 +263,7 @@ func TestKeyChangeCatchupOnLeaveShareNoUsers(t *testing.T) { "!another:room": {syncingUser, existingUser}, }, } - _, hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, emptyToken) + _, hasNew, err := DeviceListCatchup(context.Background(), rsAPI, &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, emptyToken) if err != nil { t.Fatalf("DeviceListCatchup returned an error: %s", err) } @@ -305,7 +322,7 @@ func TestKeyChangeCatchupNoNewJoinsButMessages(t *testing.T) { roomID: {syncingUser, existingUser}, }, } - _, hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, emptyToken) + _, hasNew, err := DeviceListCatchup(context.Background(), rsAPI, &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, emptyToken) if err != nil { t.Fatalf("DeviceListCatchup returned an error: %s", err) } @@ -333,7 +350,7 @@ func TestKeyChangeCatchupChangeAndLeft(t *testing.T) { "!another:room": {syncingUser}, }, } - _, hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, emptyToken) + _, hasNew, err := DeviceListCatchup(context.Background(), rsAPI, &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, emptyToken) if err != nil { t.Fatalf("Catchup returned an error: %s", err) } @@ -419,7 +436,7 @@ func TestKeyChangeCatchupChangeAndLeftSameRoom(t *testing.T) { }, } _, hasNew, err := DeviceListCatchup( - context.Background(), &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, emptyToken, + context.Background(), rsAPI, &mockKeyAPI{}, rsAPI, 
syncingUser, syncResponse, emptyToken, emptyToken, ) if err != nil { t.Fatalf("DeviceListCatchup returned an error: %s", err) diff --git a/syncapi/storage/interface.go b/syncapi/storage/interface.go index 5a036d889..055426030 100644 --- a/syncapi/storage/interface.go +++ b/syncapi/storage/interface.go @@ -27,6 +27,8 @@ import ( type Database interface { Presence + SharedUsers + MaxStreamPositionForPDUs(ctx context.Context) (types.StreamPosition, error) MaxStreamPositionForReceipts(ctx context.Context) (types.StreamPosition, error) MaxStreamPositionForInvites(ctx context.Context) (types.StreamPosition, error) @@ -165,3 +167,8 @@ type Presence interface { PresenceAfter(ctx context.Context, after types.StreamPosition, filter gomatrixserverlib.EventFilter) (map[string]*types.PresenceInternal, error) MaxStreamPositionForPresence(ctx context.Context) (types.StreamPosition, error) } + +type SharedUsers interface { + // SharedUsers returns a subset of otherUserIDs that share a room with userID. + SharedUsers(ctx context.Context, userID string, otherUserIDs []string) ([]string, error) +} diff --git a/syncapi/storage/postgres/current_room_state_table.go b/syncapi/storage/postgres/current_room_state_table.go index 8ee387b39..c4667bafd 100644 --- a/syncapi/storage/postgres/current_room_state_table.go +++ b/syncapi/storage/postgres/current_room_state_table.go @@ -107,6 +107,11 @@ const selectEventsWithEventIDsSQL = "" + "SELECT event_id, added_at, headered_event_json, 0 AS session_id, false AS exclude_from_sync, '' AS transaction_id" + " FROM syncapi_current_room_state WHERE event_id = ANY($1)" +const selectSharedUsersSQL = "" + + "SELECT state_key FROM syncapi_current_room_state WHERE room_id = ANY(" + + " SELECT room_id FROM syncapi_current_room_state WHERE state_key = $1 AND membership='join'" + + ") AND state_key = ANY($2) AND membership='join';" + type currentRoomStateStatements struct { upsertRoomStateStmt *sql.Stmt deleteRoomStateByEventIDStmt *sql.Stmt @@ -118,6 +123,7 @@ type currentRoomStateStatements struct { selectJoinedUsersInRoomStmt *sql.Stmt selectEventsWithEventIDsStmt *sql.Stmt selectStateEventStmt *sql.Stmt + selectSharedUsersStmt *sql.Stmt } func NewPostgresCurrentRoomStateTable(db *sql.DB) (tables.CurrentRoomState, error) { @@ -156,6 +162,9 @@ func NewPostgresCurrentRoomStateTable(db *sql.DB) (tables.CurrentRoomState, erro if s.selectStateEventStmt, err = db.Prepare(selectStateEventSQL); err != nil { return nil, err } + if s.selectSharedUsersStmt, err = db.Prepare(selectSharedUsersSQL); err != nil { + return nil, err + } return s, nil } @@ -379,3 +388,24 @@ func (s *currentRoomStateStatements) SelectStateEvent( } return &ev, err } + +func (s *currentRoomStateStatements) SelectSharedUsers( + ctx context.Context, txn *sql.Tx, userID string, otherUserIDs []string, +) ([]string, error) { + stmt := sqlutil.TxStmt(txn, s.selectSharedUsersStmt) + rows, err := stmt.QueryContext(ctx, userID, otherUserIDs) + if err != nil { + return nil, err + } + defer internal.CloseAndLogIfError(ctx, rows, "selectSharedUsersStmt: rows.close() failed") + + var stateKey string + result := make([]string, 0, len(otherUserIDs)) + for rows.Next() { + if err := rows.Scan(&stateKey); err != nil { + return nil, err + } + result = append(result, stateKey) + } + return result, rows.Err() +} diff --git a/syncapi/storage/shared/syncserver.go b/syncapi/storage/shared/syncserver.go index 76114aff8..d1c5e2d16 100644 --- a/syncapi/storage/shared/syncserver.go +++ b/syncapi/storage/shared/syncserver.go @@ -176,6 +176,10 @@ 
func (d *Database) AllPeekingDevicesInRooms(ctx context.Context) (map[string][]t return d.Peeks.SelectPeekingDevices(ctx) } +func (d *Database) SharedUsers(ctx context.Context, userID string, otherUserIDs []string) ([]string, error) { + return d.CurrentRoomState.SelectSharedUsers(ctx, nil, userID, otherUserIDs) +} + func (d *Database) GetStateEvent( ctx context.Context, roomID, evType, stateKey string, ) (*gomatrixserverlib.HeaderedEvent, error) { diff --git a/syncapi/storage/sqlite3/current_room_state_table.go b/syncapi/storage/sqlite3/current_room_state_table.go index f0a1c7bb7..376c3a3d2 100644 --- a/syncapi/storage/sqlite3/current_room_state_table.go +++ b/syncapi/storage/sqlite3/current_room_state_table.go @@ -91,6 +91,11 @@ const selectEventsWithEventIDsSQL = "" + "SELECT event_id, added_at, headered_event_json, 0 AS session_id, false AS exclude_from_sync, '' AS transaction_id" + " FROM syncapi_current_room_state WHERE event_id IN ($1)" +const selectSharedUsersSQL = "" + + "SELECT state_key FROM syncapi_current_room_state WHERE room_id = ANY(" + + " SELECT room_id FROM syncapi_current_room_state WHERE state_key = $1 AND membership='join'" + + ") AND state_key IN ($2) AND membership='join';" + type currentRoomStateStatements struct { db *sql.DB streamIDStatements *StreamIDStatements @@ -100,8 +105,9 @@ type currentRoomStateStatements struct { selectRoomIDsWithMembershipStmt *sql.Stmt selectRoomIDsWithAnyMembershipStmt *sql.Stmt selectJoinedUsersStmt *sql.Stmt - //selectJoinedUsersInRoomStmt *sql.Stmt - prepared at runtime due to variadic + //selectJoinedUsersInRoomStmt *sql.Stmt - prepared at runtime due to variadic selectStateEventStmt *sql.Stmt + //selectSharedUsersSQL *sql.Stmt - prepared at runtime due to variadic } func NewSqliteCurrentRoomStateTable(db *sql.DB, streamID *StreamIDStatements) (tables.CurrentRoomState, error) { @@ -396,3 +402,29 @@ func (s *currentRoomStateStatements) SelectStateEvent( } return &ev, err } + +func (s *currentRoomStateStatements) SelectSharedUsers( + ctx context.Context, txn *sql.Tx, userID string, otherUserIDs []string, +) ([]string, error) { + query := strings.Replace(selectSharedUsersSQL, "($2)", sqlutil.QueryVariadicOffset(len(otherUserIDs), 1), 1) + stmt, err := s.db.Prepare(query) + if err != nil { + return nil, fmt.Errorf("SelectSharedUsers s.db.Prepare: %w", err) + } + defer internal.CloseAndLogIfError(ctx, stmt, "SelectSharedUsers: stmt.close() failed") + rows, err := sqlutil.TxStmt(txn, stmt).QueryContext(ctx, userID, otherUserIDs) + if err != nil { + return nil, err + } + defer internal.CloseAndLogIfError(ctx, rows, "selectSharedUsersStmt: rows.close() failed") + + var stateKey string + result := make([]string, 0, len(otherUserIDs)) + for rows.Next() { + if err := rows.Scan(&stateKey); err != nil { + return nil, err + } + result = append(result, stateKey) + } + return result, rows.Err() +} diff --git a/syncapi/storage/tables/interface.go b/syncapi/storage/tables/interface.go index ccdebfdbd..08568d9ac 100644 --- a/syncapi/storage/tables/interface.go +++ b/syncapi/storage/tables/interface.go @@ -104,6 +104,8 @@ type CurrentRoomState interface { SelectJoinedUsers(ctx context.Context) (map[string][]string, error) // SelectJoinedUsersInRoom returns a map of room ID to a list of joined user IDs for a given room. SelectJoinedUsersInRoom(ctx context.Context, roomIDs []string) (map[string][]string, error) + // SelectSharedUsers returns a subset of otherUserIDs that share a room with userID. 
+ SelectSharedUsers(ctx context.Context, txn *sql.Tx, userID string, otherUserIDs []string) ([]string, error) } // BackwardsExtremities keeps track of backwards extremities for a room. diff --git a/syncapi/streams/stream_devicelist.go b/syncapi/streams/stream_devicelist.go index f42099510..5448ee5bd 100644 --- a/syncapi/streams/stream_devicelist.go +++ b/syncapi/streams/stream_devicelist.go @@ -28,7 +28,7 @@ func (p *DeviceListStreamProvider) IncrementalSync( from, to types.StreamPosition, ) types.StreamPosition { var err error - to, _, err = internal.DeviceListCatchup(context.Background(), p.keyAPI, p.rsAPI, req.Device.UserID, req.Response, from, to) + to, _, err = internal.DeviceListCatchup(context.Background(), p.DB, p.keyAPI, p.rsAPI, req.Device.UserID, req.Response, from, to) if err != nil { req.Log.WithError(err).Error("internal.DeviceListCatchup failed") return from diff --git a/syncapi/sync/requestpool.go b/syncapi/sync/requestpool.go index 6f0849e08..b6b4779a5 100644 --- a/syncapi/sync/requestpool.go +++ b/syncapi/sync/requestpool.go @@ -429,7 +429,7 @@ func (rp *RequestPool) OnIncomingKeyChangeRequest(req *http.Request, device *use } rp.streams.PDUStreamProvider.IncrementalSync(req.Context(), syncReq, fromToken.PDUPosition, toToken.PDUPosition) _, _, err = internal.DeviceListCatchup( - req.Context(), rp.keyAPI, rp.rsAPI, syncReq.Device.UserID, + req.Context(), rp.db, rp.keyAPI, rp.rsAPI, syncReq.Device.UserID, syncReq.Response, fromToken.DeviceListPosition, toToken.DeviceListPosition, ) if err != nil { From 4ff57993abc7066c52d979617a81af45629dd611 Mon Sep 17 00:00:00 2001 From: andreever <52261463+andreever@users.noreply.github.com> Date: Fri, 15 Jul 2022 17:33:05 +0200 Subject: [PATCH 02/18] Update 1_createusers.md (#2571) * Update 1_createusers.md Added description on how to create user accounts when running in docker. * Update 1_createusers.md Co-authored-by: Neil Alexander --- docs/administration/1_createusers.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/docs/administration/1_createusers.md b/docs/administration/1_createusers.md index f40b7f576..61ec2299b 100644 --- a/docs/administration/1_createusers.md +++ b/docs/administration/1_createusers.md @@ -32,6 +32,15 @@ To create a new **admin account**, add the `-admin` flag: ./bin/create-account -config /path/to/dendrite.yaml -username USERNAME -admin ``` +An example of using `create-account` when running in **Docker**, having found the `CONTAINERNAME` from `docker ps`: + +```bash +docker exec -it CONTAINERNAME /usr/bin/create-account -config /path/to/dendrite.yaml -username USERNAME +``` +```bash +docker exec -it CONTAINERNAME /usr/bin/create-account -config /path/to/dendrite.yaml -username USERNAME -admin +``` + ## Using shared secret registration Dendrite supports the Synapse-compatible shared secret registration endpoint. From c0c909d30642d03600fd48bd052a22040a67267b Mon Sep 17 00:00:00 2001 From: Maximilian Gaedig <38767445+MaximilianGaedig@users.noreply.github.com> Date: Mon, 18 Jul 2022 00:01:52 +0200 Subject: [PATCH 03/18] Fix connection_string format in dendrite-sample.polylith.yaml (#2574) --- dendrite-sample.polylith.yaml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/dendrite-sample.polylith.yaml b/dendrite-sample.polylith.yaml index ea3d9d689..723547f55 100644 --- a/dendrite-sample.polylith.yaml +++ b/dendrite-sample.polylith.yaml @@ -125,7 +125,7 @@ app_service_api: # Database configuration for this component. 
database: - connection_string: postgresql://username@password:hostname/dendrite_appservice?sslmode=disable + connection_string: postgresql://username:password@hostname/dendrite_appservice?sslmode=disable max_open_conns: 10 max_idle_conns: 2 conn_max_lifetime: -1 @@ -199,7 +199,7 @@ federation_api: external_api: listen: http://[::]:8072 database: - connection_string: postgresql://username@password:hostname/dendrite_federationapi?sslmode=disable + connection_string: postgresql://username:password@hostname/dendrite_federationapi?sslmode=disable max_open_conns: 10 max_idle_conns: 2 conn_max_lifetime: -1 @@ -236,7 +236,7 @@ key_server: listen: http://[::]:7779 # The listen address for incoming API requests connect: http://key_server:7779 # The connect address for other components to use database: - connection_string: postgresql://username@password:hostname/dendrite_keyserver?sslmode=disable + connection_string: postgresql://username:password@hostname/dendrite_keyserver?sslmode=disable max_open_conns: 10 max_idle_conns: 2 conn_max_lifetime: -1 @@ -249,7 +249,7 @@ media_api: external_api: listen: http://[::]:8074 database: - connection_string: postgresql://username@password:hostname/dendrite_mediaapi?sslmode=disable + connection_string: postgresql://username:password@hostname/dendrite_mediaapi?sslmode=disable max_open_conns: 5 max_idle_conns: 2 conn_max_lifetime: -1 @@ -286,7 +286,7 @@ mscs: # - msc2836 # (Threading, see https://github.com/matrix-org/matrix-doc/pull/2836) # - msc2946 # (Spaces Summary, see https://github.com/matrix-org/matrix-doc/pull/2946) database: - connection_string: postgresql://username@password:hostname/dendrite_mscs?sslmode=disable + connection_string: postgresql://username:password@hostname/dendrite_mscs?sslmode=disable max_open_conns: 5 max_idle_conns: 2 conn_max_lifetime: -1 @@ -297,7 +297,7 @@ room_server: listen: http://[::]:7770 # The listen address for incoming API requests connect: http://room_server:7770 # The connect address for other components to use database: - connection_string: postgresql://username@password:hostname/dendrite_roomserver?sslmode=disable + connection_string: postgresql://username:password@hostname/dendrite_roomserver?sslmode=disable max_open_conns: 10 max_idle_conns: 2 conn_max_lifetime: -1 @@ -310,7 +310,7 @@ sync_api: external_api: listen: http://[::]:8073 database: - connection_string: postgresql://username@password:hostname/dendrite_syncapi?sslmode=disable + connection_string: postgresql://username:password@hostname/dendrite_syncapi?sslmode=disable max_open_conns: 10 max_idle_conns: 2 conn_max_lifetime: -1 @@ -326,7 +326,7 @@ user_api: listen: http://[::]:7781 # The listen address for incoming API requests connect: http://user_api:7781 # The connect address for other components to use account_database: - connection_string: postgresql://username@password:hostname/dendrite_userapi?sslmode=disable + connection_string: postgresql://username:password@hostname/dendrite_userapi?sslmode=disable max_open_conns: 10 max_idle_conns: 2 conn_max_lifetime: -1 From a7e92f8cb99105c2a579f73fbc972b7fb0b35678 Mon Sep 17 00:00:00 2001 From: Till <2353100+S7evinK@users.noreply.github.com> Date: Mon, 18 Jul 2022 14:46:15 +0200 Subject: [PATCH 04/18] History visibility database changes (#2533) * Add new history_visibility column * Update SQL queries to include history_visibility * Store the history visibilty calculated by the roomserver * Update GMSL * Update migrations * Fix migration * Update GMSL * Fix `go.sum` * Update GMSL to use sql.Scanner & sql.Valuer * 
Re-order migration/table creation * Update gomatrixserverlib * Add history_visibility column to current_room_state * Fix migrations * Return error instead of Fatal log Co-authored-by: Neil Alexander --- go.mod | 2 +- go.sum | 4 +- syncapi/consumers/roomserver.go | 4 +- syncapi/routing/messages.go | 1 + syncapi/storage/interface.go | 4 +- .../postgres/current_room_state_table.go | 12 +-- ...2061412000000_history_visibility_column.go | 50 +++++++++++ .../postgres/output_room_events_table.go | 82 ++++++++++-------- syncapi/storage/postgres/syncserver.go | 24 ++++-- syncapi/storage/shared/syncserver.go | 7 +- .../sqlite3/current_room_state_table.go | 12 +-- ...2061412000000_history_visibility_column.go | 82 ++++++++++++++++++ .../sqlite3/output_room_events_table.go | 83 +++++++++++-------- syncapi/storage/sqlite3/syncserver.go | 24 ++++-- syncapi/storage/storage_test.go | 2 +- syncapi/storage/tables/interface.go | 9 +- .../storage/tables/output_room_events_test.go | 4 +- 17 files changed, 299 insertions(+), 107 deletions(-) create mode 100644 syncapi/storage/postgres/deltas/2022061412000000_history_visibility_column.go create mode 100644 syncapi/storage/sqlite3/deltas/2022061412000000_history_visibility_column.go diff --git a/go.mod b/go.mod index 1e62e1d93..0a965ec8e 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,7 @@ require ( github.com/matrix-org/dugong v0.0.0-20210921133753-66e6b1c67e2e github.com/matrix-org/go-sqlite3-js v0.0.0-20220419092513-28aa791a1c91 github.com/matrix-org/gomatrix v0.0.0-20210324163249-be2af5ef2e16 - github.com/matrix-org/gomatrixserverlib v0.0.0-20220713083127-fc2ea1e62e46 + github.com/matrix-org/gomatrixserverlib v0.0.0-20220718085240-f08f98af7d2d github.com/matrix-org/pinecone v0.0.0-20220708135211-1ce778fcde6a github.com/matrix-org/util v0.0.0-20200807132607-55161520e1d4 github.com/mattn/go-sqlite3 v1.14.13 diff --git a/go.sum b/go.sum index 86656ccd5..df3feeb6e 100644 --- a/go.sum +++ b/go.sum @@ -341,8 +341,8 @@ github.com/matrix-org/go-sqlite3-js v0.0.0-20220419092513-28aa791a1c91/go.mod h1 github.com/matrix-org/gomatrix v0.0.0-20190528120928-7df988a63f26/go.mod h1:3fxX6gUjWyI/2Bt7J1OLhpCzOfO/bB3AiX0cJtEKud0= github.com/matrix-org/gomatrix v0.0.0-20210324163249-be2af5ef2e16 h1:ZtO5uywdd5dLDCud4r0r55eP4j9FuUNpl60Gmntcop4= github.com/matrix-org/gomatrix v0.0.0-20210324163249-be2af5ef2e16/go.mod h1:/gBX06Kw0exX1HrwmoBibFA98yBk/jxKpGVeyQbff+s= -github.com/matrix-org/gomatrixserverlib v0.0.0-20220713083127-fc2ea1e62e46 h1:5X/kXY3nwqKOwwrE9tnMKrjbsi3PHigQYvrvDBSntO8= -github.com/matrix-org/gomatrixserverlib v0.0.0-20220713083127-fc2ea1e62e46/go.mod h1:jX38yp3SSLJNftBg3PXU1ayd0PCLIiDHQ4xAc9DIixk= +github.com/matrix-org/gomatrixserverlib v0.0.0-20220718085240-f08f98af7d2d h1:BWInUURXVOW+OiifMapoRIS7i122KWdEKj6fnDFXgBo= +github.com/matrix-org/gomatrixserverlib v0.0.0-20220718085240-f08f98af7d2d/go.mod h1:jX38yp3SSLJNftBg3PXU1ayd0PCLIiDHQ4xAc9DIixk= github.com/matrix-org/pinecone v0.0.0-20220708135211-1ce778fcde6a h1:DdG8vXMlZ65EAtc4V+3t7zHZ2Gqs24pSnyXS+4BRHUs= github.com/matrix-org/pinecone v0.0.0-20220708135211-1ce778fcde6a/go.mod h1:ulJzsVOTssIVp1j/m5eI//4VpAGDkMt5NrRuAVX7wpc= github.com/matrix-org/util v0.0.0-20190711121626-527ce5ddefc7/go.mod h1:vVQlW/emklohkZnOPwD3LrZUBqdfsbiyO3p1lNV8F6U= diff --git a/syncapi/consumers/roomserver.go b/syncapi/consumers/roomserver.go index f0ca2106f..328e1e9dd 100644 --- a/syncapi/consumers/roomserver.go +++ b/syncapi/consumers/roomserver.go @@ -240,6 +240,7 @@ func (s *OutputRoomEventConsumer) onNewRoomEvent( 
msg.RemovesStateEventIDs, msg.TransactionID, false, + msg.HistoryVisibility, ) if err != nil { // panic rather than continue with an inconsistent database @@ -289,7 +290,8 @@ func (s *OutputRoomEventConsumer) onOldRoomEvent( []string{}, // adds no state []string{}, // removes no state nil, // no transaction - ev.StateKey() != nil, // exclude from sync? + ev.StateKey() != nil, // exclude from sync?, + msg.HistoryVisibility, ) if err != nil { // panic rather than continue with an inconsistent database diff --git a/syncapi/routing/messages.go b/syncapi/routing/messages.go index 24745cd55..990ca55b1 100644 --- a/syncapi/routing/messages.go +++ b/syncapi/routing/messages.go @@ -594,6 +594,7 @@ func (r *messagesReq) backfill(roomID string, backwardsExtremities map[string][] []string{}, []string{}, nil, true, + gomatrixserverlib.HistoryVisibilityShared, ) if err != nil { return nil, err diff --git a/syncapi/storage/interface.go b/syncapi/storage/interface.go index 055426030..9751670b2 100644 --- a/syncapi/storage/interface.go +++ b/syncapi/storage/interface.go @@ -69,7 +69,9 @@ type Database interface { // when generating the sync stream position for this event. Returns the sync stream position for the inserted event. // Returns an error if there was a problem inserting this event. WriteEvent(ctx context.Context, ev *gomatrixserverlib.HeaderedEvent, addStateEvents []*gomatrixserverlib.HeaderedEvent, - addStateEventIDs []string, removeStateEventIDs []string, transactionID *api.TransactionID, excludeFromSync bool) (types.StreamPosition, error) + addStateEventIDs []string, removeStateEventIDs []string, transactionID *api.TransactionID, excludeFromSync bool, + historyVisibility gomatrixserverlib.HistoryVisibility, + ) (types.StreamPosition, error) // PurgeRoomState completely purges room state from the sync API. This is done when // receiving an output event that completely resets the state. PurgeRoomState(ctx context.Context, roomID string) error diff --git a/syncapi/storage/postgres/current_room_state_table.go b/syncapi/storage/postgres/current_room_state_table.go index c4667bafd..aae2d8c33 100644 --- a/syncapi/storage/postgres/current_room_state_table.go +++ b/syncapi/storage/postgres/current_room_state_table.go @@ -51,6 +51,7 @@ CREATE TABLE IF NOT EXISTS syncapi_current_room_state ( -- The serial ID of the output_room_events table when this event became -- part of the current state of the room. 
added_at BIGINT, + history_visibility SMALLINT NOT NULL DEFAULT 2, -- Clobber based on 3-uple of room_id, type and state_key CONSTRAINT syncapi_room_state_unique UNIQUE (room_id, type, state_key) ); @@ -63,8 +64,8 @@ CREATE UNIQUE INDEX IF NOT EXISTS syncapi_current_room_state_eventid_idx ON sync ` const upsertRoomStateSQL = "" + - "INSERT INTO syncapi_current_room_state (room_id, event_id, type, sender, contains_url, state_key, headered_event_json, membership, added_at)" + - " VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)" + + "INSERT INTO syncapi_current_room_state (room_id, event_id, type, sender, contains_url, state_key, headered_event_json, membership, added_at, history_visibility)" + + " VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)" + " ON CONFLICT ON CONSTRAINT syncapi_room_state_unique" + " DO UPDATE SET event_id = $2, sender=$4, contains_url=$5, headered_event_json = $7, membership = $8, added_at = $9" @@ -100,11 +101,11 @@ const selectStateEventSQL = "" + "SELECT headered_event_json FROM syncapi_current_room_state WHERE room_id = $1 AND type = $2 AND state_key = $3" const selectEventsWithEventIDsSQL = "" + - // TODO: The session_id and transaction_id blanks are here because otherwise - // the rowsToStreamEvents expects there to be exactly six columns. We need to + // TODO: The session_id and transaction_id blanks are here because + // the rowsToStreamEvents expects there to be exactly seven columns. We need to // figure out if these really need to be in the DB, and if so, we need a // better permanent fix for this. - neilalexander, 2 Jan 2020 - "SELECT event_id, added_at, headered_event_json, 0 AS session_id, false AS exclude_from_sync, '' AS transaction_id" + + "SELECT event_id, added_at, headered_event_json, 0 AS session_id, false AS exclude_from_sync, '' AS transaction_id, history_visibility" + " FROM syncapi_current_room_state WHERE event_id = ANY($1)" const selectSharedUsersSQL = "" + @@ -336,6 +337,7 @@ func (s *currentRoomStateStatements) UpsertRoomState( headeredJSON, membership, addedAt, + event.Visibility, ) return err } diff --git a/syncapi/storage/postgres/deltas/2022061412000000_history_visibility_column.go b/syncapi/storage/postgres/deltas/2022061412000000_history_visibility_column.go new file mode 100644 index 000000000..2b4b65287 --- /dev/null +++ b/syncapi/storage/postgres/deltas/2022061412000000_history_visibility_column.go @@ -0,0 +1,50 @@ +// Copyright 2022 The Matrix.org Foundation C.I.C. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package deltas + +import ( + "database/sql" + "fmt" + + "github.com/matrix-org/dendrite/internal/sqlutil" +) + +func LoadAddHistoryVisibilityColumn(m *sqlutil.Migrations) { + m.AddMigration(UpAddHistoryVisibilityColumn, DownAddHistoryVisibilityColumn) +} + +func UpAddHistoryVisibilityColumn(tx *sql.Tx) error { + _, err := tx.Exec(` + ALTER TABLE syncapi_output_room_events ADD COLUMN IF NOT EXISTS history_visibility SMALLINT NOT NULL DEFAULT 2; + UPDATE syncapi_output_room_events SET history_visibility = 4 WHERE type IN ('m.room.message', 'm.room.encrypted'); + ALTER TABLE syncapi_current_room_state ADD COLUMN IF NOT EXISTS history_visibility SMALLINT NOT NULL DEFAULT 2; + UPDATE syncapi_current_room_state SET history_visibility = 4 WHERE type IN ('m.room.message', 'm.room.encrypted'); + `) + if err != nil { + return fmt.Errorf("failed to execute upgrade: %w", err) + } + return nil +} + +func DownAddHistoryVisibilityColumn(tx *sql.Tx) error { + _, err := tx.Exec(` + ALTER TABLE syncapi_output_room_events DROP COLUMN IF EXISTS history_visibility; + ALTER TABLE syncapi_current_room_state DROP COLUMN IF EXISTS history_visibility; + `) + if err != nil { + return fmt.Errorf("failed to execute downgrade: %w", err) + } + return nil +} diff --git a/syncapi/storage/postgres/output_room_events_table.go b/syncapi/storage/postgres/output_room_events_table.go index d84d0cfa2..d0474d4c3 100644 --- a/syncapi/storage/postgres/output_room_events_table.go +++ b/syncapi/storage/postgres/output_room_events_table.go @@ -67,7 +67,9 @@ CREATE TABLE IF NOT EXISTS syncapi_output_room_events ( -- events retrieved through backfilling that have a position in the stream -- that relates to the moment these were retrieved rather than the moment these -- were emitted. - exclude_from_sync BOOL DEFAULT FALSE + exclude_from_sync BOOL DEFAULT FALSE, + -- The history visibility before this event (1 - world_readable; 2 - shared; 3 - invited; 4 - joined) + history_visibility SMALLINT NOT NULL DEFAULT 2 ); CREATE INDEX IF NOT EXISTS syncapi_output_room_events_type_idx ON syncapi_output_room_events (type); @@ -78,16 +80,16 @@ CREATE INDEX IF NOT EXISTS syncapi_output_room_events_exclude_from_sync_idx ON s const insertEventSQL = "" + "INSERT INTO syncapi_output_room_events (" + - "room_id, event_id, headered_event_json, type, sender, contains_url, add_state_ids, remove_state_ids, session_id, transaction_id, exclude_from_sync" + - ") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11) " + + "room_id, event_id, headered_event_json, type, sender, contains_url, add_state_ids, remove_state_ids, session_id, transaction_id, exclude_from_sync, history_visibility" + + ") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12) " + "ON CONFLICT ON CONSTRAINT syncapi_event_id_idx DO UPDATE SET exclude_from_sync = (excluded.exclude_from_sync AND $11) " + "RETURNING id" const selectEventsSQL = "" + - "SELECT event_id, id, headered_event_json, session_id, exclude_from_sync, transaction_id FROM syncapi_output_room_events WHERE event_id = ANY($1)" + "SELECT event_id, id, headered_event_json, session_id, exclude_from_sync, transaction_id, history_visibility FROM syncapi_output_room_events WHERE event_id = ANY($1)" const selectEventsWithFilterSQL = "" + - "SELECT event_id, id, headered_event_json, session_id, exclude_from_sync, transaction_id FROM syncapi_output_room_events WHERE event_id = ANY($1)" + + "SELECT event_id, id, headered_event_json, session_id, exclude_from_sync, transaction_id, history_visibility FROM syncapi_output_room_events 
WHERE event_id = ANY($1)" + " AND ( $2::text[] IS NULL OR sender = ANY($2) )" + " AND ( $3::text[] IS NULL OR NOT(sender = ANY($3)) )" + " AND ( $4::text[] IS NULL OR type LIKE ANY($4) )" + @@ -96,7 +98,7 @@ const selectEventsWithFilterSQL = "" + " LIMIT $7" const selectRecentEventsSQL = "" + - "SELECT event_id, id, headered_event_json, session_id, exclude_from_sync, transaction_id FROM syncapi_output_room_events" + + "SELECT event_id, id, headered_event_json, session_id, exclude_from_sync, transaction_id, history_visibility FROM syncapi_output_room_events" + " WHERE room_id = $1 AND id > $2 AND id <= $3" + " AND ( $4::text[] IS NULL OR sender = ANY($4) )" + " AND ( $5::text[] IS NULL OR NOT(sender = ANY($5)) )" + @@ -105,7 +107,7 @@ const selectRecentEventsSQL = "" + " ORDER BY id DESC LIMIT $8" const selectRecentEventsForSyncSQL = "" + - "SELECT event_id, id, headered_event_json, session_id, exclude_from_sync, transaction_id FROM syncapi_output_room_events" + + "SELECT event_id, id, headered_event_json, session_id, exclude_from_sync, transaction_id, history_visibility FROM syncapi_output_room_events" + " WHERE room_id = $1 AND id > $2 AND id <= $3 AND exclude_from_sync = FALSE" + " AND ( $4::text[] IS NULL OR sender = ANY($4) )" + " AND ( $5::text[] IS NULL OR NOT(sender = ANY($5)) )" + @@ -114,7 +116,7 @@ const selectRecentEventsForSyncSQL = "" + " ORDER BY id DESC LIMIT $8" const selectEarlyEventsSQL = "" + - "SELECT event_id, id, headered_event_json, session_id, exclude_from_sync, transaction_id FROM syncapi_output_room_events" + + "SELECT event_id, id, headered_event_json, session_id, exclude_from_sync, transaction_id, history_visibility FROM syncapi_output_room_events" + " WHERE room_id = $1 AND id > $2 AND id <= $3" + " AND ( $4::text[] IS NULL OR sender = ANY($4) )" + " AND ( $5::text[] IS NULL OR NOT(sender = ANY($5)) )" + @@ -130,7 +132,7 @@ const updateEventJSONSQL = "" + // In order for us to apply the state updates correctly, rows need to be ordered in the order they were received (id). 
const selectStateInRangeSQL = "" + - "SELECT event_id, id, headered_event_json, exclude_from_sync, add_state_ids, remove_state_ids" + + "SELECT event_id, id, headered_event_json, exclude_from_sync, add_state_ids, remove_state_ids, history_visibility" + " FROM syncapi_output_room_events" + " WHERE (id > $1 AND id <= $2) AND (add_state_ids IS NOT NULL OR remove_state_ids IS NOT NULL)" + " AND room_id = ANY($3)" + @@ -146,10 +148,10 @@ const deleteEventsForRoomSQL = "" + "DELETE FROM syncapi_output_room_events WHERE room_id = $1" const selectContextEventSQL = "" + - "SELECT id, headered_event_json FROM syncapi_output_room_events WHERE room_id = $1 AND event_id = $2" + "SELECT id, headered_event_json, history_visibility FROM syncapi_output_room_events WHERE room_id = $1 AND event_id = $2" const selectContextBeforeEventSQL = "" + - "SELECT headered_event_json FROM syncapi_output_room_events WHERE room_id = $1 AND id < $2" + + "SELECT headered_event_json, history_visibility FROM syncapi_output_room_events WHERE room_id = $1 AND id < $2" + " AND ( $4::text[] IS NULL OR sender = ANY($4) )" + " AND ( $5::text[] IS NULL OR NOT(sender = ANY($5)) )" + " AND ( $6::text[] IS NULL OR type LIKE ANY($6) )" + @@ -157,7 +159,7 @@ const selectContextBeforeEventSQL = "" + " ORDER BY id DESC LIMIT $3" const selectContextAfterEventSQL = "" + - "SELECT id, headered_event_json FROM syncapi_output_room_events WHERE room_id = $1 AND id > $2" + + "SELECT id, headered_event_json, history_visibility FROM syncapi_output_room_events WHERE room_id = $1 AND id > $2" + " AND ( $4::text[] IS NULL OR sender = ANY($4) )" + " AND ( $5::text[] IS NULL OR NOT(sender = ANY($5)) )" + " AND ( $6::text[] IS NULL OR type LIKE ANY($6) )" + @@ -246,14 +248,15 @@ func (s *outputRoomEventsStatements) SelectStateInRange( for rows.Next() { var ( - eventID string - streamPos types.StreamPosition - eventBytes []byte - excludeFromSync bool - addIDs pq.StringArray - delIDs pq.StringArray + eventID string + streamPos types.StreamPosition + eventBytes []byte + excludeFromSync bool + addIDs pq.StringArray + delIDs pq.StringArray + historyVisibility gomatrixserverlib.HistoryVisibility ) - if err := rows.Scan(&eventID, &streamPos, &eventBytes, &excludeFromSync, &addIDs, &delIDs); err != nil { + if err := rows.Scan(&eventID, &streamPos, &eventBytes, &excludeFromSync, &addIDs, &delIDs, &historyVisibility); err != nil { return nil, nil, err } // Sanity check for deleted state and whine if we see it. 
We don't need to do anything @@ -283,6 +286,7 @@ func (s *outputRoomEventsStatements) SelectStateInRange( needSet[id] = true } stateNeeded[ev.RoomID()] = needSet + ev.Visibility = historyVisibility eventIDToEvent[eventID] = types.StreamEvent{ HeaderedEvent: &ev, @@ -314,7 +318,7 @@ func (s *outputRoomEventsStatements) SelectMaxEventID( func (s *outputRoomEventsStatements) InsertEvent( ctx context.Context, txn *sql.Tx, event *gomatrixserverlib.HeaderedEvent, addState, removeState []string, - transactionID *api.TransactionID, excludeFromSync bool, + transactionID *api.TransactionID, excludeFromSync bool, historyVisibility gomatrixserverlib.HistoryVisibility, ) (streamPos types.StreamPosition, err error) { var txnID *string var sessionID *int64 @@ -351,6 +355,7 @@ func (s *outputRoomEventsStatements) InsertEvent( sessionID, txnID, excludeFromSync, + historyVisibility, ).Scan(&streamPos) return } @@ -504,13 +509,15 @@ func (s *outputRoomEventsStatements) SelectContextEvent(ctx context.Context, txn row := sqlutil.TxStmt(txn, s.selectContextEventStmt).QueryRowContext(ctx, roomID, eventID) var eventAsString string - if err = row.Scan(&id, &eventAsString); err != nil { + var historyVisibility gomatrixserverlib.HistoryVisibility + if err = row.Scan(&id, &eventAsString, &historyVisibility); err != nil { return 0, evt, err } if err = json.Unmarshal([]byte(eventAsString), &evt); err != nil { return 0, evt, err } + evt.Visibility = historyVisibility return id, evt, nil } @@ -532,15 +539,17 @@ func (s *outputRoomEventsStatements) SelectContextBeforeEvent( for rows.Next() { var ( - eventBytes []byte - evt *gomatrixserverlib.HeaderedEvent + eventBytes []byte + evt *gomatrixserverlib.HeaderedEvent + historyVisibility gomatrixserverlib.HistoryVisibility ) - if err = rows.Scan(&eventBytes); err != nil { + if err = rows.Scan(&eventBytes, &historyVisibility); err != nil { return evts, err } if err = json.Unmarshal(eventBytes, &evt); err != nil { return evts, err } + evt.Visibility = historyVisibility evts = append(evts, evt) } @@ -565,15 +574,17 @@ func (s *outputRoomEventsStatements) SelectContextAfterEvent( for rows.Next() { var ( - eventBytes []byte - evt *gomatrixserverlib.HeaderedEvent + eventBytes []byte + evt *gomatrixserverlib.HeaderedEvent + historyVisibility gomatrixserverlib.HistoryVisibility ) - if err = rows.Scan(&lastID, &eventBytes); err != nil { + if err = rows.Scan(&lastID, &eventBytes, &historyVisibility); err != nil { return 0, evts, err } if err = json.Unmarshal(eventBytes, &evt); err != nil { return 0, evts, err } + evt.Visibility = historyVisibility evts = append(evts, evt) } @@ -584,15 +595,16 @@ func rowsToStreamEvents(rows *sql.Rows) ([]types.StreamEvent, error) { var result []types.StreamEvent for rows.Next() { var ( - eventID string - streamPos types.StreamPosition - eventBytes []byte - excludeFromSync bool - sessionID *int64 - txnID *string - transactionID *api.TransactionID + eventID string + streamPos types.StreamPosition + eventBytes []byte + excludeFromSync bool + sessionID *int64 + txnID *string + transactionID *api.TransactionID + historyVisibility gomatrixserverlib.HistoryVisibility ) - if err := rows.Scan(&eventID, &streamPos, &eventBytes, &sessionID, &excludeFromSync, &txnID); err != nil { + if err := rows.Scan(&eventID, &streamPos, &eventBytes, &sessionID, &excludeFromSync, &txnID, &historyVisibility); err != nil { return nil, err } // TODO: Handle redacted events diff --git a/syncapi/storage/postgres/syncserver.go b/syncapi/storage/postgres/syncserver.go index 
9cfe7c070..c6121e419 100644 --- a/syncapi/storage/postgres/syncserver.go +++ b/syncapi/storage/postgres/syncserver.go @@ -42,18 +42,16 @@ func NewDatabase(base *base.BaseDendrite, dbProperties *config.DatabaseOptions) if d.db, d.writer, err = base.DatabaseConnection(dbProperties, sqlutil.NewDummyWriter()); err != nil { return nil, err } + if _, err = d.db.Exec(outputRoomEventsSchema); err != nil { + return nil, err + } + if _, err = d.db.Exec(currentRoomStateSchema); err != nil { + return nil, err + } accountData, err := NewPostgresAccountDataTable(d.db) if err != nil { return nil, err } - events, err := NewPostgresEventsTable(d.db) - if err != nil { - return nil, err - } - currState, err := NewPostgresCurrentRoomStateTable(d.db) - if err != nil { - return nil, err - } invites, err := NewPostgresInvitesTable(d.db) if err != nil { return nil, err @@ -101,9 +99,19 @@ func NewDatabase(base *base.BaseDendrite, dbProperties *config.DatabaseOptions) m := sqlutil.NewMigrations() deltas.LoadFixSequences(m) deltas.LoadRemoveSendToDeviceSentColumn(m) + deltas.LoadAddHistoryVisibilityColumn(m) if err = m.RunDeltas(d.db, dbProperties); err != nil { return nil, err } + // prepare statements after the migrations have run + events, err := NewPostgresEventsTable(d.db) + if err != nil { + return nil, err + } + currState, err := NewPostgresCurrentRoomStateTable(d.db) + if err != nil { + return nil, err + } d.Database = shared.Database{ DB: d.db, Writer: d.writer, diff --git a/syncapi/storage/shared/syncserver.go b/syncapi/storage/shared/syncserver.go index d1c5e2d16..932bda164 100644 --- a/syncapi/storage/shared/syncserver.go +++ b/syncapi/storage/shared/syncserver.go @@ -368,11 +368,12 @@ func (d *Database) WriteEvent( addStateEvents []*gomatrixserverlib.HeaderedEvent, addStateEventIDs, removeStateEventIDs []string, transactionID *api.TransactionID, excludeFromSync bool, + historyVisibility gomatrixserverlib.HistoryVisibility, ) (pduPosition types.StreamPosition, returnErr error) { returnErr = d.Writer.Do(d.DB, nil, func(txn *sql.Tx) error { var err error pos, err := d.OutputEvents.InsertEvent( - ctx, txn, ev, addStateEventIDs, removeStateEventIDs, transactionID, excludeFromSync, + ctx, txn, ev, addStateEventIDs, removeStateEventIDs, transactionID, excludeFromSync, historyVisibility, ) if err != nil { return fmt.Errorf("d.OutputEvents.InsertEvent: %w", err) @@ -391,7 +392,9 @@ func (d *Database) WriteEvent( // Nothing to do, the event may have just been a message event. 
return nil } - + for i := range addStateEvents { + addStateEvents[i].Visibility = historyVisibility + } return d.updateRoomState(ctx, txn, removeStateEventIDs, addStateEvents, pduPosition, topoPosition) }) diff --git a/syncapi/storage/sqlite3/current_room_state_table.go b/syncapi/storage/sqlite3/current_room_state_table.go index 376c3a3d2..501d0ee98 100644 --- a/syncapi/storage/sqlite3/current_room_state_table.go +++ b/syncapi/storage/sqlite3/current_room_state_table.go @@ -41,6 +41,7 @@ CREATE TABLE IF NOT EXISTS syncapi_current_room_state ( headered_event_json TEXT NOT NULL, membership TEXT, added_at BIGINT, + history_visibility SMALLINT NOT NULL DEFAULT 2, -- The history visibility before this event (1 - world_readable; 2 - shared; 3 - invited; 4 - joined) UNIQUE (room_id, type, state_key) ); -- for event deletion @@ -52,8 +53,8 @@ CREATE UNIQUE INDEX IF NOT EXISTS syncapi_current_room_state_eventid_idx ON sync ` const upsertRoomStateSQL = "" + - "INSERT INTO syncapi_current_room_state (room_id, event_id, type, sender, contains_url, state_key, headered_event_json, membership, added_at)" + - " VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)" + + "INSERT INTO syncapi_current_room_state (room_id, event_id, type, sender, contains_url, state_key, headered_event_json, membership, added_at, history_visibility)" + + " VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)" + " ON CONFLICT (room_id, type, state_key)" + " DO UPDATE SET event_id = $2, sender=$4, contains_url=$5, headered_event_json = $7, membership = $8, added_at = $9" @@ -84,11 +85,11 @@ const selectStateEventSQL = "" + "SELECT headered_event_json FROM syncapi_current_room_state WHERE room_id = $1 AND type = $2 AND state_key = $3" const selectEventsWithEventIDsSQL = "" + - // TODO: The session_id and transaction_id blanks are here because otherwise - // the rowsToStreamEvents expects there to be exactly six columns. We need to + // TODO: The session_id and transaction_id blanks are here because + // the rowsToStreamEvents expects there to be exactly seven columns. We need to // figure out if these really need to be in the DB, and if so, we need a // better permanent fix for this. - neilalexander, 2 Jan 2020 - "SELECT event_id, added_at, headered_event_json, 0 AS session_id, false AS exclude_from_sync, '' AS transaction_id" + + "SELECT event_id, added_at, headered_event_json, 0 AS session_id, false AS exclude_from_sync, '' AS transaction_id, history_visibility" + " FROM syncapi_current_room_state WHERE event_id IN ($1)" const selectSharedUsersSQL = "" + @@ -328,6 +329,7 @@ func (s *currentRoomStateStatements) UpsertRoomState( headeredJSON, membership, addedAt, + event.Visibility, ) return err } diff --git a/syncapi/storage/sqlite3/deltas/2022061412000000_history_visibility_column.go b/syncapi/storage/sqlite3/deltas/2022061412000000_history_visibility_column.go new file mode 100644 index 000000000..f6bcaddfb --- /dev/null +++ b/syncapi/storage/sqlite3/deltas/2022061412000000_history_visibility_column.go @@ -0,0 +1,82 @@ +// Copyright 2022 The Matrix.org Foundation C.I.C. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package deltas + +import ( + "database/sql" + "fmt" + + "github.com/matrix-org/dendrite/internal/sqlutil" +) + +func LoadAddHistoryVisibilityColumn(m *sqlutil.Migrations) { + m.AddMigration(UpAddHistoryVisibilityColumn, DownAddHistoryVisibilityColumn) +} + +func UpAddHistoryVisibilityColumn(tx *sql.Tx) error { + // SQLite doesn't have "if exists", so check if the column exists. If the query doesn't return an error, it already exists. + // Required for unit tests, as otherwise a duplicate column error will show up. + _, err := tx.Query("SELECT history_visibility FROM syncapi_output_room_events LIMIT 1") + if err == nil { + return nil + } + _, err = tx.Exec(` + ALTER TABLE syncapi_output_room_events ADD COLUMN history_visibility SMALLINT NOT NULL DEFAULT 2; + UPDATE syncapi_output_room_events SET history_visibility = 4 WHERE type IN ('m.room.message', 'm.room.encrypted'); + `) + if err != nil { + return fmt.Errorf("failed to execute upgrade: %w", err) + } + + _, err = tx.Query("SELECT history_visibility FROM syncapi_current_room_state LIMIT 1") + if err == nil { + return nil + } + _, err = tx.Exec(` + ALTER TABLE syncapi_current_room_state ADD COLUMN history_visibility SMALLINT NOT NULL DEFAULT 2; + UPDATE syncapi_current_room_state SET history_visibility = 4 WHERE type IN ('m.room.message', 'm.room.encrypted'); + `) + if err != nil { + return fmt.Errorf("failed to execute upgrade: %w", err) + } + return nil +} + +func DownAddHistoryVisibilityColumn(tx *sql.Tx) error { + // SQLite doesn't have "if exists", so check if the column exists. + _, err := tx.Query("SELECT history_visibility FROM syncapi_output_room_events LIMIT 1") + if err != nil { + // The column probably doesn't exist + return nil + } + _, err = tx.Exec(` + ALTER TABLE syncapi_output_room_events DROP COLUMN history_visibility; + `) + if err != nil { + return fmt.Errorf("failed to execute downgrade: %w", err) + } + _, err = tx.Query("SELECT history_visibility FROM syncapi_current_room_state LIMIT 1") + if err != nil { + // The column probably doesn't exist + return nil + } + _, err = tx.Exec(` + ALTER TABLE syncapi_current_room_state DROP COLUMN history_visibility; + `) + if err != nil { + return fmt.Errorf("failed to execute downgrade: %w", err) + } + return nil +} diff --git a/syncapi/storage/sqlite3/output_room_events_table.go b/syncapi/storage/sqlite3/output_room_events_table.go index f9961a9d1..b3dcb44cb 100644 --- a/syncapi/storage/sqlite3/output_room_events_table.go +++ b/syncapi/storage/sqlite3/output_room_events_table.go @@ -47,7 +47,8 @@ CREATE TABLE IF NOT EXISTS syncapi_output_room_events ( remove_state_ids TEXT, -- JSON encoded string array session_id BIGINT, transaction_id TEXT, - exclude_from_sync BOOL NOT NULL DEFAULT FALSE + exclude_from_sync BOOL NOT NULL DEFAULT FALSE, + history_visibility SMALLINT NOT NULL DEFAULT 2 -- The history visibility before this event (1 - world_readable; 2 - shared; 3 - invited; 4 - joined) ); CREATE INDEX IF NOT EXISTS syncapi_output_room_events_type_idx ON syncapi_output_room_events (type); @@ -58,27 +59,27 @@ CREATE INDEX IF NOT EXISTS syncapi_output_room_events_exclude_from_sync_idx ON s const insertEventSQL = "" + "INSERT INTO syncapi_output_room_events (" + - "id, room_id, event_id, headered_event_json, type, sender, contains_url, add_state_ids, remove_state_ids, session_id, transaction_id, exclude_from_sync" + - ") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, 
$12) " + - "ON CONFLICT (event_id) DO UPDATE SET exclude_from_sync = (excluded.exclude_from_sync AND $13)" + "id, room_id, event_id, headered_event_json, type, sender, contains_url, add_state_ids, remove_state_ids, session_id, transaction_id, exclude_from_sync, history_visibility" + + ") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13) " + + "ON CONFLICT (event_id) DO UPDATE SET exclude_from_sync = (excluded.exclude_from_sync AND $14)" const selectEventsSQL = "" + - "SELECT event_id, id, headered_event_json, session_id, exclude_from_sync, transaction_id FROM syncapi_output_room_events WHERE event_id IN ($1)" + "SELECT event_id, id, headered_event_json, session_id, exclude_from_sync, transaction_id, history_visibility FROM syncapi_output_room_events WHERE event_id IN ($1)" const selectRecentEventsSQL = "" + - "SELECT event_id, id, headered_event_json, session_id, exclude_from_sync, transaction_id FROM syncapi_output_room_events" + + "SELECT event_id, id, headered_event_json, session_id, exclude_from_sync, transaction_id, history_visibility FROM syncapi_output_room_events" + " WHERE room_id = $1 AND id > $2 AND id <= $3" // WHEN, ORDER BY and LIMIT are appended by prepareWithFilters const selectRecentEventsForSyncSQL = "" + - "SELECT event_id, id, headered_event_json, session_id, exclude_from_sync, transaction_id FROM syncapi_output_room_events" + + "SELECT event_id, id, headered_event_json, session_id, exclude_from_sync, transaction_id, history_visibility FROM syncapi_output_room_events" + " WHERE room_id = $1 AND id > $2 AND id <= $3 AND exclude_from_sync = FALSE" // WHEN, ORDER BY and LIMIT are appended by prepareWithFilters const selectEarlyEventsSQL = "" + - "SELECT event_id, id, headered_event_json, session_id, exclude_from_sync, transaction_id FROM syncapi_output_room_events" + + "SELECT event_id, id, headered_event_json, session_id, exclude_from_sync, transaction_id, history_visibility FROM syncapi_output_room_events" + " WHERE room_id = $1 AND id > $2 AND id <= $3" // WHEN, ORDER BY and LIMIT are appended by prepareWithFilters @@ -90,7 +91,7 @@ const updateEventJSONSQL = "" + "UPDATE syncapi_output_room_events SET headered_event_json=$1 WHERE event_id=$2" const selectStateInRangeSQL = "" + - "SELECT event_id, id, headered_event_json, exclude_from_sync, add_state_ids, remove_state_ids" + + "SELECT event_id, id, headered_event_json, exclude_from_sync, add_state_ids, remove_state_ids, history_visibility" + " FROM syncapi_output_room_events" + " WHERE (id > $1 AND id <= $2)" + " AND room_id IN ($3)" + @@ -102,15 +103,15 @@ const deleteEventsForRoomSQL = "" + "DELETE FROM syncapi_output_room_events WHERE room_id = $1" const selectContextEventSQL = "" + - "SELECT id, headered_event_json FROM syncapi_output_room_events WHERE room_id = $1 AND event_id = $2" + "SELECT id, headered_event_json, history_visibility FROM syncapi_output_room_events WHERE room_id = $1 AND event_id = $2" const selectContextBeforeEventSQL = "" + - "SELECT headered_event_json FROM syncapi_output_room_events WHERE room_id = $1 AND id < $2" + "SELECT headered_event_json, history_visibility FROM syncapi_output_room_events WHERE room_id = $1 AND id < $2" // WHEN, ORDER BY and LIMIT are appended by prepareWithFilters const selectContextAfterEventSQL = "" + - "SELECT id, headered_event_json FROM syncapi_output_room_events WHERE room_id = $1 AND id > $2" + "SELECT id, headered_event_json, history_visibility FROM syncapi_output_room_events WHERE room_id = $1 AND id > $2" // WHEN, ORDER BY and LIMIT are appended 
by prepareWithFilters @@ -196,14 +197,15 @@ func (s *outputRoomEventsStatements) SelectStateInRange( for rows.Next() { var ( - eventID string - streamPos types.StreamPosition - eventBytes []byte - excludeFromSync bool - addIDsJSON string - delIDsJSON string + eventID string + streamPos types.StreamPosition + eventBytes []byte + excludeFromSync bool + addIDsJSON string + delIDsJSON string + historyVisibility gomatrixserverlib.HistoryVisibility ) - if err := rows.Scan(&eventID, &streamPos, &eventBytes, &excludeFromSync, &addIDsJSON, &delIDsJSON); err != nil { + if err := rows.Scan(&eventID, &streamPos, &eventBytes, &excludeFromSync, &addIDsJSON, &delIDsJSON, &historyVisibility); err != nil { return nil, nil, err } @@ -239,6 +241,7 @@ func (s *outputRoomEventsStatements) SelectStateInRange( needSet[id] = true } stateNeeded[ev.RoomID()] = needSet + ev.Visibility = historyVisibility eventIDToEvent[eventID] = types.StreamEvent{ HeaderedEvent: &ev, @@ -270,7 +273,7 @@ func (s *outputRoomEventsStatements) SelectMaxEventID( func (s *outputRoomEventsStatements) InsertEvent( ctx context.Context, txn *sql.Tx, event *gomatrixserverlib.HeaderedEvent, addState, removeState []string, - transactionID *api.TransactionID, excludeFromSync bool, + transactionID *api.TransactionID, excludeFromSync bool, historyVisibility gomatrixserverlib.HistoryVisibility, ) (types.StreamPosition, error) { var txnID *string var sessionID *int64 @@ -326,6 +329,7 @@ func (s *outputRoomEventsStatements) InsertEvent( sessionID, txnID, excludeFromSync, + historyVisibility, excludeFromSync, ) return streamPos, err @@ -481,15 +485,16 @@ func rowsToStreamEvents(rows *sql.Rows) ([]types.StreamEvent, error) { var result []types.StreamEvent for rows.Next() { var ( - eventID string - streamPos types.StreamPosition - eventBytes []byte - excludeFromSync bool - sessionID *int64 - txnID *string - transactionID *api.TransactionID + eventID string + streamPos types.StreamPosition + eventBytes []byte + excludeFromSync bool + sessionID *int64 + txnID *string + transactionID *api.TransactionID + historyVisibility gomatrixserverlib.HistoryVisibility ) - if err := rows.Scan(&eventID, &streamPos, &eventBytes, &sessionID, &excludeFromSync, &txnID); err != nil { + if err := rows.Scan(&eventID, &streamPos, &eventBytes, &sessionID, &excludeFromSync, &txnID, &historyVisibility); err != nil { return nil, err } // TODO: Handle redacted events @@ -505,6 +510,8 @@ func rowsToStreamEvents(rows *sql.Rows) ([]types.StreamEvent, error) { } } + ev.Visibility = historyVisibility + result = append(result, types.StreamEvent{ HeaderedEvent: &ev, StreamPosition: streamPos, @@ -519,13 +526,15 @@ func (s *outputRoomEventsStatements) SelectContextEvent( ) (id int, evt gomatrixserverlib.HeaderedEvent, err error) { row := sqlutil.TxStmt(txn, s.selectContextEventStmt).QueryRowContext(ctx, roomID, eventID) var eventAsString string - if err = row.Scan(&id, &eventAsString); err != nil { + var historyVisibility gomatrixserverlib.HistoryVisibility + if err = row.Scan(&id, &eventAsString, &historyVisibility); err != nil { return 0, evt, err } if err = json.Unmarshal([]byte(eventAsString), &evt); err != nil { return 0, evt, err } + evt.Visibility = historyVisibility return id, evt, nil } @@ -550,15 +559,17 @@ func (s *outputRoomEventsStatements) SelectContextBeforeEvent( for rows.Next() { var ( - eventBytes []byte - evt *gomatrixserverlib.HeaderedEvent + eventBytes []byte + evt *gomatrixserverlib.HeaderedEvent + historyVisibility gomatrixserverlib.HistoryVisibility ) - if err = 
rows.Scan(&eventBytes); err != nil { + if err = rows.Scan(&eventBytes, &historyVisibility); err != nil { return evts, err } if err = json.Unmarshal(eventBytes, &evt); err != nil { return evts, err } + evt.Visibility = historyVisibility evts = append(evts, evt) } @@ -586,15 +597,17 @@ func (s *outputRoomEventsStatements) SelectContextAfterEvent( for rows.Next() { var ( - eventBytes []byte - evt *gomatrixserverlib.HeaderedEvent + eventBytes []byte + evt *gomatrixserverlib.HeaderedEvent + historyVisibility gomatrixserverlib.HistoryVisibility ) - if err = rows.Scan(&lastID, &eventBytes); err != nil { + if err = rows.Scan(&lastID, &eventBytes, &historyVisibility); err != nil { return 0, evts, err } if err = json.Unmarshal(eventBytes, &evt); err != nil { return 0, evts, err } + evt.Visibility = historyVisibility evts = append(evts, evt) } return lastID, evts, rows.Err() diff --git a/syncapi/storage/sqlite3/syncserver.go b/syncapi/storage/sqlite3/syncserver.go index e08a0ba82..39ceec81d 100644 --- a/syncapi/storage/sqlite3/syncserver.go +++ b/syncapi/storage/sqlite3/syncserver.go @@ -52,18 +52,16 @@ func (d *SyncServerDatasource) prepare(dbProperties *config.DatabaseOptions) (er if err = d.streamID.Prepare(d.db); err != nil { return err } + if _, err = d.db.Exec(outputRoomEventsSchema); err != nil { + return err + } + if _, err = d.db.Exec(currentRoomStateSchema); err != nil { + return err + } accountData, err := NewSqliteAccountDataTable(d.db, &d.streamID) if err != nil { return err } - events, err := NewSqliteEventsTable(d.db, &d.streamID) - if err != nil { - return err - } - roomState, err := NewSqliteCurrentRoomStateTable(d.db, &d.streamID) - if err != nil { - return err - } invites, err := NewSqliteInvitesTable(d.db, &d.streamID) if err != nil { return err @@ -111,9 +109,19 @@ func (d *SyncServerDatasource) prepare(dbProperties *config.DatabaseOptions) (er m := sqlutil.NewMigrations() deltas.LoadFixSequences(m) deltas.LoadRemoveSendToDeviceSentColumn(m) + deltas.LoadAddHistoryVisibilityColumn(m) if err = m.RunDeltas(d.db, dbProperties); err != nil { return err } + // prepare statements after the migrations have run + events, err := NewSqliteEventsTable(d.db, &d.streamID) + if err != nil { + return err + } + roomState, err := NewSqliteCurrentRoomStateTable(d.db, &d.streamID) + if err != nil { + return err + } d.Database = shared.Database{ DB: d.db, Writer: d.writer, diff --git a/syncapi/storage/storage_test.go b/syncapi/storage/storage_test.go index c74151700..df03a33c2 100644 --- a/syncapi/storage/storage_test.go +++ b/syncapi/storage/storage_test.go @@ -37,7 +37,7 @@ func MustWriteEvents(t *testing.T, db storage.Database, events []*gomatrixserver addStateEvents = append(addStateEvents, ev) addStateEventIDs = append(addStateEventIDs, ev.EventID()) } - pos, err := db.WriteEvent(ctx, ev, addStateEvents, addStateEventIDs, removeStateEventIDs, nil, false) + pos, err := db.WriteEvent(ctx, ev, addStateEvents, addStateEventIDs, removeStateEventIDs, nil, false, gomatrixserverlib.HistoryVisibilityShared) if err != nil { t.Fatalf("WriteEvent failed: %s", err) } diff --git a/syncapi/storage/tables/interface.go b/syncapi/storage/tables/interface.go index 08568d9ac..d8fc8f508 100644 --- a/syncapi/storage/tables/interface.go +++ b/syncapi/storage/tables/interface.go @@ -52,7 +52,14 @@ type Peeks interface { type Events interface { SelectStateInRange(ctx context.Context, txn *sql.Tx, r types.Range, stateFilter *gomatrixserverlib.StateFilter, roomIDs []string) (map[string]map[string]bool, 
map[string]types.StreamEvent, error) SelectMaxEventID(ctx context.Context, txn *sql.Tx) (id int64, err error) - InsertEvent(ctx context.Context, txn *sql.Tx, event *gomatrixserverlib.HeaderedEvent, addState, removeState []string, transactionID *api.TransactionID, excludeFromSync bool) (streamPos types.StreamPosition, err error) + InsertEvent( + ctx context.Context, txn *sql.Tx, + event *gomatrixserverlib.HeaderedEvent, + addState, removeState []string, + transactionID *api.TransactionID, + excludeFromSync bool, + historyVisibility gomatrixserverlib.HistoryVisibility, + ) (streamPos types.StreamPosition, err error) // SelectRecentEvents returns events between the two stream positions: exclusive of low and inclusive of high. // If onlySyncEvents has a value of true, only returns the events that aren't marked as to exclude from sync. // Returns up to `limit` events. Returns `limited=true` if there are more events in this range but we hit the `limit`. diff --git a/syncapi/storage/tables/output_room_events_test.go b/syncapi/storage/tables/output_room_events_test.go index 69bbd04c9..bdb17ae20 100644 --- a/syncapi/storage/tables/output_room_events_test.go +++ b/syncapi/storage/tables/output_room_events_test.go @@ -53,7 +53,7 @@ func TestOutputRoomEventsTable(t *testing.T) { events := room.Events() err := sqlutil.WithTransaction(db, func(txn *sql.Tx) error { for _, ev := range events { - _, err := tab.InsertEvent(ctx, txn, ev, nil, nil, nil, false) + _, err := tab.InsertEvent(ctx, txn, ev, nil, nil, nil, false, gomatrixserverlib.HistoryVisibilityShared) if err != nil { return fmt.Errorf("failed to InsertEvent: %s", err) } @@ -79,7 +79,7 @@ func TestOutputRoomEventsTable(t *testing.T) { "body": "test.txt", "url": "mxc://test.txt", }) - if _, err = tab.InsertEvent(ctx, txn, urlEv, nil, nil, nil, false); err != nil { + if _, err = tab.InsertEvent(ctx, txn, urlEv, nil, nil, nil, false, gomatrixserverlib.HistoryVisibilityShared); err != nil { return fmt.Errorf("failed to InsertEvent: %s", err) } wantEventID := []string{urlEv.EventID()} From 99b696e775f13eb876f68195acdbffc2e05764b2 Mon Sep 17 00:00:00 2001 From: Neil Alexander Date: Mon, 18 Jul 2022 14:49:41 +0100 Subject: [PATCH 05/18] Tweak cache counters (#2575) * Tweak cache counters This makes the number of counters relative to the maximum cache size. Since the counters effectively manage the size of the bloom filter, larger caches need more counters and smaller caches need less. 10 counters per 1KB data means that the default cache size of 1GB should result in a bloom filter and TinyLRU admission set of about 16MB estimated. 
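To make the sizing rule in the commit message concrete, here is a standalone sketch (not part of the patch) of the arithmetic behind the new `NumCounters` expression, assuming the default maximum cache cost of 1 GiB:

```go
package main

import "fmt"

func main() {
	// Assumed default: 1 GiB maximum cache cost, expressed in bytes.
	const maxCost = 1 * 1024 * 1024 * 1024

	// 10 counters per 1KB of cached data, mirroring the expression
	// int64((maxCost / 1024) * 10) used for ristretto's NumCounters.
	numCounters := (maxCost / 1024) * 10
	fmt.Println("NumCounters:", numCounters) // 10485760 for a 1 GiB cache
}
```

A 64 MiB cache would get proportionally fewer counters (655,360), which is the point of deriving the counter count from the configured cache size rather than hard-coding it.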
* Remove line left by accident --- internal/caching/impl_ristretto.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/caching/impl_ristretto.go b/internal/caching/impl_ristretto.go index 677218b5e..01c97ad55 100644 --- a/internal/caching/impl_ristretto.go +++ b/internal/caching/impl_ristretto.go @@ -44,9 +44,9 @@ const ( func NewRistrettoCache(maxCost config.DataUnit, maxAge time.Duration, enablePrometheus bool) *Caches { cache, err := ristretto.NewCache(&ristretto.Config{ - NumCounters: 1e5, // 10x number of expected cache items, affects bloom filter size, gives us room for 10,000 currently - BufferItems: 64, // recommended by the ristretto godocs as a sane buffer size value - MaxCost: int64(maxCost), + NumCounters: int64((maxCost / 1024) * 10), // 10 counters per 1KB data, affects bloom filter size + BufferItems: 64, // recommended by the ristretto godocs as a sane buffer size value + MaxCost: int64(maxCost), // max cost is in bytes, as per the Dendrite config Metrics: true, KeyToHash: func(key interface{}) (uint64, uint64) { return z.KeyToHash(key) From bcff14adea471cbb496924622295c62c02a82720 Mon Sep 17 00:00:00 2001 From: Till Faelligen Date: Mon, 18 Jul 2022 18:19:44 +0200 Subject: [PATCH 06/18] Set historyVisibility in rowsToStreamEvents --- syncapi/storage/postgres/output_room_events_table.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/syncapi/storage/postgres/output_room_events_table.go b/syncapi/storage/postgres/output_room_events_table.go index d0474d4c3..ddef27383 100644 --- a/syncapi/storage/postgres/output_room_events_table.go +++ b/syncapi/storage/postgres/output_room_events_table.go @@ -619,7 +619,7 @@ func rowsToStreamEvents(rows *sql.Rows) ([]types.StreamEvent, error) { TransactionID: *txnID, } } - + ev.Visibility = historyVisibility result = append(result, types.StreamEvent{ HeaderedEvent: &ev, StreamPosition: streamPos, From 583b8ea273be82ebdf2a9f81fd51a150d910b349 Mon Sep 17 00:00:00 2001 From: Neil Alexander Date: Tue, 19 Jul 2022 11:51:46 +0100 Subject: [PATCH 07/18] Update FAQ --- docs/FAQ.md | 9 ++++++--- setup/config/config_global.go | 6 +++--- userapi/util/phonehomestats.go | 6 +++--- 3 files changed, 12 insertions(+), 9 deletions(-) diff --git a/docs/FAQ.md b/docs/FAQ.md index 47f39b9e6..f8255684e 100644 --- a/docs/FAQ.md +++ b/docs/FAQ.md @@ -86,9 +86,12 @@ would be a huge help too, as that will help us to understand where the memory us You may need to revisit the connection limit of your PostgreSQL server and/or make changes to the `max_connections` lines in your Dendrite configuration. Be aware that each Dendrite component opens its own database connections and has its own connection limit, even in monolith mode! -## What is being reported when enabling anonymous stats? +## What is being reported when enabling phone-home statistics? -If anonymous stats reporting is enabled, the following data is send to the defined endpoint. +Phone-home statistics contain your server's domain name, some configuration information about +your deployment and aggregated information about active users on your deployment. They are sent +to the endpoint URL configured in your Dendrite configuration file only. 
The following is an +example of the data that is sent: ```json { @@ -106,7 +109,7 @@ If anonymous stats reporting is enabled, the following data is send to the defin "go_arch": "amd64", "go_os": "linux", "go_version": "go1.16.13", - "homeserver": "localhost:8800", + "homeserver": "my.domain.com", "log_level": "trace", "memory_rss": 93452, "monolith": true, diff --git a/setup/config/config_global.go b/setup/config/config_global.go index ac1380a4e..10827bb21 100644 --- a/setup/config/config_global.go +++ b/setup/config/config_global.go @@ -73,7 +73,7 @@ type Global struct { // ServerNotices configuration used for sending server notices ServerNotices ServerNotices `yaml:"server_notices"` - // ReportStats configures opt-in anonymous stats reporting. + // ReportStats configures opt-in phone-home statistics reporting. ReportStats ReportStats `yaml:"report_stats"` // Configuration for the caches. @@ -189,9 +189,9 @@ func (c *Cache) Verify(errors *ConfigErrors, isMonolith bool) { checkPositive(errors, "max_size_estimated", int64(c.EstimatedMaxSize)) } -// ReportStats configures opt-in anonymous stats reporting. +// ReportStats configures opt-in phone-home statistics reporting. type ReportStats struct { - // Enabled configures anonymous usage stats of the server + // Enabled configures phone-home statistics of the server Enabled bool `yaml:"enabled"` // Endpoint the endpoint to report stats to diff --git a/userapi/util/phonehomestats.go b/userapi/util/phonehomestats.go index ad93a50e3..b17f62060 100644 --- a/userapi/util/phonehomestats.go +++ b/userapi/util/phonehomestats.go @@ -139,7 +139,7 @@ func (p *phoneHomeStats) collect() { output := bytes.Buffer{} if err = json.NewEncoder(&output).Encode(p.stats); err != nil { - logrus.WithError(err).Error("unable to encode anonymous stats") + logrus.WithError(err).Error("Unable to encode phone-home statistics") return } @@ -147,14 +147,14 @@ func (p *phoneHomeStats) collect() { request, err := http.NewRequestWithContext(ctx, http.MethodPost, p.cfg.Global.ReportStats.Endpoint, &output) if err != nil { - logrus.WithError(err).Error("unable to create anonymous stats request") + logrus.WithError(err).Error("Unable to create phone-home statistics request") return } request.Header.Set("User-Agent", "Dendrite/"+internal.VersionString()) _, err = p.client.Do(request) if err != nil { - logrus.WithError(err).Error("unable to send anonymous stats") + logrus.WithError(err).Error("Unable to send phone-home statistics") return } } From 5c01306bb5add6f53907950982fc468c00ae266d Mon Sep 17 00:00:00 2001 From: Neil Alexander Date: Tue, 19 Jul 2022 12:15:48 +0100 Subject: [PATCH 08/18] Add event state key cache (#2576) --- internal/caching/cache_eventstatekeys.go | 18 ++++++++++++++++++ internal/caching/cache_roomservernids.go | 5 +++-- internal/caching/caches.go | 23 ++++++++++++----------- internal/caching/impl_ristretto.go | 8 +++++++- roomserver/storage/shared/storage.go | 19 ++++++++++++++++++- 5 files changed, 58 insertions(+), 15 deletions(-) create mode 100644 internal/caching/cache_eventstatekeys.go diff --git a/internal/caching/cache_eventstatekeys.go b/internal/caching/cache_eventstatekeys.go new file mode 100644 index 000000000..05580ab05 --- /dev/null +++ b/internal/caching/cache_eventstatekeys.go @@ -0,0 +1,18 @@ +package caching + +import "github.com/matrix-org/dendrite/roomserver/types" + +// EventStateKeyCache contains the subset of functions needed for +// a room event state key cache. 
+type EventStateKeyCache interface { + GetEventStateKey(eventStateKeyNID types.EventStateKeyNID) (string, bool) + StoreEventStateKey(eventStateKeyNID types.EventStateKeyNID, eventStateKey string) +} + +func (c Caches) GetEventStateKey(eventStateKeyNID types.EventStateKeyNID) (string, bool) { + return c.RoomServerStateKeys.Get(eventStateKeyNID) +} + +func (c Caches) StoreEventStateKey(eventStateKeyNID types.EventStateKeyNID, eventStateKey string) { + c.RoomServerStateKeys.Set(eventStateKeyNID, eventStateKey) +} diff --git a/internal/caching/cache_roomservernids.go b/internal/caching/cache_roomservernids.go index b409aeef2..f27154f19 100644 --- a/internal/caching/cache_roomservernids.go +++ b/internal/caching/cache_roomservernids.go @@ -9,6 +9,7 @@ type RoomServerCaches interface { RoomVersionCache RoomInfoCache RoomServerEventsCache + EventStateKeyCache } // RoomServerNIDsCache contains the subset of functions needed for @@ -19,9 +20,9 @@ type RoomServerNIDsCache interface { } func (c Caches) GetRoomServerRoomID(roomNID types.RoomNID) (string, bool) { - return c.RoomServerRoomIDs.Get(int64(roomNID)) + return c.RoomServerRoomIDs.Get(roomNID) } func (c Caches) StoreRoomServerRoomID(roomNID types.RoomNID, roomID string) { - c.RoomServerRoomIDs.Set(int64(roomNID), roomID) + c.RoomServerRoomIDs.Set(roomNID, roomID) } diff --git a/internal/caching/caches.go b/internal/caching/caches.go index e7914ce7d..f13f743d3 100644 --- a/internal/caching/caches.go +++ b/internal/caching/caches.go @@ -23,16 +23,17 @@ import ( // different implementations as long as they satisfy the Cache // interface. type Caches struct { - RoomVersions Cache[string, gomatrixserverlib.RoomVersion] // room ID -> room version - ServerKeys Cache[string, gomatrixserverlib.PublicKeyLookupResult] // server name -> server keys - RoomServerRoomNIDs Cache[string, types.RoomNID] // room ID -> room NID - RoomServerRoomIDs Cache[int64, string] // room NID -> room ID - RoomServerEvents Cache[int64, *gomatrixserverlib.Event] // event NID -> event - RoomInfos Cache[string, *types.RoomInfo] // room ID -> room info - FederationPDUs Cache[int64, *gomatrixserverlib.HeaderedEvent] // queue NID -> PDU - FederationEDUs Cache[int64, *gomatrixserverlib.EDU] // queue NID -> EDU - SpaceSummaryRooms Cache[string, gomatrixserverlib.MSC2946SpacesResponse] // room ID -> space response - LazyLoading Cache[lazyLoadingCacheKey, string] // composite key -> event ID + RoomVersions Cache[string, gomatrixserverlib.RoomVersion] // room ID -> room version + ServerKeys Cache[string, gomatrixserverlib.PublicKeyLookupResult] // server name -> server keys + RoomServerRoomNIDs Cache[string, types.RoomNID] // room ID -> room NID + RoomServerRoomIDs Cache[types.RoomNID, string] // room NID -> room ID + RoomServerEvents Cache[int64, *gomatrixserverlib.Event] // event NID -> event + RoomServerStateKeys Cache[types.EventStateKeyNID, string] // event NID -> event state key + RoomInfos Cache[string, *types.RoomInfo] // room ID -> room info + FederationPDUs Cache[int64, *gomatrixserverlib.HeaderedEvent] // queue NID -> PDU + FederationEDUs Cache[int64, *gomatrixserverlib.EDU] // queue NID -> EDU + SpaceSummaryRooms Cache[string, gomatrixserverlib.MSC2946SpacesResponse] // room ID -> space response + LazyLoading Cache[lazyLoadingCacheKey, string] // composite key -> event ID } // Cache is the interface that an implementation must satisfy. 
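The cache partitions above are now keyed by defined NID types such as `types.RoomNID` and `types.EventStateKeyNID`, and the next hunk widens the `keyable` constraint with `~` (tilde) forms to allow this. As a standalone illustration (not Dendrite code) of why the tilde matters in a Go generic constraint:

```go
package main

import "fmt"

// RoomNID stands in for a roomserver NID type: a defined type whose
// underlying type is int64 (assumed here purely for illustration).
type RoomNID int64

// keyable is a trimmed-down version of the constraint in the hunk below.
// Without the tilde, only int64 itself would be accepted; with ~int64,
// any type whose underlying type is int64 (such as RoomNID) qualifies.
type keyable interface {
	~int64 | ~string
}

// lookup is a toy generic cache lookup keyed by any keyable type.
func lookup[K keyable, V any](m map[K]V, k K) (V, bool) {
	v, ok := m[k]
	return v, ok
}

func main() {
	cache := map[RoomNID]string{42: "!someroom:example.com"}
	if roomID, ok := lookup(cache, RoomNID(42)); ok {
		fmt.Println("room ID for NID 42:", roomID)
	}
}
```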
@@ -44,7 +45,7 @@ type Cache[K keyable, T any] interface { type keyable interface { // from https://github.com/dgraph-io/ristretto/blob/8e850b710d6df0383c375ec6a7beae4ce48fc8d5/z/z.go#L34 - uint64 | string | []byte | byte | int | int32 | uint32 | int64 | lazyLoadingCacheKey + ~uint64 | ~string | []byte | byte | ~int | ~int32 | ~uint32 | ~int64 | lazyLoadingCacheKey } type costable interface { diff --git a/internal/caching/impl_ristretto.go b/internal/caching/impl_ristretto.go index 01c97ad55..fdbbb4d72 100644 --- a/internal/caching/impl_ristretto.go +++ b/internal/caching/impl_ristretto.go @@ -40,6 +40,7 @@ const ( federationEDUsCache spaceSummaryRoomsCache lazyLoadingCache + eventStateKeyCache ) func NewRistrettoCache(maxCost config.DataUnit, maxAge time.Duration, enablePrometheus bool) *Caches { @@ -88,7 +89,7 @@ func NewRistrettoCache(maxCost config.DataUnit, maxAge time.Duration, enableProm Prefix: roomNIDsCache, MaxAge: maxAge, }, - RoomServerRoomIDs: &RistrettoCachePartition[int64, string]{ // room NID -> room ID + RoomServerRoomIDs: &RistrettoCachePartition[types.RoomNID, string]{ // room NID -> room ID cache: cache, Prefix: roomIDsCache, MaxAge: maxAge, @@ -100,6 +101,11 @@ func NewRistrettoCache(maxCost config.DataUnit, maxAge time.Duration, enableProm MaxAge: maxAge, }, }, + RoomServerStateKeys: &RistrettoCachePartition[types.EventStateKeyNID, string]{ // event NID -> event state key + cache: cache, + Prefix: eventStateKeyCache, + MaxAge: maxAge, + }, RoomInfos: &RistrettoCachePartition[string, *types.RoomInfo]{ // room ID -> room info cache: cache, Prefix: roomInfosCache, diff --git a/roomserver/storage/shared/storage.go b/roomserver/storage/shared/storage.go index d8d5f67c8..2f9932ff8 100644 --- a/roomserver/storage/shared/storage.go +++ b/roomserver/storage/shared/storage.go @@ -72,7 +72,24 @@ func (d *Database) eventTypeNIDs( func (d *Database) EventStateKeys( ctx context.Context, eventStateKeyNIDs []types.EventStateKeyNID, ) (map[types.EventStateKeyNID]string, error) { - return d.EventStateKeysTable.BulkSelectEventStateKey(ctx, nil, eventStateKeyNIDs) + result := make(map[types.EventStateKeyNID]string, len(eventStateKeyNIDs)) + fetch := make([]types.EventStateKeyNID, 0, len(eventStateKeyNIDs)) + for _, nid := range eventStateKeyNIDs { + if key, ok := d.Cache.GetEventStateKey(nid); ok { + result[nid] = key + } else { + fetch = append(fetch, nid) + } + } + fromDB, err := d.EventStateKeysTable.BulkSelectEventStateKey(ctx, nil, fetch) + if err != nil { + return nil, err + } + for nid, key := range fromDB { + result[nid] = key + d.Cache.StoreEventStateKey(nid, key) + } + return result, nil } func (d *Database) EventStateKeyNIDs( From 84a779788360101a2353679cc2baa695fba7f9f2 Mon Sep 17 00:00:00 2001 From: Tulir Asokan Date: Tue, 19 Jul 2022 17:19:03 +0300 Subject: [PATCH 09/18] Explain how SRV works in Matrix and discourage using it (#2577) * Explain how SRV works in Matrix and discourage using it * Minor tweaks to formatting Co-authored-by: Neil Alexander --- docs/installation/2_domainname.md | 60 +++++++++++++++++++------------ 1 file changed, 37 insertions(+), 23 deletions(-) diff --git a/docs/installation/2_domainname.md b/docs/installation/2_domainname.md index 0d4300eca..54064acb4 100644 --- a/docs/installation/2_domainname.md +++ b/docs/installation/2_domainname.md @@ -14,15 +14,18 @@ that take the format `@user:example.com`. 
For federation to work, the server name must be resolvable by other homeservers on the internet — that is, the domain must be registered and properly configured with the relevant DNS records. -Matrix servers discover each other when federating using the following methods: +Matrix servers usually discover each other when federating using the following methods: -1. If a well-known delegation exists on `example.com`, use the path server from the +1. If a well-known delegation exists on `example.com`, use the domain and port from the well-known file to connect to the remote homeserver; -2. If a DNS SRV delegation exists on `example.com`, use the hostname and port from the DNS SRV +2. If a DNS SRV delegation exists on `example.com`, use the IP address and port from the DNS SRV record to connect to the remote homeserver; 3. If neither well-known or DNS SRV delegation are configured, attempt to connect to the remote homeserver by connecting to `example.com` port TCP/8448 using HTTPS. +The exact details of how server name resolution works can be found in +[the spec](https://spec.matrix.org/v1.3/server-server-api/#resolving-server-names). + ## TLS certificates Matrix federation requires that valid TLS certificates are present on the domain. You must @@ -51,17 +54,12 @@ you will be able to delegate from `example.com` to `matrix.example.com` so that Delegation can be performed in one of two ways: -* **Well-known delegation**: A well-known text file is served over HTTPS on the domain name - that you want to use, pointing to your server on `matrix.example.com` port 8448; -* **DNS SRV delegation**: A DNS SRV record is created on the domain name that you want to - use, pointing to your server on `matrix.example.com` port TCP/8448. +* **Well-known delegation (preferred)**: A well-known text file is served over HTTPS on the domain + name that you want to use, pointing to your server on `matrix.example.com` port 8448; +* **DNS SRV delegation (not recommended)**: See the SRV delegation section below for details. -If you are using a reverse proxy to forward `/_matrix` to Dendrite, your well-known or DNS SRV -delegation must refer to the hostname and port that the reverse proxy is listening on instead. - -Well-known delegation is typically easier to set up and usually preferred. However, you can use -either or both methods to delegate. If you configure both methods of delegation, it is important -that they both agree and refer to the same hostname and port. +If you are using a reverse proxy to forward `/_matrix` to Dendrite, your well-known or delegation +must refer to the hostname and port that the reverse proxy is listening on instead. ## Well-known delegation @@ -74,20 +72,36 @@ and contain the following JSON document: ```json { - "m.server": "https://matrix.example.com:8448" + "m.server": "matrix.example.com:8448" } ``` +You can also serve `.well-known` with Dendrite itself by setting the `well_known_server_name` config +option to the value you want for `m.server`. This is primarily useful if Dendrite is exposed on +`example.com:443` and you don't want to set up a separate webserver just for serving the `.well-known` +file. + +```yaml +global: +... + well_known_server_name: "example.com:443" +``` + ## DNS SRV delegation -Using DNS SRV delegation requires creating DNS SRV records on the `example.com` zone which -refer to your Dendrite installation. 
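As an aside on the well-known document shown above: the sketch below is a minimal, illustrative Go client that fetches and decodes `/.well-known/matrix/server` for an assumed placeholder domain. It is not Dendrite's resolution code, and real federation resolution involves more steps, as described in the spec link above.

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"time"
)

// wellKnownServer models the JSON document served at /.well-known/matrix/server.
type wellKnownServer struct {
	Server string `json:"m.server"`
}

func main() {
	client := &http.Client{Timeout: 5 * time.Second}
	resp, err := client.Get("https://example.com/.well-known/matrix/server")
	if err != nil {
		fmt.Println("no well-known delegation:", err)
		return
	}
	defer resp.Body.Close()

	var wk wellKnownServer
	if err := json.NewDecoder(resp.Body).Decode(&wk); err != nil {
		fmt.Println("invalid well-known document:", err)
		return
	}
	fmt.Println("delegated to:", wk.Server) // e.g. "matrix.example.com:8448"
}
```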
+This method is not recommended, as the behavior of SRV records in Matrix is rather unintuitive: +SRV records will only change the IP address and port that other servers connect to, they won't +affect the domain name. In technical terms, the `Host` header and TLS SNI of federation requests +will still be `example.com` even if the SRV record points at `matrix.example.com`. -Assuming that your Dendrite installation is listening for HTTPS connections at `matrix.example.com` -port 8448, the DNS SRV record must have the following fields: +In practice, this means that the server must be configured with valid TLS certificates for +`example.com`, rather than `matrix.example.com` as one might intuitively expect. If there's a +reverse proxy in between, the proxy configuration must be written as if it's `example.com`, as the +proxy will never see the name `matrix.example.com` in incoming requests. -* Name: `@` (or whichever term your DNS provider uses to signal the root) -* Service: `_matrix` -* Protocol: `_tcp` -* Port: `8448` -* Target: `matrix.example.com` +This behavior also means that if `example.com` and `matrix.example.com` point at the same IP +address, there is no reason to have a SRV record pointing at `matrix.example.com`. It can still +be used to change the port number, but it won't do anything else. + +If you understand how SRV records work and still want to use them, the service name is `_matrix` and +the protocol is `_tcp`. From 9507966ebdbe0615e8d27be4b7a1d4440ada3e73 Mon Sep 17 00:00:00 2001 From: Till <2353100+S7evinK@users.noreply.github.com> Date: Wed, 20 Jul 2022 12:39:06 +0200 Subject: [PATCH 10/18] Fix issue with membership event_nid being 0 (#2580) --- roomserver/storage/shared/membership_updater.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/roomserver/storage/shared/membership_updater.go b/roomserver/storage/shared/membership_updater.go index ebfcef569..c79ca8995 100644 --- a/roomserver/storage/shared/membership_updater.go +++ b/roomserver/storage/shared/membership_updater.go @@ -105,8 +105,15 @@ func (u *MembershipUpdater) SetToInvite(event *gomatrixserverlib.Event) (bool, e if err != nil { return fmt.Errorf("u.d.InvitesTable.InsertInviteEvent: %w", err) } + + // Look up the NID of the invite event + nIDs, err := u.d.eventNIDs(u.ctx, u.txn, []string{event.EventID()}, false) + if err != nil { + return fmt.Errorf("u.d.EventNIDs: %w", err) + } + if u.membership != tables.MembershipStateInvite { - if inserted, err = u.d.MembershipTable.UpdateMembership(u.ctx, u.txn, u.roomNID, u.targetUserNID, senderUserNID, tables.MembershipStateInvite, 0, false); err != nil { + if inserted, err = u.d.MembershipTable.UpdateMembership(u.ctx, u.txn, u.roomNID, u.targetUserNID, senderUserNID, tables.MembershipStateInvite, nIDs[event.EventID()], false); err != nil { return fmt.Errorf("u.d.MembershipTable.UpdateMembership: %w", err) } } From 35ce551c8ffaef4d4bbfc5a4a8fd41a8b8ef2b27 Mon Sep 17 00:00:00 2001 From: Matt Holt Date: Wed, 20 Jul 2022 23:08:17 -0600 Subject: [PATCH 11/18] docs: Add build page; correct proxy info; fix Caddy example (#2579) * Add build page; correct proxy info; fix Caddy example * Improve Caddyfile example * Apply review comments; add polylith Caddyfile --- docs/CONTRIBUTING.md | 2 +- docs/caddy/monolith/CaddyFile | 68 ------------------- docs/caddy/monolith/Caddyfile | 57 ++++++++++++++++ docs/caddy/polylith/Caddyfile | 66 ++++++++++++++++++ ...ng_polylith.md => 10_starting_polylith.md} | 2 +- ...{10_optimisation.md => 11_optimisation.md} | 2 
+- docs/installation/1_planning.md | 11 +-- docs/installation/2_domainname.md | 34 +++++++--- docs/installation/3_build.md | 38 +++++++++++ .../{3_database.md => 4_database.md} | 4 +- docs/installation/6_install_polylith.md | 5 +- docs/installation/7_configuration.md | 6 +- .../{4_signingkey.md => 8_signingkey.md} | 2 +- ...ing_monolith.md => 9_starting_monolith.md} | 5 +- 14 files changed, 209 insertions(+), 93 deletions(-) delete mode 100644 docs/caddy/monolith/CaddyFile create mode 100644 docs/caddy/monolith/Caddyfile create mode 100644 docs/caddy/polylith/Caddyfile rename docs/installation/{9_starting_polylith.md => 10_starting_polylith.md} (99%) rename docs/installation/{10_optimisation.md => 11_optimisation.md} (99%) create mode 100644 docs/installation/3_build.md rename docs/installation/{3_database.md => 4_database.md} (96%) rename docs/installation/{4_signingkey.md => 8_signingkey.md} (99%) rename docs/installation/{8_starting_monolith.md => 9_starting_monolith.md} (83%) diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md index 5a89e6841..169224b9e 100644 --- a/docs/CONTRIBUTING.md +++ b/docs/CONTRIBUTING.md @@ -24,7 +24,7 @@ Unfortunately we can't accept contributions without it. ## Getting up and running -See the [Installation](INSTALL.md) section for information on how to build an +See the [Installation](installation) section for information on how to build an instance of Dendrite. You will likely need this in order to test your changes. ## Code style diff --git a/docs/caddy/monolith/CaddyFile b/docs/caddy/monolith/CaddyFile deleted file mode 100644 index cd93f9e10..000000000 --- a/docs/caddy/monolith/CaddyFile +++ /dev/null @@ -1,68 +0,0 @@ -{ - # debug - admin off - email example@example.com - default_sni example.com - # Debug endpoint - # acme_ca https://acme-staging-v02.api.letsencrypt.org/directory -} - -####################################################################### -# Snippets -#______________________________________________________________________ - -(handle_errors_maintenance) { - handle_errors { - @maintenance expression {http.error.status_code} == 502 - rewrite @maintenance maintenance.html - root * "/path/to/service/pages" - file_server - } -} - -(matrix-well-known-header) { - # Headers - header Access-Control-Allow-Origin "*" - header Access-Control-Allow-Methods "GET, POST, PUT, DELETE, OPTIONS" - header Access-Control-Allow-Headers "Origin, X-Requested-With, Content-Type, Accept, Authorization" - header Content-Type "application/json" -} - -####################################################################### - -example.com { - - # ... - - handle /.well-known/matrix/server { - import matrix-well-known-header - respond `{ "m.server": "matrix.example.com:443" }` 200 - } - - handle /.well-known/matrix/client { - import matrix-well-known-header - respond `{ "m.homeserver": { "base_url": "https://matrix.example.com" } }` 200 - } - - import handle_errors_maintenance -} - -example.com:8448 { - # server<->server HTTPS traffic - reverse_proxy http://dendrite-host:8008 -} - -matrix.example.com { - - handle /_matrix/* { - # client<->server HTTPS traffic - reverse_proxy http://dendrite-host:8008 - } - - handle_path /* { - # Client webapp (Element SPA or ...) 
- file_server { - root /path/to/www/example.com/matrix-web-client/ - } - } -} diff --git a/docs/caddy/monolith/Caddyfile b/docs/caddy/monolith/Caddyfile new file mode 100644 index 000000000..82567c4a6 --- /dev/null +++ b/docs/caddy/monolith/Caddyfile @@ -0,0 +1,57 @@ +# Sample Caddyfile for using Caddy in front of Dendrite. +# +# Customize email address and domain names. +# Optional settings commented out. +# +# BE SURE YOUR DOMAINS ARE POINTED AT YOUR SERVER FIRST. +# Documentation: https://caddyserver.com/docs/ +# +# Bonus tip: If your IP address changes, use Caddy's +# dynamic DNS plugin to update your DNS records to +# point to your new IP automatically: +# https://github.com/mholt/caddy-dynamicdns +# + + +# Global options block +{ + # In case there is a problem with your certificates. + # email example@example.com + + # Turn off the admin endpoint if you don't need graceful config + # changes and/or are running untrusted code on your machine. + # admin off + + # Enable this if your clients don't send ServerName in TLS handshakes. + # default_sni example.com + + # Enable debug mode for verbose logging. + # debug + + # Use Let's Encrypt's staging endpoint for testing. + # acme_ca https://acme-staging-v02.api.letsencrypt.org/directory + + # If you're port-forwarding HTTP/HTTPS ports from 80/443 to something + # else, enable these and put the alternate port numbers here. + # http_port 8080 + # https_port 8443 +} + +# The server name of your matrix homeserver. This example shows +# "well-known delegation" from the registered domain to a subdomain, +# which is only needed if your server_name doesn't match your Matrix +# homeserver URL (i.e. you can show users a vanity domain that looks +# nice and is easy to remember but still have your Matrix server on +# its own subdomain or hosted service). +example.com { + header /.well-known/matrix/* Content-Type application/json + header /.well-known/matrix/* Access-Control-Allow-Origin * + respond /.well-known/matrix/server `{"m.server": "matrix.example.com:443"}` + respond /.well-known/matrix/client `{"m.homeserver": {"base_url": "https://matrix.example.com"}}` +} + +# The actual domain name whereby your Matrix server is accessed. +matrix.example.com { + # Set localhost:8008 to the address of your Dendrite server, if different + reverse_proxy /_matrix/* localhost:8008 +} diff --git a/docs/caddy/polylith/Caddyfile b/docs/caddy/polylith/Caddyfile new file mode 100644 index 000000000..244e50e7e --- /dev/null +++ b/docs/caddy/polylith/Caddyfile @@ -0,0 +1,66 @@ +# Sample Caddyfile for using Caddy in front of Dendrite. +# +# Customize email address and domain names. +# Optional settings commented out. +# +# BE SURE YOUR DOMAINS ARE POINTED AT YOUR SERVER FIRST. +# Documentation: https://caddyserver.com/docs/ +# +# Bonus tip: If your IP address changes, use Caddy's +# dynamic DNS plugin to update your DNS records to +# point to your new IP automatically: +# https://github.com/mholt/caddy-dynamicdns +# + + +# Global options block +{ + # In case there is a problem with your certificates. + # email example@example.com + + # Turn off the admin endpoint if you don't need graceful config + # changes and/or are running untrusted code on your machine. + # admin off + + # Enable this if your clients don't send ServerName in TLS handshakes. + # default_sni example.com + + # Enable debug mode for verbose logging. + # debug + + # Use Let's Encrypt's staging endpoint for testing. 
+ # acme_ca https://acme-staging-v02.api.letsencrypt.org/directory + + # If you're port-forwarding HTTP/HTTPS ports from 80/443 to something + # else, enable these and put the alternate port numbers here. + # http_port 8080 + # https_port 8443 +} + +# The server name of your matrix homeserver. This example shows +# "well-known delegation" from the registered domain to a subdomain, +# which is only needed if your server_name doesn't match your Matrix +# homeserver URL (i.e. you can show users a vanity domain that looks +# nice and is easy to remember but still have your Matrix server on +# its own subdomain or hosted service). +example.com { + header /.well-known/matrix/* Content-Type application/json + header /.well-known/matrix/* Access-Control-Allow-Origin * + respond /.well-known/matrix/server `{"m.server": "matrix.example.com:443"}` + respond /.well-known/matrix/client `{"m.homeserver": {"base_url": "https://matrix.example.com"}}` +} + +# The actual domain name whereby your Matrix server is accessed. +matrix.example.com { + # Change the end of each reverse_proxy line to the correct + # address for your various services. + @sync_api { + path_regexp /_matrix/client/.*?/(sync|user/.*?/filter/?.*|keys/changes|rooms/.*?/messages)$ + } + reverse_proxy @sync_api sync_api:8073 + + reverse_proxy /_matrix/client* client_api:8071 + reverse_proxy /_matrix/federation* federation_api:8071 + reverse_proxy /_matrix/key* federation_api:8071 + reverse_proxy /_matrix/media* media_api:8071 +} diff --git a/docs/installation/9_starting_polylith.md b/docs/installation/10_starting_polylith.md similarity index 99% rename from docs/installation/9_starting_polylith.md rename to docs/installation/10_starting_polylith.md index 228e52e85..0c2e2af2b 100644 --- a/docs/installation/9_starting_polylith.md +++ b/docs/installation/10_starting_polylith.md @@ -2,7 +2,7 @@ title: Starting the polylith parent: Installation has_toc: true -nav_order: 9 +nav_order: 10 permalink: /installation/start/polylith --- diff --git a/docs/installation/10_optimisation.md b/docs/installation/11_optimisation.md similarity index 99% rename from docs/installation/10_optimisation.md rename to docs/installation/11_optimisation.md index c19b7a75e..f2f67c947 100644 --- a/docs/installation/10_optimisation.md +++ b/docs/installation/11_optimisation.md @@ -2,7 +2,7 @@ title: Optimise your installation parent: Installation has_toc: true -nav_order: 10 +nav_order: 11 permalink: /installation/start/optimisation --- diff --git a/docs/installation/1_planning.md b/docs/installation/1_planning.md index d4f3d7052..3aa5b4d85 100644 --- a/docs/installation/1_planning.md +++ b/docs/installation/1_planning.md @@ -95,12 +95,13 @@ enabled. To do so, follow the [NATS Server installation instructions](https://docs.nats.io/running-a-nats-service/introduction/installation) and then [start your NATS deployment](https://docs.nats.io/running-a-nats-service/introduction/running). JetStream must be enabled, either by passing the `-js` flag to `nats-server`, or by specifying the `store_dir` option in the the `jetstream` configuration. -### Reverse proxy (polylith deployments) +### Reverse proxy -Polylith deployments require a reverse proxy, such as [NGINX](https://www.nginx.com) or -[HAProxy](http://www.haproxy.org). Configuring those is not covered in this documentation, -although a [sample configuration for NGINX](https://github.com/matrix-org/dendrite/blob/main/docs/nginx/polylith-sample.conf) -is provided. 
+A reverse proxy such as [Caddy](https://caddyserver.com), [NGINX](https://www.nginx.com) or +[HAProxy](http://www.haproxy.org) is required for polylith deployments and is useful for monolith +deployments. Configuring those is not covered in this documentation, although sample configurations +for [Caddy](https://github.com/matrix-org/dendrite/blob/main/docs/caddy) and +[NGINX](https://github.com/matrix-org/dendrite/blob/main/docs/nginx) are provided. ### Windows diff --git a/docs/installation/2_domainname.md b/docs/installation/2_domainname.md index 54064acb4..7d7fc86bd 100644 --- a/docs/installation/2_domainname.md +++ b/docs/installation/2_domainname.md @@ -29,15 +29,23 @@ The exact details of how server name resolution works can be found in ## TLS certificates Matrix federation requires that valid TLS certificates are present on the domain. You must -obtain certificates from a publicly accepted Certificate Authority (CA). [LetsEncrypt](https://letsencrypt.org) -is an example of such a CA that can be used. Self-signed certificates are not suitable for -federation and will typically not be accepted by other homeservers. +obtain certificates from a publicly-trusted certificate authority (CA). [Let's Encrypt](https://letsencrypt.org) +is a popular choice of CA because the certificates are publicly-trusted, free, and automated +via the ACME protocol. (Self-signed certificates are not suitable for federation and will typically +not be accepted by other homeservers.) -A common practice to help ease the management of certificates is to install a reverse proxy in -front of Dendrite which manages the TLS certificates and HTTPS proxying itself. Software such as -[NGINX](https://www.nginx.com) and [HAProxy](http://www.haproxy.org) can be used for the task. -Although the finer details of configuring these are not described here, you must reverse proxy -all `/_matrix` paths to your Dendrite server. +Automating the renewal of TLS certificates is best practice. There are many tools for this, +but the simplest way to achieve TLS automation is to have your reverse proxy do it for you. +[Caddy](https://caddyserver.com) is recommended as a production-grade reverse proxy with +automatic TLS which is commonly used in front of Dendrite. It obtains and renews TLS certificates +automatically and by default as long as your domain name is pointed at your server first. +Although the finer details of [configuring Caddy](https://caddyserver.com/docs/) is not described +here, in general, you must reverse proxy all `/_matrix` paths to your Dendrite server. For example, +with Caddy: + +``` +reverse_proxy /_matrix/* localhost:8008 +``` It is possible for the reverse proxy to listen on the standard HTTPS port TCP/443 so long as your domain delegation is configured to point to port TCP/443. @@ -76,6 +84,16 @@ and contain the following JSON document: } ``` +For example, this can be done with the following Caddy config: + +``` +handle /.well-known/matrix/client { + header Content-Type application/json + header Access-Control-Allow-Origin * + respond `{"m.homeserver": {"base_url": "https://matrix.example.com:8448"}}` +} +``` + You can also serve `.well-known` with Dendrite itself by setting the `well_known_server_name` config option to the value you want for `m.server`. 
This is primarily useful if Dendrite is exposed on `example.com:443` and you don't want to set up a separate webserver just for serving the `.well-known` diff --git a/docs/installation/3_build.md b/docs/installation/3_build.md new file mode 100644 index 000000000..aed2080db --- /dev/null +++ b/docs/installation/3_build.md @@ -0,0 +1,38 @@ +--- +title: Building Dendrite +parent: Installation +has_toc: true +nav_order: 3 +permalink: /installation/build +--- + +# Build all Dendrite commands + +Dendrite has numerous utility commands in addition to the actual server binaries. +Build them all from the root of the source repo with `build.sh` (Linux/Mac): + +```sh +./build.sh +``` + +or `build.cmd` (Windows): + +```powershell +build.cmd +``` + +The resulting binaries will be placed in the `bin` subfolder. + +# Installing as a monolith + +You can install the Dendrite monolith binary into `$GOPATH/bin` by using `go install`: + +```sh +go install ./cmd/dendrite-monolith-server +``` + +Alternatively, you can specify a custom path for the binary to be written to using `go build`: + +```sh +go build -o /usr/local/bin/ ./cmd/dendrite-monolith-server +``` diff --git a/docs/installation/3_database.md b/docs/installation/4_database.md similarity index 96% rename from docs/installation/3_database.md rename to docs/installation/4_database.md index f64fe9150..f6222a8d2 100644 --- a/docs/installation/3_database.md +++ b/docs/installation/4_database.md @@ -17,7 +17,9 @@ filenames in the Dendrite configuration file and start Dendrite. The databases w and populated automatically. Note that Dendrite **cannot share a single SQLite database across multiple components**. Each -component must be configured with its own SQLite database filename. +component must be configured with its own SQLite database filename. You will have to remove +the `global.database` section from your Dendrite config and add it to each individual section +instead in order to use SQLite. ### Connection strings diff --git a/docs/installation/6_install_polylith.md b/docs/installation/6_install_polylith.md index 375512f8f..ec4a77628 100644 --- a/docs/installation/6_install_polylith.md +++ b/docs/installation/6_install_polylith.md @@ -29,5 +29,6 @@ Polylith deployments require a reverse proxy in order to ensure that requests ar sent to the correct endpoint. You must ensure that a suitable reverse proxy is installed and configured. -A [sample configuration file](https://github.com/matrix-org/dendrite/blob/main/docs/nginx/polylith-sample.conf) -is provided for [NGINX](https://www.nginx.com). +Sample configurations are provided +for [Caddy](https://github.com/matrix-org/dendrite/blob/main/docs/caddy/polylith/Caddyfile) +and [NGINX](https://github.com/matrix-org/dendrite/blob/main/docs/nginx/polylith-sample.conf). \ No newline at end of file diff --git a/docs/installation/7_configuration.md b/docs/installation/7_configuration.md index e676afbe6..b1c747414 100644 --- a/docs/installation/7_configuration.md +++ b/docs/installation/7_configuration.md @@ -1,13 +1,13 @@ --- -title: Populate the configuration +title: Configuring Dendrite parent: Installation nav_order: 7 permalink: /installation/configuration --- -# Populate the configuration +# Configuring Dendrite -The configuration file is used to configure Dendrite. Sample configuration files are +A YAML configuration file is used to configure Dendrite. 
Sample configuration files are present in the top level of the Dendrite repository: * [`dendrite-sample.monolith.yaml`](https://github.com/matrix-org/dendrite/blob/main/dendrite-sample.monolith.yaml) diff --git a/docs/installation/4_signingkey.md b/docs/installation/8_signingkey.md similarity index 99% rename from docs/installation/4_signingkey.md rename to docs/installation/8_signingkey.md index 07dc485ff..323759a88 100644 --- a/docs/installation/4_signingkey.md +++ b/docs/installation/8_signingkey.md @@ -1,7 +1,7 @@ --- title: Generating signing keys parent: Installation -nav_order: 4 +nav_order: 8 permalink: /installation/signingkeys --- diff --git a/docs/installation/8_starting_monolith.md b/docs/installation/9_starting_monolith.md similarity index 83% rename from docs/installation/8_starting_monolith.md rename to docs/installation/9_starting_monolith.md index e0e7309d2..124477e73 100644 --- a/docs/installation/8_starting_monolith.md +++ b/docs/installation/9_starting_monolith.md @@ -15,8 +15,9 @@ you can start your Dendrite monolith deployment by starting the `dendrite-monoli ./dendrite-monolith-server -config /path/to/dendrite.yaml ``` -If you want to change the addresses or ports that Dendrite listens on, you -can use the `-http-bind-address` and `-https-bind-address` command line arguments: +By default, Dendrite will listen HTTP on port 8008. If you want to change the addresses +or ports that Dendrite listens on, you can use the `-http-bind-address` and +`-https-bind-address` command line arguments: ```bash ./dendrite-monolith-server -config /path/to/dendrite.yaml \ From a201b4400dd4fe99b4c391f94ac31884deccc12c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 22 Jul 2022 10:36:10 +0100 Subject: [PATCH 12/18] Bump tzinfo from 1.2.9 to 1.2.10 in /docs (#2584) Bumps [tzinfo](https://github.com/tzinfo/tzinfo) from 1.2.9 to 1.2.10. - [Release notes](https://github.com/tzinfo/tzinfo/releases) - [Changelog](https://github.com/tzinfo/tzinfo/blob/master/CHANGES.md) - [Commits](https://github.com/tzinfo/tzinfo/compare/v1.2.9...v1.2.10) --- updated-dependencies: - dependency-name: tzinfo dependency-type: indirect ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- docs/Gemfile.lock | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/docs/Gemfile.lock b/docs/Gemfile.lock index e62aa4ce3..88d33ae84 100644 --- a/docs/Gemfile.lock +++ b/docs/Gemfile.lock @@ -233,6 +233,8 @@ GEM multipart-post (2.1.1) nokogiri (1.13.6-arm64-darwin) racc (~> 1.4) + nokogiri (1.13.6-x86_64-linux) + racc (~> 1.4) octokit (4.22.0) faraday (>= 0.9) sawyer (~> 0.8.0, >= 0.5.3) @@ -263,7 +265,7 @@ GEM thread_safe (0.3.6) typhoeus (1.4.0) ethon (>= 0.9.0) - tzinfo (1.2.9) + tzinfo (1.2.10) thread_safe (~> 0.1) unf (0.1.4) unf_ext @@ -273,11 +275,11 @@ GEM PLATFORMS arm64-darwin-21 + x86_64-linux DEPENDENCIES github-pages (~> 226) jekyll-feed (~> 0.15.1) - minima (~> 2.5.1) BUNDLED WITH 2.3.7 From f0c8a03649ab270bb15156a3cc47cae2aee3f36a Mon Sep 17 00:00:00 2001 From: Neil Alexander Date: Fri, 22 Jul 2022 14:44:04 +0100 Subject: [PATCH 13/18] Membership updater refactoring (#2541) * Membership updater refactoring * Pass in membership state * Use membership check rather than referring to state directly * Delete irrelevant membership states * We don't need the leave event after all * Tweaks * Put a log entry in that I might stand a chance of finding * Be less panicky * Tweak invite handling * Don't freak if we can't find the event NID * Use event NID from `types.Event` * Clean up * Better invite handling * Placate the almighty linter * Blacklist a Sytest which is otherwise fine under Complement for reasons I don't understand * Fix the sytest after all (thanks @S7evinK for the spot) --- federationapi/routing/invite.go | 2 - roomserver/internal/helpers/helpers.go | 27 +++- roomserver/internal/input/input_membership.go | 117 +++++--------- roomserver/internal/perform/perform_invite.go | 70 +++++---- roomserver/internal/perform/perform_join.go | 20 ++- roomserver/internal/perform/perform_leave.go | 8 +- .../storage/postgres/membership_table.go | 15 ++ .../storage/shared/membership_updater.go | 143 ++++-------------- .../storage/sqlite3/membership_table.go | 15 ++ roomserver/storage/tables/interface.go | 1 + syncapi/consumers/roomserver.go | 8 +- 11 files changed, 181 insertions(+), 245 deletions(-) diff --git a/federationapi/routing/invite.go b/federationapi/routing/invite.go index cde87a0ac..4b795018c 100644 --- a/federationapi/routing/invite.go +++ b/federationapi/routing/invite.go @@ -26,7 +26,6 @@ import ( "github.com/matrix-org/dendrite/setup/config" "github.com/matrix-org/gomatrixserverlib" "github.com/matrix-org/util" - "github.com/sirupsen/logrus" ) // InviteV2 implements /_matrix/federation/v2/invite/{roomID}/{eventID} @@ -144,7 +143,6 @@ func processInvite( // Check that the event is signed by the server sending the request. 
redacted, err := gomatrixserverlib.RedactEventJSON(event.JSON(), event.Version()) if err != nil { - logrus.WithError(err).Errorf("XXX: invite.go") return util.JSONResponse{ Code: http.StatusBadRequest, JSON: jsonerror.BadJSON("The event JSON could not be redacted"), diff --git a/roomserver/internal/helpers/helpers.go b/roomserver/internal/helpers/helpers.go index e67bbfcaa..2653027e9 100644 --- a/roomserver/internal/helpers/helpers.go +++ b/roomserver/internal/helpers/helpers.go @@ -12,6 +12,7 @@ import ( "github.com/matrix-org/dendrite/roomserver/state" "github.com/matrix-org/dendrite/roomserver/storage" "github.com/matrix-org/dendrite/roomserver/storage/shared" + "github.com/matrix-org/dendrite/roomserver/storage/tables" "github.com/matrix-org/dendrite/roomserver/types" "github.com/matrix-org/gomatrixserverlib" "github.com/matrix-org/util" @@ -21,14 +22,14 @@ import ( // Move these to a more sensible place. func UpdateToInviteMembership( - mu *shared.MembershipUpdater, add *gomatrixserverlib.Event, updates []api.OutputEvent, + mu *shared.MembershipUpdater, add *types.Event, updates []api.OutputEvent, roomVersion gomatrixserverlib.RoomVersion, ) ([]api.OutputEvent, error) { // We may have already sent the invite to the user, either because we are // reprocessing this event, or because the we received this invite from a // remote server via the federation invite API. In those cases we don't need // to send the event. - needsSending, err := mu.SetToInvite(add) + needsSending, retired, err := mu.Update(tables.MembershipStateInvite, add) if err != nil { return nil, err } @@ -38,13 +39,23 @@ func UpdateToInviteMembership( // room event stream. This ensures that the consumers only have to // consider a single stream of events when determining whether a user // is invited, rather than having to combine multiple streams themselves. 
- onie := api.OutputNewInviteEvent{ - Event: add.Headered(roomVersion), - RoomVersion: roomVersion, - } updates = append(updates, api.OutputEvent{ - Type: api.OutputTypeNewInviteEvent, - NewInviteEvent: &onie, + Type: api.OutputTypeNewInviteEvent, + NewInviteEvent: &api.OutputNewInviteEvent{ + Event: add.Headered(roomVersion), + RoomVersion: roomVersion, + }, + }) + } + for _, eventID := range retired { + updates = append(updates, api.OutputEvent{ + Type: api.OutputTypeRetireInviteEvent, + RetireInviteEvent: &api.OutputRetireInviteEvent{ + EventID: eventID, + Membership: gomatrixserverlib.Join, + RetiredByEventID: add.EventID(), + TargetUserID: *add.StateKey(), + }, }) } return updates, nil diff --git a/roomserver/internal/input/input_membership.go b/roomserver/internal/input/input_membership.go index 3ce8791a3..28a54623b 100644 --- a/roomserver/internal/input/input_membership.go +++ b/roomserver/internal/input/input_membership.go @@ -21,6 +21,7 @@ import ( "github.com/matrix-org/dendrite/roomserver/api" "github.com/matrix-org/dendrite/roomserver/internal/helpers" "github.com/matrix-org/dendrite/roomserver/storage/shared" + "github.com/matrix-org/dendrite/roomserver/storage/tables" "github.com/matrix-org/dendrite/roomserver/types" "github.com/matrix-org/gomatrixserverlib" "github.com/opentracing/opentracing-go" @@ -60,20 +61,14 @@ func (r *Inputer) updateMemberships( var updates []api.OutputEvent for _, change := range changes { - var ae *gomatrixserverlib.Event - var re *gomatrixserverlib.Event + var ae *types.Event + var re *types.Event targetUserNID := change.EventStateKeyNID if change.removedEventNID != 0 { - ev, _ := helpers.EventMap(events).Lookup(change.removedEventNID) - if ev != nil { - re = ev.Event - } + re, _ = helpers.EventMap(events).Lookup(change.removedEventNID) } if change.addedEventNID != 0 { - ev, _ := helpers.EventMap(events).Lookup(change.addedEventNID) - if ev != nil { - ae = ev.Event - } + ae, _ = helpers.EventMap(events).Lookup(change.addedEventNID) } if updates, err = r.updateMembership(updater, targetUserNID, re, ae, updates); err != nil { return nil, err @@ -85,30 +80,27 @@ func (r *Inputer) updateMemberships( func (r *Inputer) updateMembership( updater *shared.RoomUpdater, targetUserNID types.EventStateKeyNID, - remove, add *gomatrixserverlib.Event, + remove, add *types.Event, updates []api.OutputEvent, ) ([]api.OutputEvent, error) { var err error // Default the membership to Leave if no event was added or removed. - oldMembership := gomatrixserverlib.Leave newMembership := gomatrixserverlib.Leave - - if remove != nil { - oldMembership, err = remove.Membership() - if err != nil { - return nil, err - } - } if add != nil { newMembership, err = add.Membership() if err != nil { return nil, err } } - if oldMembership == newMembership && newMembership != gomatrixserverlib.Join { - // If the membership is the same then nothing changed and we can return - // immediately, unless it's a Join update (e.g. profile update). 
- return updates, nil + + var targetLocal bool + if add != nil { + targetLocal = r.isLocalTarget(add) + } + + mu, err := updater.MembershipUpdater(targetUserNID, targetLocal) + if err != nil { + return nil, err } // In an ideal world, we shouldn't ever have "add" be nil and "remove" be @@ -120,17 +112,10 @@ func (r *Inputer) updateMembership( // after a state reset, often thinking that the user was still joined to // the room even though the room state said otherwise, and this would prevent // the user from being able to attempt to rejoin the room without modifying - // the database. So instead what we'll do is we'll just update the membership - // table to say that the user is "leave" and we'll use the old event to - // avoid nil pointer exceptions on the code path that follows. - if add == nil { - add = remove - newMembership = gomatrixserverlib.Leave - } - - mu, err := updater.MembershipUpdater(targetUserNID, r.isLocalTarget(add)) - if err != nil { - return nil, err + // the database. So instead we're going to remove the membership from the + // database altogether, so that it doesn't create future problems. + if add == nil && remove != nil { + return nil, mu.Delete() } switch newMembership { @@ -149,7 +134,7 @@ func (r *Inputer) updateMembership( } } -func (r *Inputer) isLocalTarget(event *gomatrixserverlib.Event) bool { +func (r *Inputer) isLocalTarget(event *types.Event) bool { isTargetLocalUser := false if statekey := event.StateKey(); statekey != nil { _, domain, _ := gomatrixserverlib.SplitID('@', *statekey) @@ -159,81 +144,61 @@ func (r *Inputer) isLocalTarget(event *gomatrixserverlib.Event) bool { } func updateToJoinMembership( - mu *shared.MembershipUpdater, add *gomatrixserverlib.Event, updates []api.OutputEvent, + mu *shared.MembershipUpdater, add *types.Event, updates []api.OutputEvent, ) ([]api.OutputEvent, error) { - // If the user is already marked as being joined, we call SetToJoin to update - // the event ID then we can return immediately. Retired is ignored as there - // is no invite event to retire. - if mu.IsJoin() { - _, err := mu.SetToJoin(add.Sender(), add.EventID(), true) - if err != nil { - return nil, err - } - return updates, nil - } // When we mark a user as being joined we will invalidate any invites that // are active for that user. We notify the consumers that the invites have // been retired using a special event, even though they could infer this // by studying the state changes in the room event stream. - retired, err := mu.SetToJoin(add.Sender(), add.EventID(), false) + _, retired, err := mu.Update(tables.MembershipStateJoin, add) if err != nil { return nil, err } for _, eventID := range retired { - orie := api.OutputRetireInviteEvent{ - EventID: eventID, - Membership: gomatrixserverlib.Join, - RetiredByEventID: add.EventID(), - TargetUserID: *add.StateKey(), - } updates = append(updates, api.OutputEvent{ - Type: api.OutputTypeRetireInviteEvent, - RetireInviteEvent: &orie, + Type: api.OutputTypeRetireInviteEvent, + RetireInviteEvent: &api.OutputRetireInviteEvent{ + EventID: eventID, + Membership: gomatrixserverlib.Join, + RetiredByEventID: add.EventID(), + TargetUserID: *add.StateKey(), + }, }) } return updates, nil } func updateToLeaveMembership( - mu *shared.MembershipUpdater, add *gomatrixserverlib.Event, + mu *shared.MembershipUpdater, add *types.Event, newMembership string, updates []api.OutputEvent, ) ([]api.OutputEvent, error) { - // If the user is already neither joined, nor invited to the room then we - // can return immediately. 
- if mu.IsLeave() { - return updates, nil - } // When we mark a user as having left we will invalidate any invites that // are active for that user. We notify the consumers that the invites have // been retired using a special event, even though they could infer this // by studying the state changes in the room event stream. - retired, err := mu.SetToLeave(add.Sender(), add.EventID()) + _, retired, err := mu.Update(tables.MembershipStateLeaveOrBan, add) if err != nil { return nil, err } for _, eventID := range retired { - orie := api.OutputRetireInviteEvent{ - EventID: eventID, - Membership: newMembership, - RetiredByEventID: add.EventID(), - TargetUserID: *add.StateKey(), - } updates = append(updates, api.OutputEvent{ - Type: api.OutputTypeRetireInviteEvent, - RetireInviteEvent: &orie, + Type: api.OutputTypeRetireInviteEvent, + RetireInviteEvent: &api.OutputRetireInviteEvent{ + EventID: eventID, + Membership: newMembership, + RetiredByEventID: add.EventID(), + TargetUserID: *add.StateKey(), + }, }) } return updates, nil } func updateToKnockMembership( - mu *shared.MembershipUpdater, add *gomatrixserverlib.Event, updates []api.OutputEvent, + mu *shared.MembershipUpdater, add *types.Event, updates []api.OutputEvent, ) ([]api.OutputEvent, error) { - if mu.IsLeave() { - _, err := mu.SetToKnock(add) - if err != nil { - return nil, err - } + if _, _, err := mu.Update(tables.MembershipStateKnock, add); err != nil { + return nil, err } return updates, nil } diff --git a/roomserver/internal/perform/perform_invite.go b/roomserver/internal/perform/perform_invite.go index 644c954b6..fce529305 100644 --- a/roomserver/internal/perform/perform_invite.go +++ b/roomserver/internal/perform/perform_invite.go @@ -39,11 +39,13 @@ type Inviter struct { Inputer *input.Inputer } +// nolint:gocyclo func (r *Inviter) PerformInvite( ctx context.Context, req *api.PerformInviteRequest, res *api.PerformInviteResponse, ) ([]api.OutputEvent, error) { + var outputUpdates []api.OutputEvent event := req.Event if event.StateKey() == nil { return nil, fmt.Errorf("invite must be a state event") @@ -66,6 +68,13 @@ func (r *Inviter) PerformInvite( } isTargetLocal := domain == r.Cfg.Matrix.ServerName isOriginLocal := event.Origin() == r.Cfg.Matrix.ServerName + if !isOriginLocal && !isTargetLocal { + res.Error = &api.PerformError{ + Code: api.PerformErrorBadRequest, + Msg: "The invite must be either from or to a local user", + } + return nil, nil + } logger := util.GetLogger(ctx).WithFields(map[string]interface{}{ "inviter": event.Sender(), @@ -97,6 +106,34 @@ func (r *Inviter) PerformInvite( } } + updateMembershipTableManually := func() ([]api.OutputEvent, error) { + var updater *shared.MembershipUpdater + if updater, err = r.DB.MembershipUpdater(ctx, roomID, targetUserID, isTargetLocal, req.RoomVersion); err != nil { + return nil, fmt.Errorf("r.DB.MembershipUpdater: %w", err) + } + outputUpdates, err = helpers.UpdateToInviteMembership(updater, &types.Event{ + EventNID: 0, + Event: event.Unwrap(), + }, outputUpdates, req.Event.RoomVersion) + if err != nil { + return nil, fmt.Errorf("updateToInviteMembership: %w", err) + } + if err = updater.Commit(); err != nil { + return nil, fmt.Errorf("updater.Commit: %w", err) + } + logger.Debugf("updated membership to invite and sending invite OutputEvent") + return outputUpdates, nil + } + + if (info == nil || info.IsStub) && !isOriginLocal && isTargetLocal { + // The invite came in over federation for a room that we don't know about + // yet. 
We need to handle this a bit differently to most invites because + // we don't know the room state, therefore the roomserver can't process + // an input event. Instead we will update the membership table with the + // new invite and generate an output event. + return updateMembershipTableManually() + } + var isAlreadyJoined bool if info != nil { _, isAlreadyJoined, _, err = r.DB.GetMembership(ctx, info.RoomNID, *event.StateKey()) @@ -140,31 +177,13 @@ func (r *Inviter) PerformInvite( return nil, nil } + // If the invite originated remotely then we can't send an + // InputRoomEvent for the invite as it will never pass auth checks + // due to lacking room state, but we still need to tell the client + // about the invite so we can accept it, hence we return an output + // event to send to the Sync API. if !isOriginLocal { - // The invite originated over federation. Process the membership - // update, which will notify the sync API etc about the incoming - // invite. We do NOT send an InputRoomEvent for the invite as it - // will never pass auth checks due to lacking room state, but we - // still need to tell the client about the invite so we can accept - // it, hence we return an output event to send to the sync api. - var updater *shared.MembershipUpdater - updater, err = r.DB.MembershipUpdater(ctx, roomID, targetUserID, isTargetLocal, req.RoomVersion) - if err != nil { - return nil, fmt.Errorf("r.DB.MembershipUpdater: %w", err) - } - - unwrapped := event.Unwrap() - var outputUpdates []api.OutputEvent - outputUpdates, err = helpers.UpdateToInviteMembership(updater, unwrapped, nil, req.Event.RoomVersion) - if err != nil { - return nil, fmt.Errorf("updateToInviteMembership: %w", err) - } - - if err = updater.Commit(); err != nil { - return nil, fmt.Errorf("updater.Commit: %w", err) - } - logger.Debugf("updated membership to invite and sending invite OutputEvent") - return outputUpdates, nil + return updateMembershipTableManually() } // The invite originated locally. Therefore we have a responsibility to @@ -229,12 +248,11 @@ func (r *Inviter) PerformInvite( Code: api.PerformErrorNotAllowed, } logger.WithError(err).WithField("event_id", event.EventID()).Error("r.InputRoomEvents failed") - return nil, nil } // Don't notify the sync api of this event in the same way as a federated invite so the invitee // gets the invite, as the roomserver will do this when it processes the m.room.member invite. - return nil, nil + return outputUpdates, nil } func buildInviteStrippedState( diff --git a/roomserver/internal/perform/perform_join.go b/roomserver/internal/perform/perform_join.go index c9e839198..1445b4088 100644 --- a/roomserver/internal/perform/perform_join.go +++ b/roomserver/internal/perform/perform_join.go @@ -268,21 +268,19 @@ func (r *Joiner) performJoinRoomByID( case nil: // The room join is local. Send the new join event into the // roomserver. First of all check that the user isn't already - // a member of the room. - alreadyJoined := false - for _, se := range buildRes.StateEvents { - if !se.StateKeyEquals(userID) { - continue - } - if membership, merr := se.Membership(); merr == nil { - alreadyJoined = (membership == gomatrixserverlib.Join) - break - } + // a member of the room. This is best-effort (as in we won't + // fail if we can't find the existing membership) because there + // is really no harm in just sending another membership event. 
+ membershipReq := &api.QueryMembershipForUserRequest{ + RoomID: req.RoomIDOrAlias, + UserID: userID, } + membershipRes := &api.QueryMembershipForUserResponse{} + _ = r.Queryer.QueryMembershipForUser(ctx, membershipReq, membershipRes) // If we haven't already joined the room then send an event // into the room changing our membership status. - if !alreadyJoined { + if !membershipRes.RoomExists || !membershipRes.IsInRoom { inputReq := rsAPI.InputRoomEventsRequest{ InputRoomEvents: []rsAPI.InputRoomEvent{ { diff --git a/roomserver/internal/perform/perform_leave.go b/roomserver/internal/perform/perform_leave.go index c5b62ac00..56e7240d0 100644 --- a/roomserver/internal/perform/perform_leave.go +++ b/roomserver/internal/perform/perform_leave.go @@ -228,14 +228,14 @@ func (r *Leaver) performFederatedRejectInvite( util.GetLogger(ctx).WithError(err).Errorf("failed to get MembershipUpdater, still retiring invite event") } if updater != nil { - if _, err = updater.SetToLeave(req.UserID, eventID); err != nil { - util.GetLogger(ctx).WithError(err).Errorf("failed to set membership to leave, still retiring invite event") + if err = updater.Delete(); err != nil { + util.GetLogger(ctx).WithError(err).Errorf("failed to delete membership, still retiring invite event") if err = updater.Rollback(); err != nil { - util.GetLogger(ctx).WithError(err).Errorf("failed to rollback membership leave, still retiring invite event") + util.GetLogger(ctx).WithError(err).Errorf("failed to rollback deleting membership, still retiring invite event") } } else { if err = updater.Commit(); err != nil { - util.GetLogger(ctx).WithError(err).Errorf("failed to commit membership update, still retiring invite event") + util.GetLogger(ctx).WithError(err).Errorf("failed to commit deleting membership, still retiring invite event") } } } diff --git a/roomserver/storage/postgres/membership_table.go b/roomserver/storage/postgres/membership_table.go index ce626ad1d..ee457393a 100644 --- a/roomserver/storage/postgres/membership_table.go +++ b/roomserver/storage/postgres/membership_table.go @@ -118,6 +118,9 @@ const updateMembershipForgetRoom = "" + "UPDATE roomserver_membership SET forgotten = $3" + " WHERE room_nid = $1 AND target_nid = $2" +const deleteMembershipSQL = "" + + "DELETE FROM roomserver_membership WHERE room_nid = $1 AND target_nid = $2" + const selectRoomsWithMembershipSQL = "" + "SELECT room_nid FROM roomserver_membership WHERE membership_nid = $1 AND target_nid = $2 and forgotten = false" @@ -165,6 +168,7 @@ type membershipStatements struct { updateMembershipForgetRoomStmt *sql.Stmt selectLocalServerInRoomStmt *sql.Stmt selectServerInRoomStmt *sql.Stmt + deleteMembershipStmt *sql.Stmt } func CreateMembershipTable(db *sql.DB) error { @@ -191,6 +195,7 @@ func PrepareMembershipTable(db *sql.DB) (tables.Membership, error) { {&s.updateMembershipForgetRoomStmt, updateMembershipForgetRoom}, {&s.selectLocalServerInRoomStmt, selectLocalServerInRoomSQL}, {&s.selectServerInRoomStmt, selectServerInRoomSQL}, + {&s.deleteMembershipStmt, deleteMembershipSQL}, }.Prepare(db) } @@ -412,3 +417,13 @@ func (s *membershipStatements) SelectServerInRoom( } return roomNID == nid, nil } + +func (s *membershipStatements) DeleteMembership( + ctx context.Context, txn *sql.Tx, + roomNID types.RoomNID, targetUserNID types.EventStateKeyNID, +) error { + _, err := sqlutil.TxStmt(txn, s.deleteMembershipStmt).ExecContext( + ctx, roomNID, targetUserNID, + ) + return err +} diff --git a/roomserver/storage/shared/membership_updater.go 
b/roomserver/storage/shared/membership_updater.go index c79ca8995..07fb697f9 100644 --- a/roomserver/storage/shared/membership_updater.go +++ b/roomserver/storage/shared/membership_updater.go @@ -15,7 +15,7 @@ type MembershipUpdater struct { d *Database roomNID types.RoomNID targetUserNID types.EventStateKeyNID - membership tables.MembershipState + oldMembership tables.MembershipState } func NewMembershipUpdater( @@ -30,7 +30,6 @@ func NewMembershipUpdater( if err != nil { return err } - targetUserNID, err = d.assignStateKeyNID(ctx, targetUserID) if err != nil { return err @@ -73,146 +72,62 @@ func (d *Database) membershipUpdaterTxn( // IsInvite implements types.MembershipUpdater func (u *MembershipUpdater) IsInvite() bool { - return u.membership == tables.MembershipStateInvite + return u.oldMembership == tables.MembershipStateInvite } // IsJoin implements types.MembershipUpdater func (u *MembershipUpdater) IsJoin() bool { - return u.membership == tables.MembershipStateJoin + return u.oldMembership == tables.MembershipStateJoin } // IsLeave implements types.MembershipUpdater func (u *MembershipUpdater) IsLeave() bool { - return u.membership == tables.MembershipStateLeaveOrBan + return u.oldMembership == tables.MembershipStateLeaveOrBan } // IsKnock implements types.MembershipUpdater func (u *MembershipUpdater) IsKnock() bool { - return u.membership == tables.MembershipStateKnock + return u.oldMembership == tables.MembershipStateKnock } -// SetToInvite implements types.MembershipUpdater -func (u *MembershipUpdater) SetToInvite(event *gomatrixserverlib.Event) (bool, error) { - var inserted bool - err := u.d.Writer.Do(u.d.DB, u.txn, func(txn *sql.Tx) error { +func (u *MembershipUpdater) Delete() error { + if _, err := u.d.InvitesTable.UpdateInviteRetired(u.ctx, u.txn, u.roomNID, u.targetUserNID); err != nil { + return err + } + return u.d.MembershipTable.DeleteMembership(u.ctx, u.txn, u.roomNID, u.targetUserNID) +} + +func (u *MembershipUpdater) Update(newMembership tables.MembershipState, event *types.Event) (bool, []string, error) { + var inserted bool // Did the query result in a membership change? + var retired []string // Did we retire any updates in the process? 
+ return inserted, retired, u.d.Writer.Do(u.d.DB, u.txn, func(txn *sql.Tx) error { senderUserNID, err := u.d.assignStateKeyNID(u.ctx, event.Sender()) if err != nil { return fmt.Errorf("u.d.AssignStateKeyNID: %w", err) } - inserted, err = u.d.InvitesTable.InsertInviteEvent( - u.ctx, u.txn, event.EventID(), u.roomNID, u.targetUserNID, senderUserNID, event.JSON(), - ) + inserted, err = u.d.MembershipTable.UpdateMembership(u.ctx, u.txn, u.roomNID, u.targetUserNID, senderUserNID, newMembership, event.EventNID, false) if err != nil { - return fmt.Errorf("u.d.InvitesTable.InsertInviteEvent: %w", err) + return fmt.Errorf("u.d.MembershipTable.UpdateMembership: %w", err) } - - // Look up the NID of the invite event - nIDs, err := u.d.eventNIDs(u.ctx, u.txn, []string{event.EventID()}, false) - if err != nil { - return fmt.Errorf("u.d.EventNIDs: %w", err) + if !inserted { + return nil } - - if u.membership != tables.MembershipStateInvite { - if inserted, err = u.d.MembershipTable.UpdateMembership(u.ctx, u.txn, u.roomNID, u.targetUserNID, senderUserNID, tables.MembershipStateInvite, nIDs[event.EventID()], false); err != nil { - return fmt.Errorf("u.d.MembershipTable.UpdateMembership: %w", err) + switch { + case u.oldMembership != tables.MembershipStateInvite && newMembership == tables.MembershipStateInvite: + inserted, err = u.d.InvitesTable.InsertInviteEvent( + u.ctx, u.txn, event.EventID(), u.roomNID, u.targetUserNID, senderUserNID, event.JSON(), + ) + if err != nil { + return fmt.Errorf("u.d.InvitesTable.InsertInviteEvent: %w", err) } - } - return nil - }) - return inserted, err -} - -// SetToJoin implements types.MembershipUpdater -func (u *MembershipUpdater) SetToJoin(senderUserID string, eventID string, isUpdate bool) ([]string, error) { - var inviteEventIDs []string - - err := u.d.Writer.Do(u.d.DB, u.txn, func(txn *sql.Tx) error { - senderUserNID, err := u.d.assignStateKeyNID(u.ctx, senderUserID) - if err != nil { - return fmt.Errorf("u.d.AssignStateKeyNID: %w", err) - } - - // If this is a join event update, there is no invite to update - if !isUpdate { - inviteEventIDs, err = u.d.InvitesTable.UpdateInviteRetired( + case u.oldMembership == tables.MembershipStateInvite && newMembership != tables.MembershipStateInvite: + retired, err = u.d.InvitesTable.UpdateInviteRetired( u.ctx, u.txn, u.roomNID, u.targetUserNID, ) if err != nil { return fmt.Errorf("u.d.InvitesTables.UpdateInviteRetired: %w", err) } } - - // Look up the NID of the new join event - nIDs, err := u.d.eventNIDs(u.ctx, u.txn, []string{eventID}, false) - if err != nil { - return fmt.Errorf("u.d.EventNIDs: %w", err) - } - - if u.membership != tables.MembershipStateJoin || isUpdate { - if _, err = u.d.MembershipTable.UpdateMembership(u.ctx, u.txn, u.roomNID, u.targetUserNID, senderUserNID, tables.MembershipStateJoin, nIDs[eventID], false); err != nil { - return fmt.Errorf("u.d.MembershipTable.UpdateMembership: %w", err) - } - } - return nil }) - - return inviteEventIDs, err -} - -// SetToLeave implements types.MembershipUpdater -func (u *MembershipUpdater) SetToLeave(senderUserID string, eventID string) ([]string, error) { - var inviteEventIDs []string - - err := u.d.Writer.Do(u.d.DB, u.txn, func(txn *sql.Tx) error { - senderUserNID, err := u.d.assignStateKeyNID(u.ctx, senderUserID) - if err != nil { - return fmt.Errorf("u.d.AssignStateKeyNID: %w", err) - } - inviteEventIDs, err = u.d.InvitesTable.UpdateInviteRetired( - u.ctx, u.txn, u.roomNID, u.targetUserNID, - ) - if err != nil { - return 
fmt.Errorf("u.d.InvitesTable.updateInviteRetired: %w", err) - } - - // Look up the NID of the new leave event - nIDs, err := u.d.eventNIDs(u.ctx, u.txn, []string{eventID}, false) - if err != nil { - return fmt.Errorf("u.d.EventNIDs: %w", err) - } - - if u.membership != tables.MembershipStateLeaveOrBan { - if _, err = u.d.MembershipTable.UpdateMembership(u.ctx, u.txn, u.roomNID, u.targetUserNID, senderUserNID, tables.MembershipStateLeaveOrBan, nIDs[eventID], false); err != nil { - return fmt.Errorf("u.d.MembershipTable.UpdateMembership: %w", err) - } - } - - return nil - }) - return inviteEventIDs, err -} - -// SetToKnock implements types.MembershipUpdater -func (u *MembershipUpdater) SetToKnock(event *gomatrixserverlib.Event) (bool, error) { - var inserted bool - err := u.d.Writer.Do(u.d.DB, u.txn, func(txn *sql.Tx) error { - senderUserNID, err := u.d.assignStateKeyNID(u.ctx, event.Sender()) - if err != nil { - return fmt.Errorf("u.d.AssignStateKeyNID: %w", err) - } - if u.membership != tables.MembershipStateKnock { - // Look up the NID of the new knock event - nIDs, err := u.d.eventNIDs(u.ctx, u.txn, []string{event.EventID()}, false) - if err != nil { - return fmt.Errorf("u.d.EventNIDs: %w", err) - } - - if inserted, err = u.d.MembershipTable.UpdateMembership(u.ctx, u.txn, u.roomNID, u.targetUserNID, senderUserNID, tables.MembershipStateKnock, nIDs[event.EventID()], false); err != nil { - return fmt.Errorf("u.d.MembershipTable.UpdateMembership: %w", err) - } - } - return nil - }) - return inserted, err } diff --git a/roomserver/storage/sqlite3/membership_table.go b/roomserver/storage/sqlite3/membership_table.go index 570d3919c..00a9bd4f9 100644 --- a/roomserver/storage/sqlite3/membership_table.go +++ b/roomserver/storage/sqlite3/membership_table.go @@ -125,6 +125,9 @@ const selectServerInRoomSQL = "" + " JOIN roomserver_event_state_keys ON roomserver_membership.target_nid = roomserver_event_state_keys.event_state_key_nid" + " WHERE membership_nid = $1 AND room_nid = $2 AND event_state_key LIKE '%:' || $3 LIMIT 1" +const deleteMembershipSQL = "" + + "DELETE FROM roomserver_membership WHERE room_nid = $1 AND target_nid = $2" + type membershipStatements struct { db *sql.DB insertMembershipStmt *sql.Stmt @@ -140,6 +143,7 @@ type membershipStatements struct { updateMembershipForgetRoomStmt *sql.Stmt selectLocalServerInRoomStmt *sql.Stmt selectServerInRoomStmt *sql.Stmt + deleteMembershipStmt *sql.Stmt } func CreateMembershipTable(db *sql.DB) error { @@ -166,6 +170,7 @@ func PrepareMembershipTable(db *sql.DB) (tables.Membership, error) { {&s.updateMembershipForgetRoomStmt, updateMembershipForgetRoom}, {&s.selectLocalServerInRoomStmt, selectLocalServerInRoomSQL}, {&s.selectServerInRoomStmt, selectServerInRoomSQL}, + {&s.deleteMembershipStmt, deleteMembershipSQL}, }.Prepare(db) } @@ -383,3 +388,13 @@ func (s *membershipStatements) SelectServerInRoom(ctx context.Context, txn *sql. 
 }
 return roomNID == nid, nil
 }
+
+func (s *membershipStatements) DeleteMembership(
+ ctx context.Context, txn *sql.Tx,
+ roomNID types.RoomNID, targetUserNID types.EventStateKeyNID,
+) error {
+ _, err := sqlutil.TxStmt(txn, s.deleteMembershipStmt).ExecContext(
+ ctx, roomNID, targetUserNID,
+ )
+ return err
+}
diff --git a/roomserver/storage/tables/interface.go
index 116e11c4e..d228257b7 100644
--- a/roomserver/storage/tables/interface.go
+++ b/roomserver/storage/tables/interface.go
@@ -133,6 +133,7 @@ type Membership interface {
 UpdateForgetMembership(ctx context.Context, txn *sql.Tx, roomNID types.RoomNID, targetUserNID types.EventStateKeyNID, forget bool) error
 SelectLocalServerInRoom(ctx context.Context, txn *sql.Tx, roomNID types.RoomNID) (bool, error)
 SelectServerInRoom(ctx context.Context, txn *sql.Tx, roomNID types.RoomNID, serverName gomatrixserverlib.ServerName) (bool, error)
+ DeleteMembership(ctx context.Context, txn *sql.Tx, roomNID types.RoomNID, targetUserNID types.EventStateKeyNID) error
 }

 type Published interface {
diff --git a/syncapi/consumers/roomserver.go
index 328e1e9dd..f77b1673b 100644
--- a/syncapi/consumers/roomserver.go
+++ b/syncapi/consumers/roomserver.go
@@ -365,7 +365,7 @@ func (s *OutputRoomEventConsumer) onNewInviteEvent(
 "event": string(msg.Event.JSON()),
 "pdupos": pduPos,
 log.ErrorKey: err,
- }).Panicf("roomserver output log: write invite failure")
+ }).Errorf("roomserver output log: write invite failure")
 return
 }

@@ -385,7 +385,7 @@ func (s *OutputRoomEventConsumer) onRetireInviteEvent(
 log.WithFields(log.Fields{
 "event_id": msg.EventID,
 log.ErrorKey: err,
- }).Panicf("roomserver output log: remove invite failure")
+ }).Errorf("roomserver output log: remove invite failure")
 return
 }

@@ -403,7 +403,7 @@ func (s *OutputRoomEventConsumer) onNewPeek(
 // panic rather than continue with an inconsistent database
 log.WithFields(log.Fields{
 log.ErrorKey: err,
- }).Panicf("roomserver output log: write peek failure")
+ }).Errorf("roomserver output log: write peek failure")
 return
 }

@@ -422,7 +422,7 @@ func (s *OutputRoomEventConsumer) onRetirePeek(
 // panic rather than continue with an inconsistent database
 log.WithFields(log.Fields{
 log.ErrorKey: err,
- }).Panicf("roomserver output log: write peek failure")
+ }).Errorf("roomserver output log: write peek failure")
 return
 }

From c7d978274d1e9606574106a9fee63ff555604d1d Mon Sep 17 00:00:00 2001
From: Neil Alexander
Date: Fri, 22 Jul 2022 19:43:48 +0100
Subject: [PATCH 14/18] Try to fix HTTP 500s on `/members` (#2581)

---
 roomserver/internal/query/query.go | 7 +++++++
 roomserver/storage/postgres/membership_table.go | 10 +++++-----
 roomserver/storage/sqlite3/membership_table.go | 10 +++++-----
 roomserver/storage/tables/membership_table_test.go | 3 +++
 4 files changed, 20 insertions(+), 10 deletions(-)

diff --git a/roomserver/internal/query/query.go
index da1b32530..f652a8ea3 100644
--- a/roomserver/internal/query/query.go
+++ b/roomserver/internal/query/query.go
@@ -16,6 +16,7 @@ package query

 import (
 "context"
+ "database/sql"
 "encoding/json"
 "errors"
 "fmt"
@@ -225,6 +226,9 @@ func (r *Queryer) QueryMembershipsForRoom(
 var eventNIDs []types.EventNID
 eventNIDs, err = r.DB.GetMembershipEventNIDsForRoom(ctx, info.RoomNID, request.JoinedOnly, request.LocalOnly)
 if err != nil {
+ if err == sql.ErrNoRows {
+ return nil
+ }
 return fmt.Errorf("r.DB.GetMembershipEventNIDsForRoom: %w", err)
 }
 events, err = r.DB.Events(ctx, eventNIDs)
@@ -260,6 +264,9 @@ func (r *Queryer) QueryMembershipsForRoom(
 var eventNIDs []types.EventNID
 eventNIDs, err = r.DB.GetMembershipEventNIDsForRoom(ctx, info.RoomNID, request.JoinedOnly, false)
 if err != nil {
+ if err == sql.ErrNoRows {
+ return nil
+ }
 return err
 }

diff --git a/roomserver/storage/postgres/membership_table.go
index ee457393a..eaaea22a1 100644
--- a/roomserver/storage/postgres/membership_table.go
+++ b/roomserver/storage/postgres/membership_table.go
@@ -86,24 +86,24 @@ const insertMembershipSQL = "" +

 const selectMembershipFromRoomAndTargetSQL = "" +
 "SELECT membership_nid, event_nid, forgotten FROM roomserver_membership" +
- " WHERE room_nid = $1 AND target_nid = $2"
+ " WHERE room_nid = $1 AND event_nid != 0 AND target_nid = $2"

 const selectMembershipsFromRoomAndMembershipSQL = "" +
 "SELECT event_nid FROM roomserver_membership" +
- " WHERE room_nid = $1 AND membership_nid = $2 and forgotten = false"
+ " WHERE room_nid = $1 AND event_nid != 0 AND membership_nid = $2 and forgotten = false"

 const selectLocalMembershipsFromRoomAndMembershipSQL = "" +
 "SELECT event_nid FROM roomserver_membership" +
- " WHERE room_nid = $1 AND membership_nid = $2" +
+ " WHERE room_nid = $1 AND event_nid != 0 AND membership_nid = $2" +
 " AND target_local = true and forgotten = false"

 const selectMembershipsFromRoomSQL = "" +
 "SELECT event_nid FROM roomserver_membership" +
- " WHERE room_nid = $1 and forgotten = false"
+ " WHERE room_nid = $1 AND event_nid != 0 and forgotten = false"

 const selectLocalMembershipsFromRoomSQL = "" +
 "SELECT event_nid FROM roomserver_membership" +
- " WHERE room_nid = $1" +
+ " WHERE room_nid = $1 AND event_nid != 0" +
 " AND target_local = true and forgotten = false"

 const selectMembershipForUpdateSQL = "" +
diff --git a/roomserver/storage/sqlite3/membership_table.go
index 00a9bd4f9..9dda82fb2 100644
--- a/roomserver/storage/sqlite3/membership_table.go
+++ b/roomserver/storage/sqlite3/membership_table.go
@@ -62,24 +62,24 @@ const insertMembershipSQL = "" +

 const selectMembershipFromRoomAndTargetSQL = "" +
 "SELECT membership_nid, event_nid, forgotten FROM roomserver_membership" +
- " WHERE room_nid = $1 AND target_nid = $2"
+ " WHERE room_nid = $1 AND event_nid != 0 AND target_nid = $2"

 const selectMembershipsFromRoomAndMembershipSQL = "" +
 "SELECT event_nid FROM roomserver_membership" +
- " WHERE room_nid = $1 AND membership_nid = $2 and forgotten = false"
+ " WHERE room_nid = $1 AND event_nid != 0 AND membership_nid = $2 and forgotten = false"

 const selectLocalMembershipsFromRoomAndMembershipSQL = "" +
 "SELECT event_nid FROM roomserver_membership" +
- " WHERE room_nid = $1 AND membership_nid = $2" +
+ " WHERE room_nid = $1 AND event_nid != 0 AND membership_nid = $2" +
 " AND target_local = true and forgotten = false"

 const selectMembershipsFromRoomSQL = "" +
 "SELECT event_nid FROM roomserver_membership" +
- " WHERE room_nid = $1 and forgotten = false"
+ " WHERE room_nid = $1 AND event_nid != 0 and forgotten = false"

 const selectLocalMembershipsFromRoomSQL = "" +
 "SELECT event_nid FROM roomserver_membership" +
- " WHERE room_nid = $1" +
+ " WHERE room_nid = $1 AND event_nid != 0" +
 " AND target_local = true and forgotten = false"

 const selectMembershipForUpdateSQL = "" +
diff --git a/roomserver/storage/tables/membership_table_test.go
index 14e8ce50a..f789ef4ac 100644
--- a/roomserver/storage/tables/membership_table_test.go
+++ b/roomserver/storage/tables/membership_table_test.go
@@ -60,6 +60,9 @@ func TestMembershipTable(t *testing.T) {
 // This inserts a left user to the room
 err = tab.InsertMembership(ctx, nil, 1, stateKeyNID, true)
 assert.NoError(t, err)
+ // We must update the membership with a non-zero event NID or it will get filtered out in later queries
+ _, err = tab.UpdateMembership(ctx, nil, 1, stateKeyNID, userNIDs[0], tables.MembershipStateLeaveOrBan, 1, false)
+ assert.NoError(t, err)
 }

 // ... so this should be false
From 081f5e722677fc0306934a814c557b57d56c2e0d Mon Sep 17 00:00:00 2001
From: Till <2353100+S7evinK@users.noreply.github.com>
Date: Mon, 25 Jul 2022 11:39:22 +0200
Subject: [PATCH 15/18] Update database migrations, remove goose (#2264)

* Add new db migration
* Update migrations Remove goose
* Add possibility to test direct upgrades
* Try to fix WASM test
* Add checks for specific migrations
* Remove AddMigration Use WithTransaction Add Dendrite version to table
* Fix linter issues
* Update tests
* Update comments, outdent if
* Namespace migrations
* Add direct upgrade tests, skipping over one version
* Split migrations
* Update go version in CI
* Fix copy&paste mistake
* Use contexts in migrations

Co-authored-by: kegsay
Co-authored-by: Neil Alexander
---
 .github/workflows/dendrite.yml | 27 ++-
 cmd/dendrite-upgrade-tests/main.go | 8 +-
 cmd/goose/README.md | 109 ---------
 cmd/goose/main.go | 154 -------------
 .../postgres/deltas/2021020411080000_rooms.go | 16 +-
 federationapi/storage/postgres/storage.go | 10 +-
 .../sqlite3/deltas/2021020411080000_rooms.go | 16 +-
 federationapi/storage/sqlite3/storage.go | 10 +-
 go.mod | 1 -
 go.sum | 2 -
 internal/sqlutil/migrate.go | 218 +++++++++---------
 internal/sqlutil/migrate_test.go | 112 +++++++++
 .../postgres/cross_signing_sigs_table.go | 11 +
 .../deltas/2022012016470000_key_changes.go | 26 +--
 .../deltas/2022042612000000_xsigning_idx.go | 15 +-
 .../storage/postgres/key_changes_table.go | 20 +-
 keyserver/storage/postgres/storage.go | 7 -
 .../sqlite3/cross_signing_sigs_table.go | 10 +
 .../deltas/2022012016470000_key_changes.go | 24 +-
 .../deltas/2022042612000000_xsigning_idx.go | 15 +-
 .../storage/sqlite3/key_changes_table.go | 20 +-
 keyserver/storage/sqlite3/storage.go | 7 -
 .../20201028212440_add_forgotten_column.go | 21 +-
 .../2021041615092700_state_blocks_refactor.go | 74 +++---
 .../storage/postgres/membership_table.go | 11 +-
 roomserver/storage/postgres/storage.go | 25 +-
 .../20201028212440_add_forgotten_column.go | 21 +-
 .../2021041615092700_state_blocks_refactor.go | 69 +++---
 .../storage/sqlite3/membership_table.go | 11 +-
 roomserver/storage/sqlite3/storage.go | 24 +-
 .../postgres/current_room_state_table.go | 12 +
 .../deltas/20201211125500_sequences.go | 21 +-
 .../20210112130000_sendtodevice_sentcolumn.go | 15 +-
 ...2061412000000_history_visibility_column.go | 24 +-
 .../postgres/output_room_events_table.go | 12 +
 syncapi/storage/postgres/receipt_table.go | 10 +
 .../storage/postgres/send_to_device_table.go | 10 +
 syncapi/storage/postgres/syncserver.go | 31 +--
 .../sqlite3/current_room_state_table.go | 12 +
 .../deltas/20201211125500_sequences.go | 21 +-
 .../20210112130000_sendtodevice_sentcolumn.go | 15 +-
 ...2061412000000_history_visibility_column.go | 32 +--
 .../sqlite3/output_room_events_table.go | 12 +
 syncapi/storage/sqlite3/receipt_table.go | 10 +
 .../storage/sqlite3/send_to_device_table.go | 10 +
 syncapi/storage/sqlite3/syncserver.go | 35 +--
userapi/storage/postgres/accounts_table.go | 18 ++ .../deltas/20200929203058_is_active.go | 22 +- .../deltas/20201001204705_last_seen_ts_ip.go | 15 +- .../2022021013023800_add_account_type.go | 15 +- userapi/storage/postgres/devices_table.go | 10 + userapi/storage/postgres/storage.go | 14 -- userapi/storage/sqlite3/accounts_table.go | 18 ++ .../deltas/20200929203058_is_active.go | 22 +- .../deltas/20201001204705_last_seen_ts_ip.go | 15 +- .../2022021012490600_add_account_type.go | 21 +- userapi/storage/sqlite3/devices_table.go | 10 + userapi/storage/sqlite3/storage.go | 17 -- 58 files changed, 734 insertions(+), 839 deletions(-) delete mode 100644 cmd/goose/README.md delete mode 100644 cmd/goose/main.go create mode 100644 internal/sqlutil/migrate_test.go diff --git a/.github/workflows/dendrite.yml b/.github/workflows/dendrite.yml index 9b452d849..4cbfb380f 100644 --- a/.github/workflows/dendrite.yml +++ b/.github/workflows/dendrite.yml @@ -223,6 +223,31 @@ jobs: - name: Test upgrade run: ./dendrite-upgrade-tests --head . + # run database upgrade tests, skipping over one version + upgrade_test_direct: + name: Upgrade tests from HEAD-2 + timeout-minutes: 20 + needs: initial-tests-done + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Setup go + uses: actions/setup-go@v2 + with: + go-version: "1.18" + - uses: actions/cache@v3 + with: + path: | + ~/.cache/go-build + ~/go/pkg/mod + key: ${{ runner.os }}-go-upgrade-${{ hashFiles('**/go.sum') }} + restore-keys: | + ${{ runner.os }}-go-upgrade + - name: Build upgrade-tests + run: go build ./cmd/dendrite-upgrade-tests + - name: Test upgrade + run: ./dendrite-upgrade-tests -direct -from HEAD-2 --head . + # run Sytest in different variations sytest: timeout-minutes: 20 @@ -359,7 +384,7 @@ jobs: integration-tests-done: name: Integration tests passed - needs: [initial-tests-done, upgrade_test, sytest, complement] + needs: [initial-tests-done, upgrade_test, upgrade_test_direct, sytest, complement] runs-on: ubuntu-latest if: ${{ !cancelled() }} # Run this even if prior jobs were skipped steps: diff --git a/cmd/dendrite-upgrade-tests/main.go b/cmd/dendrite-upgrade-tests/main.go index cabd07e70..39843dccb 100644 --- a/cmd/dendrite-upgrade-tests/main.go +++ b/cmd/dendrite-upgrade-tests/main.go @@ -37,6 +37,7 @@ var ( flagBuildConcurrency = flag.Int("build-concurrency", runtime.NumCPU(), "The amount of build concurrency when building images") flagHead = flag.String("head", "", "Location to a dendrite repository to treat as HEAD instead of Github") flagDockerHost = flag.String("docker-host", "localhost", "The hostname of the docker client. 
'localhost' if running locally, 'host.docker.internal' if running in Docker.") + flagDirect = flag.Bool("direct", false, "If a direct upgrade from the defined FROM version to TO should be done") alphaNumerics = regexp.MustCompile("[^a-zA-Z0-9]+") ) @@ -229,7 +230,7 @@ func getAndSortVersionsFromGithub(httpClient *http.Client) (semVers []*semver.Ve return semVers, nil } -func calculateVersions(cli *http.Client, from, to string) []string { +func calculateVersions(cli *http.Client, from, to string, direct bool) []string { semvers, err := getAndSortVersionsFromGithub(cli) if err != nil { log.Fatalf("failed to collect semvers from github: %s", err) @@ -284,6 +285,9 @@ func calculateVersions(cli *http.Client, from, to string) []string { if to == HEAD { versions = append(versions, HEAD) } + if direct { + versions = []string{versions[0], versions[len(versions)-1]} + } return versions } @@ -461,7 +465,7 @@ func main() { os.Exit(1) } cleanup(dockerClient) - versions := calculateVersions(httpClient, *flagFrom, *flagTo) + versions := calculateVersions(httpClient, *flagFrom, *flagTo, *flagDirect) log.Printf("Testing dendrite versions: %v\n", versions) branchToImageID := buildDendriteImages(httpClient, dockerClient, *flagTempDir, *flagBuildConcurrency, versions) diff --git a/cmd/goose/README.md b/cmd/goose/README.md deleted file mode 100644 index 725c6a586..000000000 --- a/cmd/goose/README.md +++ /dev/null @@ -1,109 +0,0 @@ -## Database migrations - -We use [goose](https://github.com/pressly/goose) to handle database migrations. This allows us to execute -both SQL deltas (e.g `ALTER TABLE ...`) as well as manipulate data in the database in Go using Go functions. - -To run a migration, the `goose` binary in this directory needs to be built: -``` -$ go build ./cmd/goose -``` - -This binary allows Dendrite databases to be upgraded and downgraded. Sample usage for upgrading the roomserver database: - -``` -# for sqlite -$ ./goose -dir roomserver/storage/sqlite3/deltas sqlite3 ./roomserver.db up - -# for postgres -$ ./goose -dir roomserver/storage/postgres/deltas postgres "user=dendrite dbname=dendrite sslmode=disable" up -``` - -For a full list of options, including rollbacks, see https://github.com/pressly/goose or use `goose` with no args. - - -### Rationale - -Dendrite creates tables on startup using `CREATE TABLE IF NOT EXISTS`, so you might think that we should also -apply version upgrades on startup as well. This is convenient and doesn't involve an additional binary to run -which complicates upgrades. However, combining the upgrade mechanism and the server binary makes it difficult -to handle rollbacks. Firstly, how do you specify you wish to rollback? We would have to add additional flags -to the main server binary to say "rollback to version X". Secondly, if you roll back the server binary from -version 5 to version 4, the version 4 binary doesn't know how to rollback the database from version 5 to -version 4! For these reasons, we prefer to have a separate "upgrade" binary which is run for database upgrades. -Rather than roll-our-own migration tool, we decided to use [goose](https://github.com/pressly/goose) as it supports -complex migrations in Go code in addition to just executing SQL deltas. Other alternatives like -`github.com/golang-migrate/migrate` [do not support](https://github.com/golang-migrate/migrate/issues/15) these -kinds of complex migrations. - -### Adding new deltas - -You can add `.sql` or `.go` files manually or you can use goose to create them for you. 
- -If you only want to add a SQL delta then run: - -``` -$ ./goose -dir serverkeyapi/storage/sqlite3/deltas sqlite3 ./foo.db create new_col sql -2020/09/09 14:37:43 Created new file: serverkeyapi/storage/sqlite3/deltas/20200909143743_new_col.sql -``` - -In this case, the version number is `20200909143743`. The important thing is that it is always increasing. - -Then add up/downgrade SQL commands to the created file which looks like: -```sql --- +goose Up --- +goose StatementBegin -SELECT 'up SQL query'; --- +goose StatementEnd - --- +goose Down --- +goose StatementBegin -SELECT 'down SQL query'; --- +goose StatementEnd - -``` -You __must__ keep the `+goose` annotations. You'll need to repeat this process for Postgres. - -For complex Go migrations: - -``` -$ ./goose -dir serverkeyapi/storage/sqlite3/deltas sqlite3 ./foo.db create complex_update go -2020/09/09 14:40:38 Created new file: serverkeyapi/storage/sqlite3/deltas/20200909144038_complex_update.go -``` - -Then modify the created `.go` file which looks like: - -```go -package migrations - -import ( - "database/sql" - "fmt" - - "github.com/pressly/goose" -) - -func init() { - goose.AddMigration(upComplexUpdate, downComplexUpdate) -} - -func upComplexUpdate(tx *sql.Tx) error { - // This code is executed when the migration is applied. - return nil -} - -func downComplexUpdate(tx *sql.Tx) error { - // This code is executed when the migration is rolled back. - return nil -} - -``` - -You __must__ import the package in `/cmd/goose/main.go` so `func init()` gets called. - - -#### Database limitations - -- SQLite3 does NOT support `ALTER TABLE table_name DROP COLUMN` - you would have to rename the column or drop the table - entirely and recreate it. ([example](https://github.com/matrix-org/dendrite/blob/master/userapi/storage/accounts/sqlite3/deltas/20200929203058_is_active.sql)) - - More information: [sqlite.org](https://www.sqlite.org/lang_altertable.html) diff --git a/cmd/goose/main.go b/cmd/goose/main.go deleted file mode 100644 index 31a5b0050..000000000 --- a/cmd/goose/main.go +++ /dev/null @@ -1,154 +0,0 @@ -// This is custom goose binary - -package main - -import ( - "flag" - "fmt" - "log" - "os" - - "github.com/pressly/goose" - - pgusers "github.com/matrix-org/dendrite/userapi/storage/postgres/deltas" - slusers "github.com/matrix-org/dendrite/userapi/storage/sqlite3/deltas" - - _ "github.com/lib/pq" - _ "github.com/mattn/go-sqlite3" -) - -const ( - AppService = "appservice" - FederationSender = "federationapi" - KeyServer = "keyserver" - MediaAPI = "mediaapi" - RoomServer = "roomserver" - SigningKeyServer = "signingkeyserver" - SyncAPI = "syncapi" - UserAPI = "userapi" -) - -var ( - dir = flags.String("dir", "", "directory with migration files") - flags = flag.NewFlagSet("goose", flag.ExitOnError) - component = flags.String("component", "", "dendrite component name") - knownDBs = []string{ - AppService, FederationSender, KeyServer, MediaAPI, RoomServer, SigningKeyServer, SyncAPI, UserAPI, - } -) - -// nolint: gocyclo -func main() { - err := flags.Parse(os.Args[1:]) - if err != nil { - panic(err.Error()) - } - args := flags.Args() - - if len(args) < 3 { - fmt.Println( - `Usage: goose [OPTIONS] DRIVER DBSTRING COMMAND - -Drivers: - postgres - sqlite3 - -Examples: - goose -component roomserver sqlite3 ./roomserver.db status - goose -component roomserver sqlite3 ./roomserver.db up - - goose -component roomserver postgres "user=dendrite dbname=dendrite sslmode=disable" status - -Options: - -component string - Dendrite component name e.g 
roomserver, signingkeyserver, clientapi, syncapi - -table string - migrations table name (default "goose_db_version") - -h print help - -v enable verbose mode - -dir string - directory with migration files, only relevant when creating new migrations. - -version - print version - -Commands: - up Migrate the DB to the most recent version available - up-by-one Migrate the DB up by 1 - up-to VERSION Migrate the DB to a specific VERSION - down Roll back the version by 1 - down-to VERSION Roll back to a specific VERSION - redo Re-run the latest migration - reset Roll back all migrations - status Dump the migration status for the current DB - version Print the current version of the database - create NAME [sql|go] Creates new migration file with the current timestamp - fix Apply sequential ordering to migrations`, - ) - return - } - - engine := args[0] - if engine != "sqlite3" && engine != "postgres" { - fmt.Println("engine must be one of 'sqlite3' or 'postgres'") - return - } - - knownComponent := false - for _, c := range knownDBs { - if c == *component { - knownComponent = true - break - } - } - if !knownComponent { - fmt.Printf("component must be one of %v\n", knownDBs) - return - } - - if engine == "sqlite3" { - loadSQLiteDeltas(*component) - } else { - loadPostgresDeltas(*component) - } - - dbstring, command := args[1], args[2] - - db, err := goose.OpenDBWithDriver(engine, dbstring) - if err != nil { - log.Fatalf("goose: failed to open DB: %v\n", err) - } - - defer func() { - if err := db.Close(); err != nil { - log.Fatalf("goose: failed to close DB: %v\n", err) - } - }() - - arguments := []string{} - if len(args) > 3 { - arguments = append(arguments, args[3:]...) - } - - // goose demands a directory even though we don't use it for upgrades - d := *dir - if d == "" { - d = os.TempDir() - } - if err := goose.Run(command, db, d, arguments...); err != nil { - log.Fatalf("goose %v: %v", command, err) - } -} - -func loadSQLiteDeltas(component string) { - switch component { - case UserAPI: - slusers.LoadFromGoose() - } -} - -func loadPostgresDeltas(component string) { - switch component { - case UserAPI: - pgusers.LoadFromGoose() - } -} diff --git a/federationapi/storage/postgres/deltas/2021020411080000_rooms.go b/federationapi/storage/postgres/deltas/2021020411080000_rooms.go index cc4bdadfd..fc894846d 100644 --- a/federationapi/storage/postgres/deltas/2021020411080000_rooms.go +++ b/federationapi/storage/postgres/deltas/2021020411080000_rooms.go @@ -15,23 +15,13 @@ package deltas import ( + "context" "database/sql" "fmt" - - "github.com/matrix-org/dendrite/internal/sqlutil" - "github.com/pressly/goose" ) -func LoadFromGoose() { - goose.AddMigration(UpRemoveRoomsTable, DownRemoveRoomsTable) -} - -func LoadRemoveRoomsTable(m *sqlutil.Migrations) { - m.AddMigration(UpRemoveRoomsTable, DownRemoveRoomsTable) -} - -func UpRemoveRoomsTable(tx *sql.Tx) error { - _, err := tx.Exec(` +func UpRemoveRoomsTable(ctx context.Context, tx *sql.Tx) error { + _, err := tx.ExecContext(ctx, ` DROP TABLE IF EXISTS federationsender_rooms; `) if err != nil { diff --git a/federationapi/storage/postgres/storage.go b/federationapi/storage/postgres/storage.go index 9863afb2b..7c2883c1b 100644 --- a/federationapi/storage/postgres/storage.go +++ b/federationapi/storage/postgres/storage.go @@ -82,9 +82,13 @@ func NewDatabase(base *base.BaseDendrite, dbProperties *config.DatabaseOptions, if err != nil { return nil, err } - m := sqlutil.NewMigrations() - deltas.LoadRemoveRoomsTable(m) - if err = m.RunDeltas(d.db, 
dbProperties); err != nil { + m := sqlutil.NewMigrator(d.db) + m.AddMigrations(sqlutil.Migration{ + Version: "federationsender: drop federationsender_rooms", + Up: deltas.UpRemoveRoomsTable, + }) + err = m.Up(base.Context()) + if err != nil { return nil, err } d.Database = shared.Database{ diff --git a/federationapi/storage/sqlite3/deltas/2021020411080000_rooms.go b/federationapi/storage/sqlite3/deltas/2021020411080000_rooms.go index cc4bdadfd..fc894846d 100644 --- a/federationapi/storage/sqlite3/deltas/2021020411080000_rooms.go +++ b/federationapi/storage/sqlite3/deltas/2021020411080000_rooms.go @@ -15,23 +15,13 @@ package deltas import ( + "context" "database/sql" "fmt" - - "github.com/matrix-org/dendrite/internal/sqlutil" - "github.com/pressly/goose" ) -func LoadFromGoose() { - goose.AddMigration(UpRemoveRoomsTable, DownRemoveRoomsTable) -} - -func LoadRemoveRoomsTable(m *sqlutil.Migrations) { - m.AddMigration(UpRemoveRoomsTable, DownRemoveRoomsTable) -} - -func UpRemoveRoomsTable(tx *sql.Tx) error { - _, err := tx.Exec(` +func UpRemoveRoomsTable(ctx context.Context, tx *sql.Tx) error { + _, err := tx.ExecContext(ctx, ` DROP TABLE IF EXISTS federationsender_rooms; `) if err != nil { diff --git a/federationapi/storage/sqlite3/storage.go b/federationapi/storage/sqlite3/storage.go index 7d0cee90e..9594aaec6 100644 --- a/federationapi/storage/sqlite3/storage.go +++ b/federationapi/storage/sqlite3/storage.go @@ -81,9 +81,13 @@ func NewDatabase(base *base.BaseDendrite, dbProperties *config.DatabaseOptions, if err != nil { return nil, err } - m := sqlutil.NewMigrations() - deltas.LoadRemoveRoomsTable(m) - if err = m.RunDeltas(d.db, dbProperties); err != nil { + m := sqlutil.NewMigrator(d.db) + m.AddMigrations(sqlutil.Migration{ + Version: "federationsender: drop federationsender_rooms", + Up: deltas.UpRemoveRoomsTable, + }) + err = m.Up(base.Context()) + if err != nil { return nil, err } d.Database = shared.Database{ diff --git a/go.mod b/go.mod index 0a965ec8e..79216f0fa 100644 --- a/go.mod +++ b/go.mod @@ -37,7 +37,6 @@ require ( github.com/opentracing/opentracing-go v1.2.0 github.com/patrickmn/go-cache v2.1.0+incompatible github.com/pkg/errors v0.9.1 - github.com/pressly/goose v2.7.0+incompatible github.com/prometheus/client_golang v1.12.2 github.com/sirupsen/logrus v1.8.1 github.com/stretchr/testify v1.7.1 diff --git a/go.sum b/go.sum index df3feeb6e..7107d4ff6 100644 --- a/go.sum +++ b/go.sum @@ -432,8 +432,6 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pressly/goose v2.7.0+incompatible h1:PWejVEv07LCerQEzMMeAtjuyCKbyprZ/LBa6K5P0OCQ= -github.com/pressly/goose v2.7.0+incompatible/go.mod h1:m+QHWCqxR3k8D9l7qfzuC/djtlfzxr34mozWDYEu1z8= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= diff --git a/internal/sqlutil/migrate.go b/internal/sqlutil/migrate.go index 7518df3c8..18020a902 100644 --- a/internal/sqlutil/migrate.go +++ b/internal/sqlutil/migrate.go @@ -1,130 +1,142 @@ +// Copyright 2022 The Matrix.org Foundation C.I.C. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package sqlutil import ( + "context" "database/sql" "fmt" - "runtime" - "sort" + "sync" + "time" - "github.com/matrix-org/dendrite/setup/config" - "github.com/pressly/goose" + "github.com/matrix-org/dendrite/internal" + "github.com/sirupsen/logrus" ) -type Migrations struct { - registeredGoMigrations map[int64]*goose.Migration +const createDBMigrationsSQL = "" + + "CREATE TABLE IF NOT EXISTS db_migrations (" + + " version TEXT PRIMARY KEY NOT NULL," + + " time TEXT NOT NULL," + + " dendrite_version TEXT NOT NULL" + + ");" + +const insertVersionSQL = "" + + "INSERT INTO db_migrations (version, time, dendrite_version)" + + " VALUES ($1, $2, $3)" + +const selectDBMigrationsSQL = "SELECT version FROM db_migrations" + +// Migration defines a migration to be run. +type Migration struct { + // Version is a simple description/name of this migration. + Version string + // Up defines the function to execute for an upgrade. + Up func(ctx context.Context, txn *sql.Tx) error + // Down defines the function to execute for a downgrade (not implemented yet). + Down func(ctx context.Context, txn *sql.Tx) error } -func NewMigrations() *Migrations { - return &Migrations{ - registeredGoMigrations: make(map[int64]*goose.Migration), +// Migrator +type Migrator struct { + db *sql.DB + migrations []Migration + knownMigrations map[string]struct{} + mutex *sync.Mutex +} + +// NewMigrator creates a new DB migrator. +func NewMigrator(db *sql.DB) *Migrator { + return &Migrator{ + db: db, + migrations: []Migration{}, + knownMigrations: make(map[string]struct{}), + mutex: &sync.Mutex{}, } } -// Copy-pasted from goose directly to store migrations into a map we control - -// AddMigration adds a migration. -func (m *Migrations) AddMigration(up func(*sql.Tx) error, down func(*sql.Tx) error) { - _, filename, _, _ := runtime.Caller(1) - m.AddNamedMigration(filename, up, down) -} - -// AddNamedMigration : Add a named migration. -func (m *Migrations) AddNamedMigration(filename string, up func(*sql.Tx) error, down func(*sql.Tx) error) { - v, _ := goose.NumericComponent(filename) - migration := &goose.Migration{Version: v, Next: -1, Previous: -1, Registered: true, UpFn: up, DownFn: down, Source: filename} - - if existing, ok := m.registeredGoMigrations[v]; ok { - panic(fmt.Sprintf("failed to add migration %q: version conflicts with %q", filename, existing.Source)) +// AddMigrations appends migrations to the list of migrations. Migrations are executed +// in the order they are added to the list. De-duplicates migrations using their Version field. +func (m *Migrator) AddMigrations(migrations ...Migration) { + m.mutex.Lock() + defer m.mutex.Unlock() + for _, mig := range migrations { + if _, ok := m.knownMigrations[mig.Version]; !ok { + m.migrations = append(m.migrations, mig) + m.knownMigrations[mig.Version] = struct{}{} + } } - - m.registeredGoMigrations[v] = migration } -// RunDeltas up to the latest version. 
-func (m *Migrations) RunDeltas(db *sql.DB, props *config.DatabaseOptions) error { - maxVer := goose.MaxVersion - minVer := int64(0) - migrations, err := m.collect(minVer, maxVer) +// Up executes all migrations in order they were added. +func (m *Migrator) Up(ctx context.Context) error { + var ( + err error + dendriteVersion = internal.VersionString() + ) + // ensure there is a table for known migrations + executedMigrations, err := m.ExecutedMigrations(ctx) if err != nil { - return fmt.Errorf("runDeltas: Failed to collect migrations: %w", err) + return fmt.Errorf("unable to create/get migrations: %w", err) } - if props.ConnectionString.IsPostgres() { - if err = goose.SetDialect("postgres"); err != nil { - return err - } - } else if props.ConnectionString.IsSQLite() { - if err = goose.SetDialect("sqlite3"); err != nil { - return err - } - } else { - return fmt.Errorf("unknown connection string: %s", props.ConnectionString) - } - for { - current, err := goose.EnsureDBVersion(db) - if err != nil { - return fmt.Errorf("runDeltas: Failed to EnsureDBVersion: %w", err) - } - next, err := migrations.Next(current) - if err != nil { - if err == goose.ErrNoNextVersion { - return nil + return WithTransaction(m.db, func(txn *sql.Tx) error { + for i := range m.migrations { + now := time.Now().UTC().Format(time.RFC3339) + migration := m.migrations[i] + logrus.Debugf("Executing database migration '%s'", migration.Version) + // Skip migration if it was already executed + if _, ok := executedMigrations[migration.Version]; ok { + continue + } + err = migration.Up(ctx, txn) + if err != nil { + return fmt.Errorf("unable to execute migration '%s': %w", migration.Version, err) + } + _, err = txn.ExecContext(ctx, insertVersionSQL, + migration.Version, + now, + dendriteVersion, + ) + if err != nil { + return fmt.Errorf("unable to insert executed migrations: %w", err) } - - return fmt.Errorf("runDeltas: Failed to load next migration to %+v : %w", next, err) } - - if err = next.Up(db); err != nil { - return fmt.Errorf("runDeltas: Failed run migration: %w", err) - } - } + return nil + }) } -func (m *Migrations) collect(current, target int64) (goose.Migrations, error) { - var migrations goose.Migrations - - // Go migrations registered via goose.AddMigration(). - for _, migration := range m.registeredGoMigrations { - v, err := goose.NumericComponent(migration.Source) - if err != nil { - return nil, err - } - if versionFilter(v, current, target) { - migrations = append(migrations, migration) +// ExecutedMigrations returns a map with already executed migrations in addition to creating the +// migrations table, if it doesn't exist. 
+func (m *Migrator) ExecutedMigrations(ctx context.Context) (map[string]struct{}, error) { + result := make(map[string]struct{}) + _, err := m.db.ExecContext(ctx, createDBMigrationsSQL) + if err != nil { + return nil, fmt.Errorf("unable to create db_migrations: %w", err) + } + rows, err := m.db.QueryContext(ctx, selectDBMigrationsSQL) + if err != nil { + return nil, fmt.Errorf("unable to query db_migrations: %w", err) + } + defer internal.CloseAndLogIfError(ctx, rows, "ExecutedMigrations: rows.close() failed") + var version string + for rows.Next() { + if err = rows.Scan(&version); err != nil { + return nil, fmt.Errorf("unable to scan version: %w", err) } + result[version] = struct{}{} } - migrations = sortAndConnectMigrations(migrations) - - return migrations, nil -} - -func sortAndConnectMigrations(migrations goose.Migrations) goose.Migrations { - sort.Sort(migrations) - - // now that we're sorted in the appropriate direction, - // populate next and previous for each migration - for i, m := range migrations { - prev := int64(-1) - if i > 0 { - prev = migrations[i-1].Version - migrations[i-1].Next = m.Version - } - migrations[i].Previous = prev - } - - return migrations -} - -func versionFilter(v, current, target int64) bool { - - if target > current { - return v > current && v <= target - } - - if target < current { - return v <= current && v > target - } - - return false + return result, rows.Err() } diff --git a/internal/sqlutil/migrate_test.go b/internal/sqlutil/migrate_test.go new file mode 100644 index 000000000..d8bcae196 --- /dev/null +++ b/internal/sqlutil/migrate_test.go @@ -0,0 +1,112 @@ +package sqlutil_test + +import ( + "context" + "database/sql" + "fmt" + "reflect" + "testing" + + "github.com/matrix-org/dendrite/internal/sqlutil" + "github.com/matrix-org/dendrite/test" + _ "github.com/mattn/go-sqlite3" +) + +var dummyMigrations = []sqlutil.Migration{ + { + Version: "init", + Up: func(ctx context.Context, txn *sql.Tx) error { + _, err := txn.ExecContext(ctx, "CREATE TABLE IF NOT EXISTS dummy ( test TEXT );") + return err + }, + }, + { + Version: "v2", + Up: func(ctx context.Context, txn *sql.Tx) error { + _, err := txn.ExecContext(ctx, "ALTER TABLE dummy ADD COLUMN test2 TEXT;") + return err + }, + }, + { + Version: "v2", // duplicate, this migration will be skipped + Up: func(ctx context.Context, txn *sql.Tx) error { + _, err := txn.ExecContext(ctx, "ALTER TABLE dummy ADD COLUMN test2 TEXT;") + return err + }, + }, + { + Version: "multiple execs", + Up: func(ctx context.Context, txn *sql.Tx) error { + _, err := txn.ExecContext(ctx, "ALTER TABLE dummy ADD COLUMN test3 TEXT;") + if err != nil { + return err + } + _, err = txn.ExecContext(ctx, "ALTER TABLE dummy ADD COLUMN test4 TEXT;") + return err + }, + }, +} + +var failMigration = sqlutil.Migration{ + Version: "iFail", + Up: func(ctx context.Context, txn *sql.Tx) error { + return fmt.Errorf("iFail") + }, + Down: nil, +} + +func Test_migrations_Up(t *testing.T) { + withFail := append(dummyMigrations, failMigration) + + tests := []struct { + name string + migrations []sqlutil.Migration + wantResult map[string]struct{} + wantErr bool + }{ + { + name: "dummy migration", + migrations: dummyMigrations, + wantResult: map[string]struct{}{ + "init": {}, + "v2": {}, + "multiple execs": {}, + }, + }, + { + name: "with fail", + migrations: withFail, + wantErr: true, + }, + } + + ctx := context.Background() + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + test.WithAllDatabases(t, func(t *testing.T, dbType 
test.DBType) { + conStr, close := test.PrepareDBConnectionString(t, dbType) + defer close() + driverName := "sqlite3" + if dbType == test.DBTypePostgres { + driverName = "postgres" + } + db, err := sql.Open(driverName, conStr) + if err != nil { + t.Errorf("unable to open database: %v", err) + } + m := sqlutil.NewMigrator(db) + m.AddMigrations(tt.migrations...) + if err = m.Up(ctx); (err != nil) != tt.wantErr { + t.Errorf("Up() error = %v, wantErr %v", err, tt.wantErr) + } + result, err := m.ExecutedMigrations(ctx) + if err != nil { + t.Errorf("unable to get executed migrations: %v", err) + } + if !tt.wantErr && !reflect.DeepEqual(result, tt.wantResult) { + t.Errorf("expected: %+v, got %v", tt.wantResult, result) + } + }) + }) + } +} diff --git a/keyserver/storage/postgres/cross_signing_sigs_table.go b/keyserver/storage/postgres/cross_signing_sigs_table.go index b101e7ce5..8b2a865b9 100644 --- a/keyserver/storage/postgres/cross_signing_sigs_table.go +++ b/keyserver/storage/postgres/cross_signing_sigs_table.go @@ -21,6 +21,7 @@ import ( "github.com/matrix-org/dendrite/internal" "github.com/matrix-org/dendrite/internal/sqlutil" + "github.com/matrix-org/dendrite/keyserver/storage/postgres/deltas" "github.com/matrix-org/dendrite/keyserver/storage/tables" "github.com/matrix-org/dendrite/keyserver/types" "github.com/matrix-org/gomatrixserverlib" @@ -66,6 +67,16 @@ func NewPostgresCrossSigningSigsTable(db *sql.DB) (tables.CrossSigningSigs, erro if err != nil { return nil, err } + + m := sqlutil.NewMigrator(db) + m.AddMigrations(sqlutil.Migration{ + Version: "keyserver: cross signing signature indexes", + Up: deltas.UpFixCrossSigningSignatureIndexes, + }) + if err = m.Up(context.Background()); err != nil { + return nil, err + } + return s, sqlutil.StatementList{ {&s.selectCrossSigningSigsForTargetStmt, selectCrossSigningSigsForTargetSQL}, {&s.upsertCrossSigningSigsForTargetStmt, upsertCrossSigningSigsForTargetSQL}, diff --git a/keyserver/storage/postgres/deltas/2022012016470000_key_changes.go b/keyserver/storage/postgres/deltas/2022012016470000_key_changes.go index e5bcf08d1..0cfe9e791 100644 --- a/keyserver/storage/postgres/deltas/2022012016470000_key_changes.go +++ b/keyserver/storage/postgres/deltas/2022012016470000_key_changes.go @@ -15,37 +15,27 @@ package deltas import ( + "context" "database/sql" "fmt" - - "github.com/matrix-org/dendrite/internal/sqlutil" - "github.com/pressly/goose" ) -func LoadFromGoose() { - goose.AddMigration(UpRefactorKeyChanges, DownRefactorKeyChanges) -} - -func LoadRefactorKeyChanges(m *sqlutil.Migrations) { - m.AddMigration(UpRefactorKeyChanges, DownRefactorKeyChanges) -} - -func UpRefactorKeyChanges(tx *sql.Tx) error { +func UpRefactorKeyChanges(ctx context.Context, tx *sql.Tx) error { // start counting from the last max offset, else 0. We need to do a count(*) first to see if there // even are entries in this table to know if we can query for log_offset. Without the count then // the query to SELECT the max log offset fails on new Dendrite instances as log_offset doesn't // exist on that table. 
Even though we discard the error, the txn is tainted and gets aborted :/ var count int - _ = tx.QueryRow(`SELECT count(*) FROM keyserver_key_changes`).Scan(&count) + _ = tx.QueryRowContext(ctx, `SELECT count(*) FROM keyserver_key_changes`).Scan(&count) if count > 0 { var maxOffset int64 - _ = tx.QueryRow(`SELECT coalesce(MAX(log_offset), 0) AS offset FROM keyserver_key_changes`).Scan(&maxOffset) - if _, err := tx.Exec(fmt.Sprintf(`CREATE SEQUENCE IF NOT EXISTS keyserver_key_changes_seq START %d`, maxOffset)); err != nil { + _ = tx.QueryRowContext(ctx, `SELECT coalesce(MAX(log_offset), 0) AS offset FROM keyserver_key_changes`).Scan(&maxOffset) + if _, err := tx.ExecContext(ctx, fmt.Sprintf(`CREATE SEQUENCE IF NOT EXISTS keyserver_key_changes_seq START %d`, maxOffset)); err != nil { return fmt.Errorf("failed to CREATE SEQUENCE for key changes, starting at %d: %s", maxOffset, err) } } - _, err := tx.Exec(` + _, err := tx.ExecContext(ctx, ` -- make the new table DROP TABLE IF EXISTS keyserver_key_changes; CREATE TABLE IF NOT EXISTS keyserver_key_changes ( @@ -60,8 +50,8 @@ func UpRefactorKeyChanges(tx *sql.Tx) error { return nil } -func DownRefactorKeyChanges(tx *sql.Tx) error { - _, err := tx.Exec(` +func DownRefactorKeyChanges(ctx context.Context, tx *sql.Tx) error { + _, err := tx.ExecContext(ctx, ` -- Drop all data and revert back, we can't keep the data as Kafka offsets determine the numbers DROP SEQUENCE IF EXISTS keyserver_key_changes_seq; DROP TABLE IF EXISTS keyserver_key_changes; diff --git a/keyserver/storage/postgres/deltas/2022042612000000_xsigning_idx.go b/keyserver/storage/postgres/deltas/2022042612000000_xsigning_idx.go index 12956e3b4..1a3d4fee9 100644 --- a/keyserver/storage/postgres/deltas/2022042612000000_xsigning_idx.go +++ b/keyserver/storage/postgres/deltas/2022042612000000_xsigning_idx.go @@ -15,18 +15,13 @@ package deltas import ( + "context" "database/sql" "fmt" - - "github.com/matrix-org/dendrite/internal/sqlutil" ) -func LoadFixCrossSigningSignatureIndexes(m *sqlutil.Migrations) { - m.AddMigration(UpFixCrossSigningSignatureIndexes, DownFixCrossSigningSignatureIndexes) -} - -func UpFixCrossSigningSignatureIndexes(tx *sql.Tx) error { - _, err := tx.Exec(` +func UpFixCrossSigningSignatureIndexes(ctx context.Context, tx *sql.Tx) error { + _, err := tx.ExecContext(ctx, ` ALTER TABLE keyserver_cross_signing_sigs DROP CONSTRAINT keyserver_cross_signing_sigs_pkey; ALTER TABLE keyserver_cross_signing_sigs ADD PRIMARY KEY (origin_user_id, origin_key_id, target_user_id, target_key_id); @@ -38,8 +33,8 @@ func UpFixCrossSigningSignatureIndexes(tx *sql.Tx) error { return nil } -func DownFixCrossSigningSignatureIndexes(tx *sql.Tx) error { - _, err := tx.Exec(` +func DownFixCrossSigningSignatureIndexes(ctx context.Context, tx *sql.Tx) error { + _, err := tx.ExecContext(ctx, ` ALTER TABLE keyserver_cross_signing_sigs DROP CONSTRAINT keyserver_cross_signing_sigs_pkey; ALTER TABLE keyserver_cross_signing_sigs ADD PRIMARY KEY (origin_user_id, target_user_id, target_key_id); diff --git a/keyserver/storage/postgres/key_changes_table.go b/keyserver/storage/postgres/key_changes_table.go index f93a94bd3..6894d7b7c 100644 --- a/keyserver/storage/postgres/key_changes_table.go +++ b/keyserver/storage/postgres/key_changes_table.go @@ -19,6 +19,8 @@ import ( "database/sql" "github.com/matrix-org/dendrite/internal" + "github.com/matrix-org/dendrite/internal/sqlutil" + "github.com/matrix-org/dendrite/keyserver/storage/postgres/deltas" "github.com/matrix-org/dendrite/keyserver/storage/tables" ) @@ 
-55,7 +57,23 @@ func NewPostgresKeyChangesTable(db *sql.DB) (tables.KeyChanges, error) { db: db, } _, err := db.Exec(keyChangesSchema) - return s, err + if err != nil { + return s, err + } + + // TODO: Remove when we are sure we are not having goose artefacts in the db + // This forces an error, which indicates the migration is already applied, since the + // column partition was removed from the table + err = db.QueryRow("SELECT partition FROM keyserver_key_changes LIMIT 1;").Scan() + if err == nil { + m := sqlutil.NewMigrator(db) + m.AddMigrations(sqlutil.Migration{ + Version: "keyserver: refactor key changes", + Up: deltas.UpRefactorKeyChanges, + }) + return s, m.Up(context.Background()) + } + return s, nil } func (s *keyChangesStatements) Prepare() (err error) { diff --git a/keyserver/storage/postgres/storage.go b/keyserver/storage/postgres/storage.go index b8f70acf8..35e630559 100644 --- a/keyserver/storage/postgres/storage.go +++ b/keyserver/storage/postgres/storage.go @@ -16,7 +16,6 @@ package postgres import ( "github.com/matrix-org/dendrite/internal/sqlutil" - "github.com/matrix-org/dendrite/keyserver/storage/postgres/deltas" "github.com/matrix-org/dendrite/keyserver/storage/shared" "github.com/matrix-org/dendrite/setup/base" "github.com/matrix-org/dendrite/setup/config" @@ -53,12 +52,6 @@ func NewDatabase(base *base.BaseDendrite, dbProperties *config.DatabaseOptions) if err != nil { return nil, err } - m := sqlutil.NewMigrations() - deltas.LoadRefactorKeyChanges(m) - deltas.LoadFixCrossSigningSignatureIndexes(m) - if err = m.RunDeltas(db, dbProperties); err != nil { - return nil, err - } if err = kc.Prepare(); err != nil { return nil, err } diff --git a/keyserver/storage/sqlite3/cross_signing_sigs_table.go b/keyserver/storage/sqlite3/cross_signing_sigs_table.go index 36d562b8a..ea431151e 100644 --- a/keyserver/storage/sqlite3/cross_signing_sigs_table.go +++ b/keyserver/storage/sqlite3/cross_signing_sigs_table.go @@ -21,6 +21,7 @@ import ( "github.com/matrix-org/dendrite/internal" "github.com/matrix-org/dendrite/internal/sqlutil" + "github.com/matrix-org/dendrite/keyserver/storage/sqlite3/deltas" "github.com/matrix-org/dendrite/keyserver/storage/tables" "github.com/matrix-org/dendrite/keyserver/types" "github.com/matrix-org/gomatrixserverlib" @@ -65,6 +66,15 @@ func NewSqliteCrossSigningSigsTable(db *sql.DB) (tables.CrossSigningSigs, error) if err != nil { return nil, err } + m := sqlutil.NewMigrator(db) + m.AddMigrations(sqlutil.Migration{ + Version: "keyserver: cross signing signature indexes", + Up: deltas.UpFixCrossSigningSignatureIndexes, + }) + if err = m.Up(context.Background()); err != nil { + return nil, err + } + return s, sqlutil.StatementList{ {&s.selectCrossSigningSigsForTargetStmt, selectCrossSigningSigsForTargetSQL}, {&s.upsertCrossSigningSigsForTargetStmt, upsertCrossSigningSigsForTargetSQL}, diff --git a/keyserver/storage/sqlite3/deltas/2022012016470000_key_changes.go b/keyserver/storage/sqlite3/deltas/2022012016470000_key_changes.go index fbc548c38..cd0f19df9 100644 --- a/keyserver/storage/sqlite3/deltas/2022012016470000_key_changes.go +++ b/keyserver/storage/sqlite3/deltas/2022012016470000_key_changes.go @@ -15,28 +15,18 @@ package deltas import ( + "context" "database/sql" "fmt" - - "github.com/matrix-org/dendrite/internal/sqlutil" - "github.com/pressly/goose" ) -func LoadFromGoose() { - goose.AddMigration(UpRefactorKeyChanges, DownRefactorKeyChanges) -} - -func LoadRefactorKeyChanges(m *sqlutil.Migrations) { - m.AddMigration(UpRefactorKeyChanges, 
DownRefactorKeyChanges) -} - -func UpRefactorKeyChanges(tx *sql.Tx) error { +func UpRefactorKeyChanges(ctx context.Context, tx *sql.Tx) error { // start counting from the last max offset, else 0. var maxOffset int64 var userID string - _ = tx.QueryRow(`SELECT user_id, MAX(log_offset) FROM keyserver_key_changes GROUP BY user_id`).Scan(&userID, &maxOffset) + _ = tx.QueryRowContext(ctx, `SELECT user_id, MAX(log_offset) FROM keyserver_key_changes GROUP BY user_id`).Scan(&userID, &maxOffset) - _, err := tx.Exec(` + _, err := tx.ExecContext(ctx, ` -- make the new table DROP TABLE IF EXISTS keyserver_key_changes; CREATE TABLE IF NOT EXISTS keyserver_key_changes ( @@ -51,14 +41,14 @@ func UpRefactorKeyChanges(tx *sql.Tx) error { } // to start counting from maxOffset, insert a row with that value if userID != "" { - _, err = tx.Exec(`INSERT INTO keyserver_key_changes(change_id, user_id) VALUES($1, $2)`, maxOffset, userID) + _, err = tx.ExecContext(ctx, `INSERT INTO keyserver_key_changes(change_id, user_id) VALUES($1, $2)`, maxOffset, userID) return err } return nil } -func DownRefactorKeyChanges(tx *sql.Tx) error { - _, err := tx.Exec(` +func DownRefactorKeyChanges(ctx context.Context, tx *sql.Tx) error { + _, err := tx.ExecContext(ctx, ` -- Drop all data and revert back, we can't keep the data as Kafka offsets determine the numbers DROP TABLE IF EXISTS keyserver_key_changes; CREATE TABLE IF NOT EXISTS keyserver_key_changes ( diff --git a/keyserver/storage/sqlite3/deltas/2022042612000000_xsigning_idx.go b/keyserver/storage/sqlite3/deltas/2022042612000000_xsigning_idx.go index 230e39fef..d4e38dea5 100644 --- a/keyserver/storage/sqlite3/deltas/2022042612000000_xsigning_idx.go +++ b/keyserver/storage/sqlite3/deltas/2022042612000000_xsigning_idx.go @@ -15,18 +15,13 @@ package deltas import ( + "context" "database/sql" "fmt" - - "github.com/matrix-org/dendrite/internal/sqlutil" ) -func LoadFixCrossSigningSignatureIndexes(m *sqlutil.Migrations) { - m.AddMigration(UpFixCrossSigningSignatureIndexes, DownFixCrossSigningSignatureIndexes) -} - -func UpFixCrossSigningSignatureIndexes(tx *sql.Tx) error { - _, err := tx.Exec(` +func UpFixCrossSigningSignatureIndexes(ctx context.Context, tx *sql.Tx) error { + _, err := tx.ExecContext(ctx, ` CREATE TABLE IF NOT EXISTS keyserver_cross_signing_sigs_tmp ( origin_user_id TEXT NOT NULL, origin_key_id TEXT NOT NULL, @@ -50,8 +45,8 @@ func UpFixCrossSigningSignatureIndexes(tx *sql.Tx) error { return nil } -func DownFixCrossSigningSignatureIndexes(tx *sql.Tx) error { - _, err := tx.Exec(` +func DownFixCrossSigningSignatureIndexes(ctx context.Context, tx *sql.Tx) error { + _, err := tx.ExecContext(ctx, ` CREATE TABLE IF NOT EXISTS keyserver_cross_signing_sigs_tmp ( origin_user_id TEXT NOT NULL, origin_key_id TEXT NOT NULL, diff --git a/keyserver/storage/sqlite3/key_changes_table.go b/keyserver/storage/sqlite3/key_changes_table.go index e035e8c9c..1b27c3d05 100644 --- a/keyserver/storage/sqlite3/key_changes_table.go +++ b/keyserver/storage/sqlite3/key_changes_table.go @@ -19,6 +19,8 @@ import ( "database/sql" "github.com/matrix-org/dendrite/internal" + "github.com/matrix-org/dendrite/internal/sqlutil" + "github.com/matrix-org/dendrite/keyserver/storage/sqlite3/deltas" "github.com/matrix-org/dendrite/keyserver/storage/tables" ) @@ -53,7 +55,23 @@ func NewSqliteKeyChangesTable(db *sql.DB) (tables.KeyChanges, error) { db: db, } _, err := db.Exec(keyChangesSchema) - return s, err + if err != nil { + return s, err + } + // TODO: Remove when we are sure we are not having goose 
artefacts in the db + // This forces an error, which indicates the migration is already applied, since the + // column partition was removed from the table + err = db.QueryRow("SELECT partition FROM keyserver_key_changes LIMIT 1;").Scan() + if err == nil { + m := sqlutil.NewMigrator(db) + m.AddMigrations(sqlutil.Migration{ + Version: "keyserver: refactor key changes", + Up: deltas.UpRefactorKeyChanges, + }) + return s, m.Up(context.Background()) + } + + return s, nil } func (s *keyChangesStatements) Prepare() (err error) { diff --git a/keyserver/storage/sqlite3/storage.go b/keyserver/storage/sqlite3/storage.go index aeea9eac6..873fe3e24 100644 --- a/keyserver/storage/sqlite3/storage.go +++ b/keyserver/storage/sqlite3/storage.go @@ -17,7 +17,6 @@ package sqlite3 import ( "github.com/matrix-org/dendrite/internal/sqlutil" "github.com/matrix-org/dendrite/keyserver/storage/shared" - "github.com/matrix-org/dendrite/keyserver/storage/sqlite3/deltas" "github.com/matrix-org/dendrite/setup/base" "github.com/matrix-org/dendrite/setup/config" ) @@ -52,12 +51,6 @@ func NewDatabase(base *base.BaseDendrite, dbProperties *config.DatabaseOptions) return nil, err } - m := sqlutil.NewMigrations() - deltas.LoadRefactorKeyChanges(m) - deltas.LoadFixCrossSigningSignatureIndexes(m) - if err = m.RunDeltas(db, dbProperties); err != nil { - return nil, err - } if err = kc.Prepare(); err != nil { return nil, err } diff --git a/roomserver/storage/postgres/deltas/20201028212440_add_forgotten_column.go b/roomserver/storage/postgres/deltas/20201028212440_add_forgotten_column.go index f3bd8632f..61d4dba87 100644 --- a/roomserver/storage/postgres/deltas/20201028212440_add_forgotten_column.go +++ b/roomserver/storage/postgres/deltas/20201028212440_add_forgotten_column.go @@ -15,32 +15,21 @@ package deltas import ( + "context" "database/sql" "fmt" - - "github.com/matrix-org/dendrite/internal/sqlutil" - "github.com/pressly/goose" ) -func LoadFromGoose() { - goose.AddMigration(UpAddForgottenColumn, DownAddForgottenColumn) - goose.AddMigration(UpStateBlocksRefactor, DownStateBlocksRefactor) -} - -func LoadAddForgottenColumn(m *sqlutil.Migrations) { - m.AddMigration(UpAddForgottenColumn, DownAddForgottenColumn) -} - -func UpAddForgottenColumn(tx *sql.Tx) error { - _, err := tx.Exec(`ALTER TABLE roomserver_membership ADD COLUMN IF NOT EXISTS forgotten BOOLEAN NOT NULL DEFAULT false;`) +func UpAddForgottenColumn(ctx context.Context, tx *sql.Tx) error { + _, err := tx.ExecContext(ctx, `ALTER TABLE roomserver_membership ADD COLUMN IF NOT EXISTS forgotten BOOLEAN NOT NULL DEFAULT false;`) if err != nil { return fmt.Errorf("failed to execute upgrade: %w", err) } return nil } -func DownAddForgottenColumn(tx *sql.Tx) error { - _, err := tx.Exec(`ALTER TABLE roomserver_membership DROP COLUMN IF EXISTS forgotten;`) +func DownAddForgottenColumn(ctx context.Context, tx *sql.Tx) error { + _, err := tx.ExecContext(ctx, `ALTER TABLE roomserver_membership DROP COLUMN IF EXISTS forgotten;`) if err != nil { return fmt.Errorf("failed to execute downgrade: %w", err) } diff --git a/roomserver/storage/postgres/deltas/2021041615092700_state_blocks_refactor.go b/roomserver/storage/postgres/deltas/2021041615092700_state_blocks_refactor.go index 06442a4c3..355c49b14 100644 --- a/roomserver/storage/postgres/deltas/2021041615092700_state_blocks_refactor.go +++ b/roomserver/storage/postgres/deltas/2021041615092700_state_blocks_refactor.go @@ -15,11 +15,11 @@ package deltas import ( + "context" "database/sql" "fmt" "github.com/lib/pq" - 
"github.com/matrix-org/dendrite/internal/sqlutil" "github.com/matrix-org/dendrite/roomserver/types" "github.com/matrix-org/util" "github.com/sirupsen/logrus" @@ -36,48 +36,44 @@ type stateBlockData struct { EventNIDs types.EventNIDs } -func LoadStateBlocksRefactor(m *sqlutil.Migrations) { - m.AddMigration(UpStateBlocksRefactor, DownStateBlocksRefactor) -} - // nolint:gocyclo -func UpStateBlocksRefactor(tx *sql.Tx) error { +func UpStateBlocksRefactor(ctx context.Context, tx *sql.Tx) error { logrus.Warn("Performing state storage upgrade. Please wait, this may take some time!") defer logrus.Warn("State storage upgrade complete") var snapshotcount int var maxsnapshotid int var maxblockid int - if err := tx.QueryRow(`SELECT COUNT(DISTINCT state_snapshot_nid) FROM roomserver_state_snapshots;`).Scan(&snapshotcount); err != nil { - return fmt.Errorf("tx.QueryRow.Scan (count snapshots): %w", err) + if err := tx.QueryRowContext(ctx, `SELECT COUNT(DISTINCT state_snapshot_nid) FROM roomserver_state_snapshots;`).Scan(&snapshotcount); err != nil { + return fmt.Errorf("tx.QueryRowContext.Scan (count snapshots): %w", err) } - if err := tx.QueryRow(`SELECT COALESCE(MAX(state_snapshot_nid),0) FROM roomserver_state_snapshots;`).Scan(&maxsnapshotid); err != nil { - return fmt.Errorf("tx.QueryRow.Scan (count snapshots): %w", err) + if err := tx.QueryRowContext(ctx, `SELECT COALESCE(MAX(state_snapshot_nid),0) FROM roomserver_state_snapshots;`).Scan(&maxsnapshotid); err != nil { + return fmt.Errorf("tx.QueryRowContext.Scan (count snapshots): %w", err) } - if err := tx.QueryRow(`SELECT COALESCE(MAX(state_block_nid),0) FROM roomserver_state_block;`).Scan(&maxblockid); err != nil { - return fmt.Errorf("tx.QueryRow.Scan (count snapshots): %w", err) + if err := tx.QueryRowContext(ctx, `SELECT COALESCE(MAX(state_block_nid),0) FROM roomserver_state_block;`).Scan(&maxblockid); err != nil { + return fmt.Errorf("tx.QueryRowContext.Scan (count snapshots): %w", err) } maxsnapshotid++ maxblockid++ - if _, err := tx.Exec(`ALTER TABLE roomserver_state_block RENAME TO _roomserver_state_block;`); err != nil { - return fmt.Errorf("tx.Exec: %w", err) + if _, err := tx.ExecContext(ctx, `ALTER TABLE roomserver_state_block RENAME TO _roomserver_state_block;`); err != nil { + return fmt.Errorf("tx.ExecContext: %w", err) } - if _, err := tx.Exec(`ALTER TABLE roomserver_state_snapshots RENAME TO _roomserver_state_snapshots;`); err != nil { - return fmt.Errorf("tx.Exec: %w", err) + if _, err := tx.ExecContext(ctx, `ALTER TABLE roomserver_state_snapshots RENAME TO _roomserver_state_snapshots;`); err != nil { + return fmt.Errorf("tx.ExecContext: %w", err) } // We create new sequences starting with the maximum state snapshot and block NIDs. // This means that all newly created snapshots and blocks by the migration will have // NIDs higher than these values, so that when we come to update the references to // these NIDs using UPDATE statements, we can guarantee we are only ever updating old // values and not accidentally overwriting new ones. 
- if _, err := tx.Exec(fmt.Sprintf(`CREATE SEQUENCE roomserver_state_block_nid_sequence START WITH %d;`, maxblockid)); err != nil { - return fmt.Errorf("tx.Exec: %w", err) + if _, err := tx.ExecContext(ctx, fmt.Sprintf(`CREATE SEQUENCE roomserver_state_block_nid_sequence START WITH %d;`, maxblockid)); err != nil { + return fmt.Errorf("tx.ExecContext: %w", err) } - if _, err := tx.Exec(fmt.Sprintf(`CREATE SEQUENCE roomserver_state_snapshot_nid_sequence START WITH %d;`, maxsnapshotid)); err != nil { - return fmt.Errorf("tx.Exec: %w", err) + if _, err := tx.ExecContext(ctx, fmt.Sprintf(`CREATE SEQUENCE roomserver_state_snapshot_nid_sequence START WITH %d;`, maxsnapshotid)); err != nil { + return fmt.Errorf("tx.ExecContext: %w", err) } - _, err := tx.Exec(` + _, err := tx.ExecContext(ctx, ` CREATE TABLE IF NOT EXISTS roomserver_state_block ( state_block_nid bigint PRIMARY KEY DEFAULT nextval('roomserver_state_block_nid_sequence'), state_block_hash BYTEA UNIQUE, @@ -87,7 +83,7 @@ func UpStateBlocksRefactor(tx *sql.Tx) error { if err != nil { return fmt.Errorf("tx.Exec (create blocks table): %w", err) } - _, err = tx.Exec(` + _, err = tx.ExecContext(ctx, ` CREATE TABLE IF NOT EXISTS roomserver_state_snapshots ( state_snapshot_nid bigint PRIMARY KEY DEFAULT nextval('roomserver_state_snapshot_nid_sequence'), state_snapshot_hash BYTEA UNIQUE, @@ -104,7 +100,7 @@ func UpStateBlocksRefactor(tx *sql.Tx) error { // in question a state snapshot NID of 0 to indicate 'no snapshot'. // If we don't do this, we'll fail the assertions later on which try to ensure we didn't forget // any snapshots. - _, err = tx.Exec( + _, err = tx.ExecContext(ctx, `UPDATE roomserver_events SET state_snapshot_nid = 0 WHERE event_type_nid = $1 AND event_state_key_nid = $2`, types.MRoomCreateNID, types.EmptyStateKeyNID, ) @@ -115,7 +111,7 @@ func UpStateBlocksRefactor(tx *sql.Tx) error { batchsize := 100 for batchoffset := 0; batchoffset < snapshotcount; batchoffset += batchsize { var snapshotrows *sql.Rows - snapshotrows, err = tx.Query(` + snapshotrows, err = tx.QueryContext(ctx, ` SELECT state_snapshot_nid, room_nid, @@ -146,7 +142,7 @@ func UpStateBlocksRefactor(tx *sql.Tx) error { state_block_nid; `, batchsize, batchoffset) if err != nil { - return fmt.Errorf("tx.Query: %w", err) + return fmt.Errorf("tx.QueryContext: %w", err) } logrus.Warnf("Rewriting snapshots %d-%d of %d...", batchoffset, batchoffset+batchsize, snapshotcount) @@ -183,7 +179,7 @@ func UpStateBlocksRefactor(tx *sql.Tx) error { // fill in bad create snapshots for _, s := range badCreateSnapshots { var createEventNID types.EventNID - err = tx.QueryRow( + err = tx.QueryRowContext(ctx, `SELECT event_nid FROM roomserver_events WHERE state_snapshot_nid = $1 AND event_type_nid = 1`, s.StateSnapshotNID, ).Scan(&createEventNID) if err == sql.ErrNoRows { @@ -208,7 +204,7 @@ func UpStateBlocksRefactor(tx *sql.Tx) error { } var blocknid types.StateBlockNID - err = tx.QueryRow(` + err = tx.QueryRowContext(ctx, ` INSERT INTO roomserver_state_block (state_block_hash, event_nids) VALUES ($1, $2) ON CONFLICT (state_block_hash) DO UPDATE SET event_nids=$2 @@ -227,7 +223,7 @@ func UpStateBlocksRefactor(tx *sql.Tx) error { } var newNID types.StateSnapshotNID - err = tx.QueryRow(` + err = tx.QueryRowContext(ctx, ` INSERT INTO roomserver_state_snapshots (state_snapshot_hash, room_nid, state_block_nids) VALUES ($1, $2, $3) ON CONFLICT (state_snapshot_hash) DO UPDATE SET room_nid=$2 @@ -237,12 +233,12 @@ func UpStateBlocksRefactor(tx *sql.Tx) error { return 
fmt.Errorf("tx.QueryRow.Scan (insert new snapshot): %w", err) } - if _, err = tx.Exec(`UPDATE roomserver_events SET state_snapshot_nid=$1 WHERE state_snapshot_nid=$2 AND state_snapshot_nid<$3`, newNID, snapshotdata.StateSnapshotNID, maxsnapshotid); err != nil { - return fmt.Errorf("tx.Exec (update events): %w", err) + if _, err = tx.ExecContext(ctx, `UPDATE roomserver_events SET state_snapshot_nid=$1 WHERE state_snapshot_nid=$2 AND state_snapshot_nid<$3`, newNID, snapshotdata.StateSnapshotNID, maxsnapshotid); err != nil { + return fmt.Errorf("tx.ExecContext (update events): %w", err) } - if _, err = tx.Exec(`UPDATE roomserver_rooms SET state_snapshot_nid=$1 WHERE state_snapshot_nid=$2 AND state_snapshot_nid<$3`, newNID, snapshotdata.StateSnapshotNID, maxsnapshotid); err != nil { - return fmt.Errorf("tx.Exec (update rooms): %w", err) + if _, err = tx.ExecContext(ctx, `UPDATE roomserver_rooms SET state_snapshot_nid=$1 WHERE state_snapshot_nid=$2 AND state_snapshot_nid<$3`, newNID, snapshotdata.StateSnapshotNID, maxsnapshotid); err != nil { + return fmt.Errorf("tx.ExecContext (update rooms): %w", err) } } } @@ -252,13 +248,13 @@ func UpStateBlocksRefactor(tx *sql.Tx) error { // in roomserver_state_snapshots var count int64 - if err = tx.QueryRow(`SELECT COUNT(*) FROM roomserver_events WHERE state_snapshot_nid < $1 AND state_snapshot_nid != 0`, maxsnapshotid).Scan(&count); err != nil { + if err = tx.QueryRowContext(ctx, `SELECT COUNT(*) FROM roomserver_events WHERE state_snapshot_nid < $1 AND state_snapshot_nid != 0`, maxsnapshotid).Scan(&count); err != nil { return fmt.Errorf("assertion query failed: %s", err) } if count > 0 { var res sql.Result var c int64 - res, err = tx.Exec(`UPDATE roomserver_events SET state_snapshot_nid = 0 WHERE state_snapshot_nid < $1 AND state_snapshot_nid != 0`, maxsnapshotid) + res, err = tx.ExecContext(ctx, `UPDATE roomserver_events SET state_snapshot_nid = 0 WHERE state_snapshot_nid < $1 AND state_snapshot_nid != 0`, maxsnapshotid) if err != nil && err != sql.ErrNoRows { return fmt.Errorf("failed to reset invalid state snapshots: %w", err) } @@ -268,13 +264,13 @@ func UpStateBlocksRefactor(tx *sql.Tx) error { return fmt.Errorf("expected to reset %d event(s) but only updated %d event(s)", count, c) } } - if err = tx.QueryRow(`SELECT COUNT(*) FROM roomserver_rooms WHERE state_snapshot_nid < $1 AND state_snapshot_nid != 0`, maxsnapshotid).Scan(&count); err != nil { + if err = tx.QueryRowContext(ctx, `SELECT COUNT(*) FROM roomserver_rooms WHERE state_snapshot_nid < $1 AND state_snapshot_nid != 0`, maxsnapshotid).Scan(&count); err != nil { return fmt.Errorf("assertion query failed: %s", err) } if count > 0 { var debugRoomID string var debugSnapNID, debugLastEventNID int64 - err = tx.QueryRow( + err = tx.QueryRowContext(ctx, `SELECT room_id, state_snapshot_nid, last_event_sent_nid FROM roomserver_rooms WHERE state_snapshot_nid < $1 AND state_snapshot_nid != 0`, maxsnapshotid, ).Scan(&debugRoomID, &debugSnapNID, &debugLastEventNID) if err != nil { @@ -291,13 +287,13 @@ func UpStateBlocksRefactor(tx *sql.Tx) error { return fmt.Errorf("%d rooms exist in roomserver_rooms which have not been converted to a new state_snapshot_nid; this is a bug, please report", count) } - if _, err = tx.Exec(` + if _, err = tx.ExecContext(ctx, ` DROP TABLE _roomserver_state_snapshots; DROP SEQUENCE roomserver_state_snapshot_nid_seq; `); err != nil { return fmt.Errorf("tx.Exec (delete old snapshot table): %w", err) } - if _, err = tx.Exec(` + if _, err = tx.ExecContext(ctx, ` DROP TABLE 
_roomserver_state_block; DROP SEQUENCE roomserver_state_block_nid_seq; `); err != nil { @@ -307,6 +303,6 @@ func UpStateBlocksRefactor(tx *sql.Tx) error { return nil } -func DownStateBlocksRefactor(tx *sql.Tx) error { +func DownStateBlocksRefactor(ctx context.Context, tx *sql.Tx) error { panic("Downgrading state storage is not supported") } diff --git a/roomserver/storage/postgres/membership_table.go b/roomserver/storage/postgres/membership_table.go index eaaea22a1..bd3fd5592 100644 --- a/roomserver/storage/postgres/membership_table.go +++ b/roomserver/storage/postgres/membership_table.go @@ -23,6 +23,7 @@ import ( "github.com/lib/pq" "github.com/matrix-org/dendrite/internal" "github.com/matrix-org/dendrite/internal/sqlutil" + "github.com/matrix-org/dendrite/roomserver/storage/postgres/deltas" "github.com/matrix-org/dendrite/roomserver/storage/tables" "github.com/matrix-org/dendrite/roomserver/types" "github.com/matrix-org/gomatrixserverlib" @@ -173,7 +174,15 @@ type membershipStatements struct { func CreateMembershipTable(db *sql.DB) error { _, err := db.Exec(membershipSchema) - return err + if err != nil { + return err + } + m := sqlutil.NewMigrator(db) + m.AddMigrations(sqlutil.Migration{ + Version: "roomserver: add forgotten column", + Up: deltas.UpAddForgottenColumn, + }) + return m.Up(context.Background()) } func PrepareMembershipTable(db *sql.DB) (tables.Membership, error) { diff --git a/roomserver/storage/postgres/storage.go b/roomserver/storage/postgres/storage.go index 70ea4d8ba..4c271ea9b 100644 --- a/roomserver/storage/postgres/storage.go +++ b/roomserver/storage/postgres/storage.go @@ -21,7 +21,6 @@ import ( // Import the postgres database driver. _ "github.com/lib/pq" - "github.com/matrix-org/dendrite/internal/caching" "github.com/matrix-org/dendrite/internal/sqlutil" "github.com/matrix-org/dendrite/roomserver/storage/postgres/deltas" @@ -45,17 +44,25 @@ func Open(base *base.BaseDendrite, dbProperties *config.DatabaseOptions, cache c } // Create the tables. - if err := d.create(db); err != nil { + if err = d.create(db); err != nil { return nil, err } - // Then execute the migrations. By this point the tables are created with the latest - // schemas. - m := sqlutil.NewMigrations() - deltas.LoadAddForgottenColumn(m) - deltas.LoadStateBlocksRefactor(m) - if err := m.RunDeltas(db, dbProperties); err != nil { - return nil, err + // Special case, since this migration uses several tables, so it needs to + // be sure that all tables are created first. + // TODO: Remove when we are sure we are not having goose artefacts in the db + // This forces an error, which indicates the migration is already applied, since the + // column event_nid was removed from the table + err = db.QueryRow("SELECT event_nid FROM roomserver_state_block LIMIT 1;").Scan() + if err == nil { + m := sqlutil.NewMigrator(db) + m.AddMigrations(sqlutil.Migration{ + Version: "roomserver: state blocks refactor", + Up: deltas.UpStateBlocksRefactor, + }) + if err := m.Up(base.Context()); err != nil { + return nil, err + } } // Then prepare the statements. 
Now that the migrations have run, any columns referred diff --git a/roomserver/storage/sqlite3/deltas/20201028212440_add_forgotten_column.go b/roomserver/storage/sqlite3/deltas/20201028212440_add_forgotten_column.go index d08ab02d5..4c002e33d 100644 --- a/roomserver/storage/sqlite3/deltas/20201028212440_add_forgotten_column.go +++ b/roomserver/storage/sqlite3/deltas/20201028212440_add_forgotten_column.go @@ -15,24 +15,13 @@ package deltas import ( + "context" "database/sql" "fmt" - - "github.com/matrix-org/dendrite/internal/sqlutil" - "github.com/pressly/goose" ) -func LoadFromGoose() { - goose.AddMigration(UpAddForgottenColumn, DownAddForgottenColumn) - goose.AddMigration(UpStateBlocksRefactor, DownStateBlocksRefactor) -} - -func LoadAddForgottenColumn(m *sqlutil.Migrations) { - m.AddMigration(UpAddForgottenColumn, DownAddForgottenColumn) -} - -func UpAddForgottenColumn(tx *sql.Tx) error { - _, err := tx.Exec(` ALTER TABLE roomserver_membership RENAME TO roomserver_membership_tmp; +func UpAddForgottenColumn(ctx context.Context, tx *sql.Tx) error { + _, err := tx.ExecContext(ctx, ` ALTER TABLE roomserver_membership RENAME TO roomserver_membership_tmp; CREATE TABLE IF NOT EXISTS roomserver_membership ( room_nid INTEGER NOT NULL, target_nid INTEGER NOT NULL, @@ -57,8 +46,8 @@ DROP TABLE roomserver_membership_tmp;`) return nil } -func DownAddForgottenColumn(tx *sql.Tx) error { - _, err := tx.Exec(` ALTER TABLE roomserver_membership RENAME TO roomserver_membership_tmp; +func DownAddForgottenColumn(ctx context.Context, tx *sql.Tx) error { + _, err := tx.ExecContext(ctx, ` ALTER TABLE roomserver_membership RENAME TO roomserver_membership_tmp; CREATE TABLE IF NOT EXISTS roomserver_membership ( room_nid INTEGER NOT NULL, target_nid INTEGER NOT NULL, diff --git a/roomserver/storage/sqlite3/deltas/2021041615092700_state_blocks_refactor.go b/roomserver/storage/sqlite3/deltas/2021041615092700_state_blocks_refactor.go index 8f5ab8fc5..00978121f 100644 --- a/roomserver/storage/sqlite3/deltas/2021041615092700_state_blocks_refactor.go +++ b/roomserver/storage/sqlite3/deltas/2021041615092700_state_blocks_refactor.go @@ -21,40 +21,35 @@ import ( "fmt" "github.com/matrix-org/dendrite/internal" - "github.com/matrix-org/dendrite/internal/sqlutil" "github.com/matrix-org/dendrite/roomserver/types" "github.com/matrix-org/util" "github.com/sirupsen/logrus" ) -func LoadStateBlocksRefactor(m *sqlutil.Migrations) { - m.AddMigration(UpStateBlocksRefactor, DownStateBlocksRefactor) -} - // nolint:gocyclo -func UpStateBlocksRefactor(tx *sql.Tx) error { +func UpStateBlocksRefactor(ctx context.Context, tx *sql.Tx) error { logrus.Warn("Performing state storage upgrade. 
Please wait, this may take some time!") defer logrus.Warn("State storage upgrade complete") var maxsnapshotid int var maxblockid int - if err := tx.QueryRow(`SELECT IFNULL(MAX(state_snapshot_nid),0) FROM roomserver_state_snapshots;`).Scan(&maxsnapshotid); err != nil { - return fmt.Errorf("tx.QueryRow.Scan (count snapshots): %w", err) + if err := tx.QueryRowContext(ctx, `SELECT IFNULL(MAX(state_snapshot_nid),0) FROM roomserver_state_snapshots;`).Scan(&maxsnapshotid); err != nil { + return fmt.Errorf("tx.QueryRowContext.Scan (count snapshots): %w", err) } - if err := tx.QueryRow(`SELECT IFNULL(MAX(state_block_nid),0) FROM roomserver_state_block;`).Scan(&maxblockid); err != nil { - return fmt.Errorf("tx.QueryRow.Scan (count snapshots): %w", err) + if err := tx.QueryRowContext(ctx, `SELECT IFNULL(MAX(state_block_nid),0) FROM roomserver_state_block;`).Scan(&maxblockid); err != nil { + return fmt.Errorf("tx.QueryRowContext.Scan (count snapshots): %w", err) } maxsnapshotid++ maxblockid++ oldMaxSnapshotID := maxsnapshotid - if _, err := tx.Exec(`ALTER TABLE roomserver_state_block RENAME TO _roomserver_state_block;`); err != nil { - return fmt.Errorf("tx.Exec: %w", err) + if _, err := tx.ExecContext(ctx, `ALTER TABLE roomserver_state_block RENAME TO _roomserver_state_block;`); err != nil { + return fmt.Errorf("tx.ExecContext: %w", err) } - if _, err := tx.Exec(`ALTER TABLE roomserver_state_snapshots RENAME TO _roomserver_state_snapshots;`); err != nil { - return fmt.Errorf("tx.Exec: %w", err) + if _, err := tx.ExecContext(ctx, `ALTER TABLE roomserver_state_snapshots RENAME TO _roomserver_state_snapshots;`); err != nil { + return fmt.Errorf("tx.ExecContext: %w", err) } - _, err := tx.Exec(` + _, err := tx.ExecContext(ctx, ` CREATE TABLE IF NOT EXISTS roomserver_state_block ( state_block_nid INTEGER PRIMARY KEY AUTOINCREMENT, state_block_hash BLOB UNIQUE, @@ -62,9 +57,9 @@ func UpStateBlocksRefactor(tx *sql.Tx) error { ); `) if err != nil { - return fmt.Errorf("tx.Exec: %w", err) + return fmt.Errorf("tx.ExecContext: %w", err) } - _, err = tx.Exec(` + _, err = tx.ExecContext(ctx, ` CREATE TABLE IF NOT EXISTS roomserver_state_snapshots ( state_snapshot_nid INTEGER PRIMARY KEY AUTOINCREMENT, state_snapshot_hash BLOB UNIQUE, @@ -73,11 +68,11 @@ func UpStateBlocksRefactor(tx *sql.Tx) error { ); `) if err != nil { - return fmt.Errorf("tx.Exec: %w", err) + return fmt.Errorf("tx.ExecContext: %w", err) } - snapshotrows, err := tx.Query(`SELECT state_snapshot_nid, room_nid, state_block_nids FROM _roomserver_state_snapshots;`) + snapshotrows, err := tx.QueryContext(ctx, `SELECT state_snapshot_nid, room_nid, state_block_nids FROM _roomserver_state_snapshots;`) if err != nil { - return fmt.Errorf("tx.Query: %w", err) + return fmt.Errorf("tx.QueryContext: %w", err) } defer internal.CloseAndLogIfError(context.TODO(), snapshotrows, "rows.close() failed") for snapshotrows.Next() { @@ -99,7 +94,7 @@ func UpStateBlocksRefactor(tx *sql.Tx) error { // in question a state snapshot NID of 0 to indicate 'no snapshot'. // If we don't do this, we'll fail the assertions later on which try to ensure we didn't forget // any snapshots. 
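[Editorial note] The "assertions later on" that this comment refers to are the COUNT(*) checks at the end of the migration. Pulled out on its own, the event-side check is roughly the following; this is a simplified sketch, since the migration above first tries to reset stale event rows to snapshot 0 and only hard-fails for rooms:

package sketch

import (
	"context"
	"database/sql"
	"fmt"
)

// assertNoStaleSnapshots errors if any event still references a pre-migration
// snapshot NID, other than the sentinel 0 used for create events.
func assertNoStaleSnapshots(ctx context.Context, tx *sql.Tx, oldMaxSnapshotID int) error {
	var count int64
	if err := tx.QueryRowContext(ctx,
		`SELECT COUNT(*) FROM roomserver_events WHERE state_snapshot_nid < $1 AND state_snapshot_nid != 0`,
		oldMaxSnapshotID,
	).Scan(&count); err != nil {
		return fmt.Errorf("assertion query failed: %w", err)
	}
	if count > 0 {
		return fmt.Errorf("%d event(s) still reference old state snapshot NIDs", count)
	}
	return nil
}
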
- _, err = tx.Exec( + _, err = tx.ExecContext(ctx, `UPDATE roomserver_events SET state_snapshot_nid = 0 WHERE event_type_nid = $1 AND event_state_key_nid = $2 AND state_snapshot_nid = $3`, types.MRoomCreateNID, types.EmptyStateKeyNID, snapshot, ) @@ -109,9 +104,9 @@ func UpStateBlocksRefactor(tx *sql.Tx) error { } for _, block := range blocks { if err = func() error { - blockrows, berr := tx.Query(`SELECT event_nid FROM _roomserver_state_block WHERE state_block_nid = $1`, block) + blockrows, berr := tx.QueryContext(ctx, `SELECT event_nid FROM _roomserver_state_block WHERE state_block_nid = $1`, block) if berr != nil { - return fmt.Errorf("tx.Query (event nids from old block): %w", berr) + return fmt.Errorf("tx.QueryContext (event nids from old block): %w", berr) } defer internal.CloseAndLogIfError(context.TODO(), blockrows, "rows.close() failed") events := types.EventNIDs{} @@ -129,14 +124,14 @@ func UpStateBlocksRefactor(tx *sql.Tx) error { } var blocknid types.StateBlockNID - err = tx.QueryRow(` + err = tx.QueryRowContext(ctx, ` INSERT INTO roomserver_state_block (state_block_nid, state_block_hash, event_nids) VALUES ($1, $2, $3) ON CONFLICT (state_block_hash) DO UPDATE SET event_nids=$3 RETURNING state_block_nid `, maxblockid, events.Hash(), eventjson).Scan(&blocknid) if err != nil { - return fmt.Errorf("tx.QueryRow.Scan (insert new block): %w", err) + return fmt.Errorf("tx.QueryRowContext.Scan (insert new block): %w", err) } maxblockid++ newblocks = append(newblocks, blocknid) @@ -151,22 +146,22 @@ func UpStateBlocksRefactor(tx *sql.Tx) error { } var newsnapshot types.StateSnapshotNID - err = tx.QueryRow(` + err = tx.QueryRowContext(ctx, ` INSERT INTO roomserver_state_snapshots (state_snapshot_nid, state_snapshot_hash, room_nid, state_block_nids) VALUES ($1, $2, $3, $4) ON CONFLICT (state_snapshot_hash) DO UPDATE SET room_nid=$3 RETURNING state_snapshot_nid `, maxsnapshotid, newblocks.Hash(), room, newblocksjson).Scan(&newsnapshot) if err != nil { - return fmt.Errorf("tx.QueryRow.Scan (insert new snapshot): %w", err) + return fmt.Errorf("tx.QueryRowContext.Scan (insert new snapshot): %w", err) } maxsnapshotid++ - _, err = tx.Exec(`UPDATE roomserver_events SET state_snapshot_nid=$1 WHERE state_snapshot_nid=$2 AND state_snapshot_nid<$3`, newsnapshot, snapshot, maxsnapshotid) + _, err = tx.ExecContext(ctx, `UPDATE roomserver_events SET state_snapshot_nid=$1 WHERE state_snapshot_nid=$2 AND state_snapshot_nid<$3`, newsnapshot, snapshot, maxsnapshotid) if err != nil { - return fmt.Errorf("tx.Exec (update events): %w", err) + return fmt.Errorf("tx.ExecContext (update events): %w", err) } - if _, err = tx.Exec(`UPDATE roomserver_rooms SET state_snapshot_nid=$1 WHERE state_snapshot_nid=$2 AND state_snapshot_nid<$3`, newsnapshot, snapshot, maxsnapshotid); err != nil { - return fmt.Errorf("tx.Exec (update rooms): %w", err) + if _, err = tx.ExecContext(ctx, `UPDATE roomserver_rooms SET state_snapshot_nid=$1 WHERE state_snapshot_nid=$2 AND state_snapshot_nid<$3`, newsnapshot, snapshot, maxsnapshotid); err != nil { + return fmt.Errorf("tx.ExecContext (update rooms): %w", err) } } } @@ -175,13 +170,13 @@ func UpStateBlocksRefactor(tx *sql.Tx) error { // If we do, this is a problem if Dendrite tries to load the snapshot as it will not exist // in roomserver_state_snapshots var count int64 - if err = tx.QueryRow(`SELECT COUNT(*) FROM roomserver_events WHERE state_snapshot_nid < $1 AND state_snapshot_nid != 0`, oldMaxSnapshotID).Scan(&count); err != nil { + if err = tx.QueryRowContext(ctx, `SELECT 
COUNT(*) FROM roomserver_events WHERE state_snapshot_nid < $1 AND state_snapshot_nid != 0`, oldMaxSnapshotID).Scan(&count); err != nil { return fmt.Errorf("assertion query failed: %s", err) } if count > 0 { var res sql.Result var c int64 - res, err = tx.Exec(`UPDATE roomserver_events SET state_snapshot_nid = 0 WHERE state_snapshot_nid < $1 AND state_snapshot_nid != 0`, oldMaxSnapshotID) + res, err = tx.ExecContext(ctx, `UPDATE roomserver_events SET state_snapshot_nid = 0 WHERE state_snapshot_nid < $1 AND state_snapshot_nid != 0`, oldMaxSnapshotID) if err != nil && err != sql.ErrNoRows { return fmt.Errorf("failed to reset invalid state snapshots: %w", err) } @@ -191,23 +186,23 @@ func UpStateBlocksRefactor(tx *sql.Tx) error { return fmt.Errorf("expected to reset %d event(s) but only updated %d event(s)", count, c) } } - if err = tx.QueryRow(`SELECT COUNT(*) FROM roomserver_rooms WHERE state_snapshot_nid < $1 AND state_snapshot_nid != 0`, oldMaxSnapshotID).Scan(&count); err != nil { + if err = tx.QueryRowContext(ctx, `SELECT COUNT(*) FROM roomserver_rooms WHERE state_snapshot_nid < $1 AND state_snapshot_nid != 0`, oldMaxSnapshotID).Scan(&count); err != nil { return fmt.Errorf("assertion query failed: %s", err) } if count > 0 { return fmt.Errorf("%d rooms exist in roomserver_rooms which have not been converted to a new state_snapshot_nid; this is a bug, please report", count) } - if _, err = tx.Exec(`DROP TABLE _roomserver_state_snapshots;`); err != nil { + if _, err = tx.ExecContext(ctx, `DROP TABLE _roomserver_state_snapshots;`); err != nil { return fmt.Errorf("tx.Exec (delete old snapshot table): %w", err) } - if _, err = tx.Exec(`DROP TABLE _roomserver_state_block;`); err != nil { + if _, err = tx.ExecContext(ctx, `DROP TABLE _roomserver_state_block;`); err != nil { return fmt.Errorf("tx.Exec (delete old block table): %w", err) } return nil } -func DownStateBlocksRefactor(tx *sql.Tx) error { +func DownStateBlocksRefactor(ctx context.Context, tx *sql.Tx) error { panic("Downgrading state storage is not supported") } diff --git a/roomserver/storage/sqlite3/membership_table.go b/roomserver/storage/sqlite3/membership_table.go index 9dda82fb2..f3303eb0e 100644 --- a/roomserver/storage/sqlite3/membership_table.go +++ b/roomserver/storage/sqlite3/membership_table.go @@ -23,6 +23,7 @@ import ( "github.com/matrix-org/dendrite/internal" "github.com/matrix-org/dendrite/internal/sqlutil" + "github.com/matrix-org/dendrite/roomserver/storage/sqlite3/deltas" "github.com/matrix-org/dendrite/roomserver/storage/tables" "github.com/matrix-org/dendrite/roomserver/types" "github.com/matrix-org/gomatrixserverlib" @@ -148,7 +149,15 @@ type membershipStatements struct { func CreateMembershipTable(db *sql.DB) error { _, err := db.Exec(membershipSchema) - return err + if err != nil { + return err + } + m := sqlutil.NewMigrator(db) + m.AddMigrations(sqlutil.Migration{ + Version: "roomserver: add forgotten column", + Up: deltas.UpAddForgottenColumn, + }) + return m.Up(context.Background()) } func PrepareMembershipTable(db *sql.DB) (tables.Membership, error) { diff --git a/roomserver/storage/sqlite3/storage.go b/roomserver/storage/sqlite3/storage.go index 8325fdad5..bb9c15b5a 100644 --- a/roomserver/storage/sqlite3/storage.go +++ b/roomserver/storage/sqlite3/storage.go @@ -54,17 +54,25 @@ func Open(base *base.BaseDendrite, dbProperties *config.DatabaseOptions, cache c // db.SetMaxOpenConns(20) // Create the tables. 
- if err := d.create(db); err != nil { + if err = d.create(db); err != nil { return nil, err } - // Then execute the migrations. By this point the tables are created with the latest - // schemas. - m := sqlutil.NewMigrations() - deltas.LoadAddForgottenColumn(m) - deltas.LoadStateBlocksRefactor(m) - if err := m.RunDeltas(db, dbProperties); err != nil { - return nil, err + // Special case, since this migration uses several tables, so it needs to + // be sure that all tables are created first. + // TODO: Remove when we are sure we are not having goose artefacts in the db + // This forces an error, which indicates the migration is already applied, since the + // column event_nid was removed from the table + err = db.QueryRow("SELECT event_nid FROM roomserver_state_block LIMIT 1;").Scan() + if err == nil { + m := sqlutil.NewMigrator(db) + m.AddMigrations(sqlutil.Migration{ + Version: "roomserver: state blocks refactor", + Up: deltas.UpStateBlocksRefactor, + }) + if err := m.Up(base.Context()); err != nil { + return nil, err + } } // Then prepare the statements. Now that the migrations have run, any columns referred diff --git a/syncapi/storage/postgres/current_room_state_table.go b/syncapi/storage/postgres/current_room_state_table.go index aae2d8c33..d13b7be41 100644 --- a/syncapi/storage/postgres/current_room_state_table.go +++ b/syncapi/storage/postgres/current_room_state_table.go @@ -23,6 +23,7 @@ import ( "github.com/lib/pq" "github.com/matrix-org/dendrite/internal" "github.com/matrix-org/dendrite/internal/sqlutil" + "github.com/matrix-org/dendrite/syncapi/storage/postgres/deltas" "github.com/matrix-org/dendrite/syncapi/storage/tables" "github.com/matrix-org/dendrite/syncapi/types" "github.com/matrix-org/gomatrixserverlib" @@ -133,6 +134,17 @@ func NewPostgresCurrentRoomStateTable(db *sql.DB) (tables.CurrentRoomState, erro if err != nil { return nil, err } + + m := sqlutil.NewMigrator(db) + m.AddMigrations(sqlutil.Migration{ + Version: "syncapi: add history visibility column (current_room_state)", + Up: deltas.UpAddHistoryVisibilityColumnCurrentRoomState, + }) + err = m.Up(context.Background()) + if err != nil { + return nil, err + } + if s.upsertRoomStateStmt, err = db.Prepare(upsertRoomStateSQL); err != nil { return nil, err } diff --git a/syncapi/storage/postgres/deltas/20201211125500_sequences.go b/syncapi/storage/postgres/deltas/20201211125500_sequences.go index 7db524da5..6303c9472 100644 --- a/syncapi/storage/postgres/deltas/20201211125500_sequences.go +++ b/syncapi/storage/postgres/deltas/20201211125500_sequences.go @@ -15,24 +15,13 @@ package deltas import ( + "context" "database/sql" "fmt" - - "github.com/matrix-org/dendrite/internal/sqlutil" - "github.com/pressly/goose" ) -func LoadFromGoose() { - goose.AddMigration(UpFixSequences, DownFixSequences) - goose.AddMigration(UpRemoveSendToDeviceSentColumn, DownRemoveSendToDeviceSentColumn) -} - -func LoadFixSequences(m *sqlutil.Migrations) { - m.AddMigration(UpFixSequences, DownFixSequences) -} - -func UpFixSequences(tx *sql.Tx) error { - _, err := tx.Exec(` +func UpFixSequences(ctx context.Context, tx *sql.Tx) error { + _, err := tx.ExecContext(ctx, ` -- We need to delete all of the existing receipts because the indexes -- will be wrong, and we'll get primary key violations if we try to -- reuse existing stream IDs from a different sequence. 
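[Editorial note] Both roomserver storage openers above gate the state-block refactor on a schema probe: select a column that the migration removes, and only register the delta when the probe succeeds, i.e. when the legacy schema (and possibly a goose-recorded migration) is still present. The patch does this inline with QueryRow(...).Scan(); spelled out as a helper, the same idea looks roughly like this (a sketch using the sqlite3 deltas package; the helper name is ours):

package sketch

import (
	"context"
	"database/sql"

	"github.com/matrix-org/dendrite/internal/sqlutil"
	"github.com/matrix-org/dendrite/roomserver/storage/sqlite3/deltas"
)

// runStateRefactorIfNeeded runs the state-block refactor only while the legacy
// schema is still in place. If event_nid has already been dropped from
// roomserver_state_block, the probe fails and there is nothing to do.
func runStateRefactorIfNeeded(ctx context.Context, db *sql.DB) error {
	rows, err := db.QueryContext(ctx, `SELECT event_nid FROM roomserver_state_block LIMIT 1;`)
	if err != nil {
		return nil // column (or table) is gone: the refactor has already been applied
	}
	_ = rows.Close()
	m := sqlutil.NewMigrator(db)
	m.AddMigrations(sqlutil.Migration{
		Version: "roomserver: state blocks refactor",
		Up:      deltas.UpStateBlocksRefactor,
	})
	return m.Up(ctx)
}
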
@@ -49,8 +38,8 @@ func UpFixSequences(tx *sql.Tx) error { return nil } -func DownFixSequences(tx *sql.Tx) error { - _, err := tx.Exec(` +func DownFixSequences(ctx context.Context, tx *sql.Tx) error { + _, err := tx.ExecContext(ctx, ` -- We need to delete all of the existing receipts because the indexes -- will be wrong, and we'll get primary key violations if we try to -- reuse existing stream IDs from a different sequence. diff --git a/syncapi/storage/postgres/deltas/20210112130000_sendtodevice_sentcolumn.go b/syncapi/storage/postgres/deltas/20210112130000_sendtodevice_sentcolumn.go index 3690eca8e..77b083ae2 100644 --- a/syncapi/storage/postgres/deltas/20210112130000_sendtodevice_sentcolumn.go +++ b/syncapi/storage/postgres/deltas/20210112130000_sendtodevice_sentcolumn.go @@ -15,18 +15,13 @@ package deltas import ( + "context" "database/sql" "fmt" - - "github.com/matrix-org/dendrite/internal/sqlutil" ) -func LoadRemoveSendToDeviceSentColumn(m *sqlutil.Migrations) { - m.AddMigration(UpRemoveSendToDeviceSentColumn, DownRemoveSendToDeviceSentColumn) -} - -func UpRemoveSendToDeviceSentColumn(tx *sql.Tx) error { - _, err := tx.Exec(` +func UpRemoveSendToDeviceSentColumn(ctx context.Context, tx *sql.Tx) error { + _, err := tx.ExecContext(ctx, ` ALTER TABLE syncapi_send_to_device DROP COLUMN IF EXISTS sent_by_token; `) @@ -36,8 +31,8 @@ func UpRemoveSendToDeviceSentColumn(tx *sql.Tx) error { return nil } -func DownRemoveSendToDeviceSentColumn(tx *sql.Tx) error { - _, err := tx.Exec(` +func DownRemoveSendToDeviceSentColumn(ctx context.Context, tx *sql.Tx) error { + _, err := tx.ExecContext(ctx, ` ALTER TABLE syncapi_send_to_device ADD COLUMN IF NOT EXISTS sent_by_token TEXT; `) diff --git a/syncapi/storage/postgres/deltas/2022061412000000_history_visibility_column.go b/syncapi/storage/postgres/deltas/2022061412000000_history_visibility_column.go index 2b4b65287..29008adef 100644 --- a/syncapi/storage/postgres/deltas/2022061412000000_history_visibility_column.go +++ b/syncapi/storage/postgres/deltas/2022061412000000_history_visibility_column.go @@ -15,20 +15,24 @@ package deltas import ( + "context" "database/sql" "fmt" - - "github.com/matrix-org/dendrite/internal/sqlutil" ) -func LoadAddHistoryVisibilityColumn(m *sqlutil.Migrations) { - m.AddMigration(UpAddHistoryVisibilityColumn, DownAddHistoryVisibilityColumn) -} - -func UpAddHistoryVisibilityColumn(tx *sql.Tx) error { - _, err := tx.Exec(` +func UpAddHistoryVisibilityColumnOutputRoomEvents(ctx context.Context, tx *sql.Tx) error { + _, err := tx.ExecContext(ctx, ` ALTER TABLE syncapi_output_room_events ADD COLUMN IF NOT EXISTS history_visibility SMALLINT NOT NULL DEFAULT 2; UPDATE syncapi_output_room_events SET history_visibility = 4 WHERE type IN ('m.room.message', 'm.room.encrypted'); + `) + if err != nil { + return fmt.Errorf("failed to execute upgrade: %w", err) + } + return nil +} + +func UpAddHistoryVisibilityColumnCurrentRoomState(ctx context.Context, tx *sql.Tx) error { + _, err := tx.ExecContext(ctx, ` ALTER TABLE syncapi_current_room_state ADD COLUMN IF NOT EXISTS history_visibility SMALLINT NOT NULL DEFAULT 2; UPDATE syncapi_current_room_state SET history_visibility = 4 WHERE type IN ('m.room.message', 'm.room.encrypted'); `) @@ -38,8 +42,8 @@ func UpAddHistoryVisibilityColumn(tx *sql.Tx) error { return nil } -func DownAddHistoryVisibilityColumn(tx *sql.Tx) error { - _, err := tx.Exec(` +func DownAddHistoryVisibilityColumn(ctx context.Context, tx *sql.Tx) error { + _, err := tx.ExecContext(ctx, ` ALTER TABLE 
syncapi_output_room_events DROP COLUMN IF EXISTS history_visibility; ALTER TABLE syncapi_current_room_state DROP COLUMN IF EXISTS history_visibility; `) diff --git a/syncapi/storage/postgres/output_room_events_table.go b/syncapi/storage/postgres/output_room_events_table.go index ddef27383..34ff6700f 100644 --- a/syncapi/storage/postgres/output_room_events_table.go +++ b/syncapi/storage/postgres/output_room_events_table.go @@ -23,6 +23,7 @@ import ( "github.com/matrix-org/dendrite/internal" "github.com/matrix-org/dendrite/roomserver/api" + "github.com/matrix-org/dendrite/syncapi/storage/postgres/deltas" "github.com/matrix-org/dendrite/syncapi/storage/tables" "github.com/matrix-org/dendrite/syncapi/types" @@ -188,6 +189,17 @@ func NewPostgresEventsTable(db *sql.DB) (tables.Events, error) { if err != nil { return nil, err } + + m := sqlutil.NewMigrator(db) + m.AddMigrations(sqlutil.Migration{ + Version: "syncapi: add history visibility column (output_room_events)", + Up: deltas.UpAddHistoryVisibilityColumnOutputRoomEvents, + }) + err = m.Up(context.Background()) + if err != nil { + return nil, err + } + return s, sqlutil.StatementList{ {&s.insertEventStmt, insertEventSQL}, {&s.selectEventsStmt, selectEventsSQL}, diff --git a/syncapi/storage/postgres/receipt_table.go b/syncapi/storage/postgres/receipt_table.go index 2a42ffd74..bbddaa939 100644 --- a/syncapi/storage/postgres/receipt_table.go +++ b/syncapi/storage/postgres/receipt_table.go @@ -23,6 +23,7 @@ import ( "github.com/matrix-org/dendrite/internal" "github.com/matrix-org/dendrite/internal/sqlutil" + "github.com/matrix-org/dendrite/syncapi/storage/postgres/deltas" "github.com/matrix-org/dendrite/syncapi/storage/tables" "github.com/matrix-org/dendrite/syncapi/types" "github.com/matrix-org/gomatrixserverlib" @@ -73,6 +74,15 @@ func NewPostgresReceiptsTable(db *sql.DB) (tables.Receipts, error) { if err != nil { return nil, err } + m := sqlutil.NewMigrator(db) + m.AddMigrations(sqlutil.Migration{ + Version: "syncapi: fix sequences", + Up: deltas.UpFixSequences, + }) + err = m.Up(context.Background()) + if err != nil { + return nil, err + } r := &receiptStatements{ db: db, } diff --git a/syncapi/storage/postgres/send_to_device_table.go b/syncapi/storage/postgres/send_to_device_table.go index 96d6844fd..2734fef3e 100644 --- a/syncapi/storage/postgres/send_to_device_table.go +++ b/syncapi/storage/postgres/send_to_device_table.go @@ -21,6 +21,7 @@ import ( "github.com/matrix-org/dendrite/internal" "github.com/matrix-org/dendrite/internal/sqlutil" + "github.com/matrix-org/dendrite/syncapi/storage/postgres/deltas" "github.com/matrix-org/dendrite/syncapi/storage/tables" "github.com/matrix-org/dendrite/syncapi/types" "github.com/sirupsen/logrus" @@ -76,6 +77,15 @@ func NewPostgresSendToDeviceTable(db *sql.DB) (tables.SendToDevice, error) { if err != nil { return nil, err } + m := sqlutil.NewMigrator(db) + m.AddMigrations(sqlutil.Migration{ + Version: "syncapi: drop sent_by_token", + Up: deltas.UpRemoveSendToDeviceSentColumn, + }) + err = m.Up(context.Background()) + if err != nil { + return nil, err + } if s.insertSendToDeviceMessageStmt, err = db.Prepare(insertSendToDeviceMessageSQL); err != nil { return nil, err } diff --git a/syncapi/storage/postgres/syncserver.go b/syncapi/storage/postgres/syncserver.go index c6121e419..a044716ce 100644 --- a/syncapi/storage/postgres/syncserver.go +++ b/syncapi/storage/postgres/syncserver.go @@ -23,7 +23,6 @@ import ( "github.com/matrix-org/dendrite/internal/sqlutil" "github.com/matrix-org/dendrite/setup/base" 
"github.com/matrix-org/dendrite/setup/config" - "github.com/matrix-org/dendrite/syncapi/storage/postgres/deltas" "github.com/matrix-org/dendrite/syncapi/storage/shared" ) @@ -42,16 +41,18 @@ func NewDatabase(base *base.BaseDendrite, dbProperties *config.DatabaseOptions) if d.db, d.writer, err = base.DatabaseConnection(dbProperties, sqlutil.NewDummyWriter()); err != nil { return nil, err } - if _, err = d.db.Exec(outputRoomEventsSchema); err != nil { - return nil, err - } - if _, err = d.db.Exec(currentRoomStateSchema); err != nil { - return nil, err - } accountData, err := NewPostgresAccountDataTable(d.db) if err != nil { return nil, err } + events, err := NewPostgresEventsTable(d.db) + if err != nil { + return nil, err + } + currState, err := NewPostgresCurrentRoomStateTable(d.db) + if err != nil { + return nil, err + } invites, err := NewPostgresInvitesTable(d.db) if err != nil { return nil, err @@ -96,22 +97,6 @@ func NewDatabase(base *base.BaseDendrite, dbProperties *config.DatabaseOptions) if err != nil { return nil, err } - m := sqlutil.NewMigrations() - deltas.LoadFixSequences(m) - deltas.LoadRemoveSendToDeviceSentColumn(m) - deltas.LoadAddHistoryVisibilityColumn(m) - if err = m.RunDeltas(d.db, dbProperties); err != nil { - return nil, err - } - // prepare statements after the migrations have run - events, err := NewPostgresEventsTable(d.db) - if err != nil { - return nil, err - } - currState, err := NewPostgresCurrentRoomStateTable(d.db) - if err != nil { - return nil, err - } d.Database = shared.Database{ DB: d.db, Writer: d.writer, diff --git a/syncapi/storage/sqlite3/current_room_state_table.go b/syncapi/storage/sqlite3/current_room_state_table.go index 501d0ee98..e19298aee 100644 --- a/syncapi/storage/sqlite3/current_room_state_table.go +++ b/syncapi/storage/sqlite3/current_room_state_table.go @@ -24,6 +24,7 @@ import ( "github.com/matrix-org/dendrite/internal" "github.com/matrix-org/dendrite/internal/sqlutil" + "github.com/matrix-org/dendrite/syncapi/storage/sqlite3/deltas" "github.com/matrix-org/dendrite/syncapi/storage/tables" "github.com/matrix-org/dendrite/syncapi/types" "github.com/matrix-org/gomatrixserverlib" @@ -120,6 +121,17 @@ func NewSqliteCurrentRoomStateTable(db *sql.DB, streamID *StreamIDStatements) (t if err != nil { return nil, err } + + m := sqlutil.NewMigrator(db) + m.AddMigrations(sqlutil.Migration{ + Version: "syncapi: add history visibility column (current_room_state)", + Up: deltas.UpAddHistoryVisibilityColumnCurrentRoomState, + }) + err = m.Up(context.Background()) + if err != nil { + return nil, err + } + if s.upsertRoomStateStmt, err = db.Prepare(upsertRoomStateSQL); err != nil { return nil, err } diff --git a/syncapi/storage/sqlite3/deltas/20201211125500_sequences.go b/syncapi/storage/sqlite3/deltas/20201211125500_sequences.go index 8e7ebff86..f476335d5 100644 --- a/syncapi/storage/sqlite3/deltas/20201211125500_sequences.go +++ b/syncapi/storage/sqlite3/deltas/20201211125500_sequences.go @@ -15,24 +15,13 @@ package deltas import ( + "context" "database/sql" "fmt" - - "github.com/matrix-org/dendrite/internal/sqlutil" - "github.com/pressly/goose" ) -func LoadFromGoose() { - goose.AddMigration(UpFixSequences, DownFixSequences) - goose.AddMigration(UpRemoveSendToDeviceSentColumn, DownRemoveSendToDeviceSentColumn) -} - -func LoadFixSequences(m *sqlutil.Migrations) { - m.AddMigration(UpFixSequences, DownFixSequences) -} - -func UpFixSequences(tx *sql.Tx) error { - _, err := tx.Exec(` +func UpFixSequences(ctx context.Context, tx *sql.Tx) error { + _, err := 
tx.ExecContext(ctx, ` -- We need to delete all of the existing receipts because the indexes -- will be wrong, and we'll get primary key violations if we try to -- reuse existing stream IDs from a different sequence. @@ -45,8 +34,8 @@ func UpFixSequences(tx *sql.Tx) error { return nil } -func DownFixSequences(tx *sql.Tx) error { - _, err := tx.Exec(` +func DownFixSequences(ctx context.Context, tx *sql.Tx) error { + _, err := tx.ExecContext(ctx, ` -- We need to delete all of the existing receipts because the indexes -- will be wrong, and we'll get primary key violations if we try to -- reuse existing stream IDs from a different sequence. diff --git a/syncapi/storage/sqlite3/deltas/20210112130000_sendtodevice_sentcolumn.go b/syncapi/storage/sqlite3/deltas/20210112130000_sendtodevice_sentcolumn.go index e0c514102..34cae2241 100644 --- a/syncapi/storage/sqlite3/deltas/20210112130000_sendtodevice_sentcolumn.go +++ b/syncapi/storage/sqlite3/deltas/20210112130000_sendtodevice_sentcolumn.go @@ -15,18 +15,13 @@ package deltas import ( + "context" "database/sql" "fmt" - - "github.com/matrix-org/dendrite/internal/sqlutil" ) -func LoadRemoveSendToDeviceSentColumn(m *sqlutil.Migrations) { - m.AddMigration(UpRemoveSendToDeviceSentColumn, DownRemoveSendToDeviceSentColumn) -} - -func UpRemoveSendToDeviceSentColumn(tx *sql.Tx) error { - _, err := tx.Exec(` +func UpRemoveSendToDeviceSentColumn(ctx context.Context, tx *sql.Tx) error { + _, err := tx.ExecContext(ctx, ` CREATE TEMPORARY TABLE syncapi_send_to_device_backup(id, user_id, device_id, content); INSERT INTO syncapi_send_to_device_backup SELECT id, user_id, device_id, content FROM syncapi_send_to_device; DROP TABLE syncapi_send_to_device; @@ -45,8 +40,8 @@ func UpRemoveSendToDeviceSentColumn(tx *sql.Tx) error { return nil } -func DownRemoveSendToDeviceSentColumn(tx *sql.Tx) error { - _, err := tx.Exec(` +func DownRemoveSendToDeviceSentColumn(ctx context.Context, tx *sql.Tx) error { + _, err := tx.ExecContext(ctx, ` CREATE TEMPORARY TABLE syncapi_send_to_device_backup(id, user_id, device_id, content); INSERT INTO syncapi_send_to_device_backup SELECT id, user_id, device_id, content FROM syncapi_send_to_device; DROP TABLE syncapi_send_to_device; diff --git a/syncapi/storage/sqlite3/deltas/2022061412000000_history_visibility_column.go b/syncapi/storage/sqlite3/deltas/2022061412000000_history_visibility_column.go index f6bcaddfb..079177217 100644 --- a/syncapi/storage/sqlite3/deltas/2022061412000000_history_visibility_column.go +++ b/syncapi/storage/sqlite3/deltas/2022061412000000_history_visibility_column.go @@ -15,36 +15,36 @@ package deltas import ( + "context" "database/sql" "fmt" - - "github.com/matrix-org/dendrite/internal/sqlutil" ) -func LoadAddHistoryVisibilityColumn(m *sqlutil.Migrations) { - m.AddMigration(UpAddHistoryVisibilityColumn, DownAddHistoryVisibilityColumn) -} - -func UpAddHistoryVisibilityColumn(tx *sql.Tx) error { +func UpAddHistoryVisibilityColumnOutputRoomEvents(ctx context.Context, tx *sql.Tx) error { // SQLite doesn't have "if exists", so check if the column exists. If the query doesn't return an error, it already exists. // Required for unit tests, as otherwise a duplicate column error will show up. 
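[Editorial note] That probe-before-ALTER pattern recurs in every SQLite delta in this patch, because SQLite has no ADD COLUMN IF NOT EXISTS. Factored out, it is roughly the following; the columnExists helper is ours, not part of the patch:

package sketch

import (
	"context"
	"database/sql"
	"fmt"
)

// columnExists reports whether a column can be selected from a table, which is
// how the deltas above decide to return early instead of hitting a duplicate
// column error. Table and column names are compile-time constants, not user input.
func columnExists(ctx context.Context, tx *sql.Tx, table, column string) bool {
	rows, err := tx.QueryContext(ctx, fmt.Sprintf("SELECT %s FROM %s LIMIT 1", column, table))
	if err != nil {
		return false
	}
	_ = rows.Close()
	return true
}

A delta would then start with something like: if columnExists(ctx, tx, "syncapi_output_room_events", "history_visibility") { return nil } before issuing the ALTER TABLE.
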
- _, err := tx.Query("SELECT history_visibility FROM syncapi_output_room_events LIMIT 1") + _, err := tx.QueryContext(ctx, "SELECT history_visibility FROM syncapi_output_room_events LIMIT 1") if err == nil { return nil } - _, err = tx.Exec(` + _, err = tx.ExecContext(ctx, ` ALTER TABLE syncapi_output_room_events ADD COLUMN history_visibility SMALLINT NOT NULL DEFAULT 2; UPDATE syncapi_output_room_events SET history_visibility = 4 WHERE type IN ('m.room.message', 'm.room.encrypted'); `) if err != nil { return fmt.Errorf("failed to execute upgrade: %w", err) } + return nil +} - _, err = tx.Query("SELECT history_visibility FROM syncapi_current_room_state LIMIT 1") +func UpAddHistoryVisibilityColumnCurrentRoomState(ctx context.Context, tx *sql.Tx) error { + // SQLite doesn't have "if exists", so check if the column exists. If the query doesn't return an error, it already exists. + // Required for unit tests, as otherwise a duplicate column error will show up. + _, err := tx.QueryContext(ctx, "SELECT history_visibility FROM syncapi_current_room_state LIMIT 1") if err == nil { return nil } - _, err = tx.Exec(` + _, err = tx.ExecContext(ctx, ` ALTER TABLE syncapi_current_room_state ADD COLUMN history_visibility SMALLINT NOT NULL DEFAULT 2; UPDATE syncapi_current_room_state SET history_visibility = 4 WHERE type IN ('m.room.message', 'm.room.encrypted'); `) @@ -54,25 +54,25 @@ func UpAddHistoryVisibilityColumn(tx *sql.Tx) error { return nil } -func DownAddHistoryVisibilityColumn(tx *sql.Tx) error { +func DownAddHistoryVisibilityColumn(ctx context.Context, tx *sql.Tx) error { // SQLite doesn't have "if exists", so check if the column exists. - _, err := tx.Query("SELECT history_visibility FROM syncapi_output_room_events LIMIT 1") + _, err := tx.QueryContext(ctx, "SELECT history_visibility FROM syncapi_output_room_events LIMIT 1") if err != nil { // The column probably doesn't exist return nil } - _, err = tx.Exec(` + _, err = tx.ExecContext(ctx, ` ALTER TABLE syncapi_output_room_events DROP COLUMN history_visibility; `) if err != nil { return fmt.Errorf("failed to execute downgrade: %w", err) } - _, err = tx.Query("SELECT history_visibility FROM syncapi_current_room_state LIMIT 1") + _, err = tx.QueryContext(ctx, "SELECT history_visibility FROM syncapi_current_room_state LIMIT 1") if err != nil { // The column probably doesn't exist return nil } - _, err = tx.Exec(` + _, err = tx.ExecContext(ctx, ` ALTER TABLE syncapi_current_room_state DROP COLUMN history_visibility; `) if err != nil { diff --git a/syncapi/storage/sqlite3/output_room_events_table.go b/syncapi/storage/sqlite3/output_room_events_table.go index b3dcb44cb..de389fa9b 100644 --- a/syncapi/storage/sqlite3/output_room_events_table.go +++ b/syncapi/storage/sqlite3/output_room_events_table.go @@ -25,6 +25,7 @@ import ( "github.com/matrix-org/dendrite/internal" "github.com/matrix-org/dendrite/roomserver/api" + "github.com/matrix-org/dendrite/syncapi/storage/sqlite3/deltas" "github.com/matrix-org/dendrite/syncapi/storage/tables" "github.com/matrix-org/dendrite/syncapi/types" @@ -136,6 +137,17 @@ func NewSqliteEventsTable(db *sql.DB, streamID *StreamIDStatements) (tables.Even if err != nil { return nil, err } + + m := sqlutil.NewMigrator(db) + m.AddMigrations(sqlutil.Migration{ + Version: "syncapi: add history visibility column (output_room_events)", + Up: deltas.UpAddHistoryVisibilityColumnOutputRoomEvents, + }) + err = m.Up(context.Background()) + if err != nil { + return nil, err + } + return s, sqlutil.StatementList{ {&s.insertEventStmt, 
insertEventSQL}, {&s.selectMaxEventIDStmt, selectMaxEventIDSQL}, diff --git a/syncapi/storage/sqlite3/receipt_table.go b/syncapi/storage/sqlite3/receipt_table.go index bd778bf3c..31adb005b 100644 --- a/syncapi/storage/sqlite3/receipt_table.go +++ b/syncapi/storage/sqlite3/receipt_table.go @@ -22,6 +22,7 @@ import ( "github.com/matrix-org/dendrite/internal" "github.com/matrix-org/dendrite/internal/sqlutil" + "github.com/matrix-org/dendrite/syncapi/storage/sqlite3/deltas" "github.com/matrix-org/dendrite/syncapi/storage/tables" "github.com/matrix-org/dendrite/syncapi/types" "github.com/matrix-org/gomatrixserverlib" @@ -70,6 +71,15 @@ func NewSqliteReceiptsTable(db *sql.DB, streamID *StreamIDStatements) (tables.Re if err != nil { return nil, err } + m := sqlutil.NewMigrator(db) + m.AddMigrations(sqlutil.Migration{ + Version: "syncapi: fix sequences", + Up: deltas.UpFixSequences, + }) + err = m.Up(context.Background()) + if err != nil { + return nil, err + } r := &receiptStatements{ db: db, streamIDStatements: streamID, diff --git a/syncapi/storage/sqlite3/send_to_device_table.go b/syncapi/storage/sqlite3/send_to_device_table.go index 5285acbe6..d05d3fe72 100644 --- a/syncapi/storage/sqlite3/send_to_device_table.go +++ b/syncapi/storage/sqlite3/send_to_device_table.go @@ -21,6 +21,7 @@ import ( "github.com/matrix-org/dendrite/internal" "github.com/matrix-org/dendrite/internal/sqlutil" + "github.com/matrix-org/dendrite/syncapi/storage/sqlite3/deltas" "github.com/matrix-org/dendrite/syncapi/storage/tables" "github.com/matrix-org/dendrite/syncapi/types" "github.com/sirupsen/logrus" @@ -76,6 +77,15 @@ func NewSqliteSendToDeviceTable(db *sql.DB) (tables.SendToDevice, error) { if err != nil { return nil, err } + m := sqlutil.NewMigrator(db) + m.AddMigrations(sqlutil.Migration{ + Version: "syncapi: drop sent_by_token", + Up: deltas.UpRemoveSendToDeviceSentColumn, + }) + err = m.Up(context.Background()) + if err != nil { + return nil, err + } if s.insertSendToDeviceMessageStmt, err = db.Prepare(insertSendToDeviceMessageSQL); err != nil { return nil, err } diff --git a/syncapi/storage/sqlite3/syncserver.go b/syncapi/storage/sqlite3/syncserver.go index 39ceec81d..65b2bb38a 100644 --- a/syncapi/storage/sqlite3/syncserver.go +++ b/syncapi/storage/sqlite3/syncserver.go @@ -22,7 +22,6 @@ import ( "github.com/matrix-org/dendrite/setup/base" "github.com/matrix-org/dendrite/setup/config" "github.com/matrix-org/dendrite/syncapi/storage/shared" - "github.com/matrix-org/dendrite/syncapi/storage/sqlite3/deltas" ) // SyncServerDatasource represents a sync server datasource which manages @@ -42,26 +41,28 @@ func NewDatabase(base *base.BaseDendrite, dbProperties *config.DatabaseOptions) if d.db, d.writer, err = base.DatabaseConnection(dbProperties, sqlutil.NewExclusiveWriter()); err != nil { return nil, err } - if err = d.prepare(dbProperties); err != nil { + if err = d.prepare(); err != nil { return nil, err } return &d, nil } -func (d *SyncServerDatasource) prepare(dbProperties *config.DatabaseOptions) (err error) { +func (d *SyncServerDatasource) prepare() (err error) { if err = d.streamID.Prepare(d.db); err != nil { return err } - if _, err = d.db.Exec(outputRoomEventsSchema); err != nil { - return err - } - if _, err = d.db.Exec(currentRoomStateSchema); err != nil { - return err - } accountData, err := NewSqliteAccountDataTable(d.db, &d.streamID) if err != nil { return err } + events, err := NewSqliteEventsTable(d.db, &d.streamID) + if err != nil { + return err + } + roomState, err := 
NewSqliteCurrentRoomStateTable(d.db, &d.streamID) + if err != nil { + return err + } invites, err := NewSqliteInvitesTable(d.db, &d.streamID) if err != nil { return err @@ -106,22 +107,6 @@ func (d *SyncServerDatasource) prepare(dbProperties *config.DatabaseOptions) (er if err != nil { return err } - m := sqlutil.NewMigrations() - deltas.LoadFixSequences(m) - deltas.LoadRemoveSendToDeviceSentColumn(m) - deltas.LoadAddHistoryVisibilityColumn(m) - if err = m.RunDeltas(d.db, dbProperties); err != nil { - return err - } - // prepare statements after the migrations have run - events, err := NewSqliteEventsTable(d.db, &d.streamID) - if err != nil { - return err - } - roomState, err := NewSqliteCurrentRoomStateTable(d.db, &d.streamID) - if err != nil { - return err - } d.Database = shared.Database{ DB: d.db, Writer: d.writer, diff --git a/userapi/storage/postgres/accounts_table.go b/userapi/storage/postgres/accounts_table.go index e3cab56ee..33fb6dd42 100644 --- a/userapi/storage/postgres/accounts_table.go +++ b/userapi/storage/postgres/accounts_table.go @@ -24,6 +24,7 @@ import ( "github.com/matrix-org/dendrite/clientapi/userutil" "github.com/matrix-org/dendrite/internal/sqlutil" "github.com/matrix-org/dendrite/userapi/api" + "github.com/matrix-org/dendrite/userapi/storage/postgres/deltas" "github.com/matrix-org/dendrite/userapi/storage/tables" log "github.com/sirupsen/logrus" @@ -85,6 +86,23 @@ func NewPostgresAccountsTable(db *sql.DB, serverName gomatrixserverlib.ServerNam if err != nil { return nil, err } + m := sqlutil.NewMigrator(db) + m.AddMigrations([]sqlutil.Migration{ + { + Version: "userapi: add is active", + Up: deltas.UpIsActive, + Down: deltas.DownIsActive, + }, + { + Version: "userapi: add account type", + Up: deltas.UpAddAccountType, + Down: deltas.DownAddAccountType, + }, + }...) 
+ err = m.Up(context.Background()) + if err != nil { + return nil, err + } return s, sqlutil.StatementList{ {&s.insertAccountStmt, insertAccountSQL}, {&s.updatePasswordStmt, updatePasswordSQL}, diff --git a/userapi/storage/postgres/deltas/20200929203058_is_active.go b/userapi/storage/postgres/deltas/20200929203058_is_active.go index 32d3235be..24f87e073 100644 --- a/userapi/storage/postgres/deltas/20200929203058_is_active.go +++ b/userapi/storage/postgres/deltas/20200929203058_is_active.go @@ -1,33 +1,21 @@ package deltas import ( + "context" "database/sql" "fmt" - - "github.com/pressly/goose" - - "github.com/matrix-org/dendrite/internal/sqlutil" ) -func LoadFromGoose() { - goose.AddMigration(UpIsActive, DownIsActive) - goose.AddMigration(UpAddAccountType, DownAddAccountType) -} - -func LoadIsActive(m *sqlutil.Migrations) { - m.AddMigration(UpIsActive, DownIsActive) -} - -func UpIsActive(tx *sql.Tx) error { - _, err := tx.Exec("ALTER TABLE account_accounts ADD COLUMN IF NOT EXISTS is_deactivated BOOLEAN DEFAULT FALSE;") +func UpIsActive(ctx context.Context, tx *sql.Tx) error { + _, err := tx.ExecContext(ctx, "ALTER TABLE account_accounts ADD COLUMN IF NOT EXISTS is_deactivated BOOLEAN DEFAULT FALSE;") if err != nil { return fmt.Errorf("failed to execute upgrade: %w", err) } return nil } -func DownIsActive(tx *sql.Tx) error { - _, err := tx.Exec("ALTER TABLE account_accounts DROP COLUMN is_deactivated;") +func DownIsActive(ctx context.Context, tx *sql.Tx) error { + _, err := tx.ExecContext(ctx, "ALTER TABLE account_accounts DROP COLUMN is_deactivated;") if err != nil { return fmt.Errorf("failed to execute downgrade: %w", err) } diff --git a/userapi/storage/postgres/deltas/20201001204705_last_seen_ts_ip.go b/userapi/storage/postgres/deltas/20201001204705_last_seen_ts_ip.go index 1bbb0a9d3..edd3353f0 100644 --- a/userapi/storage/postgres/deltas/20201001204705_last_seen_ts_ip.go +++ b/userapi/storage/postgres/deltas/20201001204705_last_seen_ts_ip.go @@ -1,18 +1,13 @@ package deltas import ( + "context" "database/sql" "fmt" - - "github.com/matrix-org/dendrite/internal/sqlutil" ) -func LoadLastSeenTSIP(m *sqlutil.Migrations) { - m.AddMigration(UpLastSeenTSIP, DownLastSeenTSIP) -} - -func UpLastSeenTSIP(tx *sql.Tx) error { - _, err := tx.Exec(` +func UpLastSeenTSIP(ctx context.Context, tx *sql.Tx) error { + _, err := tx.ExecContext(ctx, ` ALTER TABLE device_devices ADD COLUMN IF NOT EXISTS last_seen_ts BIGINT NOT NULL DEFAULT EXTRACT(EPOCH FROM CURRENT_TIMESTAMP)*1000; ALTER TABLE device_devices ADD COLUMN IF NOT EXISTS ip TEXT; ALTER TABLE device_devices ADD COLUMN IF NOT EXISTS user_agent TEXT;`) @@ -22,8 +17,8 @@ ALTER TABLE device_devices ADD COLUMN IF NOT EXISTS user_agent TEXT;`) return nil } -func DownLastSeenTSIP(tx *sql.Tx) error { - _, err := tx.Exec(` +func DownLastSeenTSIP(ctx context.Context, tx *sql.Tx) error { + _, err := tx.ExecContext(ctx, ` ALTER TABLE device_devices DROP COLUMN last_seen_ts; ALTER TABLE device_devices DROP COLUMN ip; ALTER TABLE device_devices DROP COLUMN user_agent;`) diff --git a/userapi/storage/postgres/deltas/2022021013023800_add_account_type.go b/userapi/storage/postgres/deltas/2022021013023800_add_account_type.go index 2fae00cb9..eb7c3a958 100644 --- a/userapi/storage/postgres/deltas/2022021013023800_add_account_type.go +++ b/userapi/storage/postgres/deltas/2022021013023800_add_account_type.go @@ -1,20 +1,15 @@ package deltas import ( + "context" "database/sql" "fmt" - - "github.com/matrix-org/dendrite/internal/sqlutil" ) -func LoadAddAccountType(m 
*sqlutil.Migrations) { - m.AddMigration(UpAddAccountType, DownAddAccountType) -} - -func UpAddAccountType(tx *sql.Tx) error { +func UpAddAccountType(ctx context.Context, tx *sql.Tx) error { // initially set every account to useraccount, change appservice and guest accounts afterwards // (user = 1, guest = 2, admin = 3, appservice = 4) - _, err := tx.Exec(`ALTER TABLE account_accounts ADD COLUMN IF NOT EXISTS account_type SMALLINT NOT NULL DEFAULT 1; + _, err := tx.ExecContext(ctx, `ALTER TABLE account_accounts ADD COLUMN IF NOT EXISTS account_type SMALLINT NOT NULL DEFAULT 1; UPDATE account_accounts SET account_type = 4 WHERE appservice_id <> ''; UPDATE account_accounts SET account_type = 2 WHERE localpart ~ '^[0-9]+$'; ALTER TABLE account_accounts ALTER COLUMN account_type DROP DEFAULT;`, @@ -25,8 +20,8 @@ ALTER TABLE account_accounts ALTER COLUMN account_type DROP DEFAULT;`, return nil } -func DownAddAccountType(tx *sql.Tx) error { - _, err := tx.Exec("ALTER TABLE account_accounts DROP COLUMN account_type;") +func DownAddAccountType(ctx context.Context, tx *sql.Tx) error { + _, err := tx.ExecContext(ctx, "ALTER TABLE account_accounts DROP COLUMN account_type;") if err != nil { return fmt.Errorf("failed to execute downgrade: %w", err) } diff --git a/userapi/storage/postgres/devices_table.go b/userapi/storage/postgres/devices_table.go index ccb776672..f65681aae 100644 --- a/userapi/storage/postgres/devices_table.go +++ b/userapi/storage/postgres/devices_table.go @@ -24,6 +24,7 @@ import ( "github.com/matrix-org/dendrite/internal" "github.com/matrix-org/dendrite/internal/sqlutil" "github.com/matrix-org/dendrite/userapi/api" + "github.com/matrix-org/dendrite/userapi/storage/postgres/deltas" "github.com/matrix-org/dendrite/userapi/storage/tables" "github.com/matrix-org/gomatrixserverlib" ) @@ -120,6 +121,15 @@ func NewPostgresDevicesTable(db *sql.DB, serverName gomatrixserverlib.ServerName if err != nil { return nil, err } + m := sqlutil.NewMigrator(db) + m.AddMigrations(sqlutil.Migration{ + Version: "userapi: add last_seen_ts", + Up: deltas.UpLastSeenTSIP, + }) + err = m.Up(context.Background()) + if err != nil { + return nil, err + } return s, sqlutil.StatementList{ {&s.insertDeviceStmt, insertDeviceSQL}, {&s.selectDeviceByTokenStmt, selectDeviceByTokenSQL}, diff --git a/userapi/storage/postgres/storage.go b/userapi/storage/postgres/storage.go index b9afb5a56..7d3b9b6a5 100644 --- a/userapi/storage/postgres/storage.go +++ b/userapi/storage/postgres/storage.go @@ -23,7 +23,6 @@ import ( "github.com/matrix-org/dendrite/internal/sqlutil" "github.com/matrix-org/dendrite/setup/base" "github.com/matrix-org/dendrite/setup/config" - "github.com/matrix-org/dendrite/userapi/storage/postgres/deltas" "github.com/matrix-org/dendrite/userapi/storage/shared" // Import the postgres database driver. 
@@ -37,19 +36,6 @@ func NewDatabase(base *base.BaseDendrite, dbProperties *config.DatabaseOptions, return nil, err } - m := sqlutil.NewMigrations() - if _, err = db.Exec(accountsSchema); err != nil { - // do this so that the migration can and we don't fail on - // preparing statements for columns that don't exist yet - return nil, err - } - deltas.LoadIsActive(m) - //deltas.LoadLastSeenTSIP(m) - deltas.LoadAddAccountType(m) - if err = m.RunDeltas(db, dbProperties); err != nil { - return nil, err - } - accountDataTable, err := NewPostgresAccountDataTable(db) if err != nil { return nil, fmt.Errorf("NewPostgresAccountDataTable: %w", err) diff --git a/userapi/storage/sqlite3/accounts_table.go b/userapi/storage/sqlite3/accounts_table.go index 6c5fe3071..484e90056 100644 --- a/userapi/storage/sqlite3/accounts_table.go +++ b/userapi/storage/sqlite3/accounts_table.go @@ -24,6 +24,7 @@ import ( "github.com/matrix-org/dendrite/clientapi/userutil" "github.com/matrix-org/dendrite/internal/sqlutil" "github.com/matrix-org/dendrite/userapi/api" + "github.com/matrix-org/dendrite/userapi/storage/sqlite3/deltas" "github.com/matrix-org/dendrite/userapi/storage/tables" log "github.com/sirupsen/logrus" @@ -87,6 +88,23 @@ func NewSQLiteAccountsTable(db *sql.DB, serverName gomatrixserverlib.ServerName) if err != nil { return nil, err } + m := sqlutil.NewMigrator(db) + m.AddMigrations([]sqlutil.Migration{ + { + Version: "userapi: add is active", + Up: deltas.UpIsActive, + Down: deltas.DownIsActive, + }, + { + Version: "userapi: add account type", + Up: deltas.UpAddAccountType, + Down: deltas.DownAddAccountType, + }, + }...) + err = m.Up(context.Background()) + if err != nil { + return nil, err + } return s, sqlutil.StatementList{ {&s.insertAccountStmt, insertAccountSQL}, {&s.updatePasswordStmt, updatePasswordSQL}, diff --git a/userapi/storage/sqlite3/deltas/20200929203058_is_active.go b/userapi/storage/sqlite3/deltas/20200929203058_is_active.go index c69614e83..e25efc695 100644 --- a/userapi/storage/sqlite3/deltas/20200929203058_is_active.go +++ b/userapi/storage/sqlite3/deltas/20200929203058_is_active.go @@ -1,25 +1,13 @@ package deltas import ( + "context" "database/sql" "fmt" - - "github.com/pressly/goose" - - "github.com/matrix-org/dendrite/internal/sqlutil" ) -func LoadFromGoose() { - goose.AddMigration(UpIsActive, DownIsActive) - goose.AddMigration(UpAddAccountType, DownAddAccountType) -} - -func LoadIsActive(m *sqlutil.Migrations) { - m.AddMigration(UpIsActive, DownIsActive) -} - -func UpIsActive(tx *sql.Tx) error { - _, err := tx.Exec(` +func UpIsActive(ctx context.Context, tx *sql.Tx) error { + _, err := tx.ExecContext(ctx, ` ALTER TABLE account_accounts RENAME TO account_accounts_tmp; CREATE TABLE account_accounts ( localpart TEXT NOT NULL PRIMARY KEY, @@ -42,8 +30,8 @@ DROP TABLE account_accounts_tmp;`) return nil } -func DownIsActive(tx *sql.Tx) error { - _, err := tx.Exec(` +func DownIsActive(ctx context.Context, tx *sql.Tx) error { + _, err := tx.ExecContext(ctx, ` ALTER TABLE account_accounts RENAME TO account_accounts_tmp; CREATE TABLE account_accounts ( localpart TEXT NOT NULL PRIMARY KEY, diff --git a/userapi/storage/sqlite3/deltas/20201001204705_last_seen_ts_ip.go b/userapi/storage/sqlite3/deltas/20201001204705_last_seen_ts_ip.go index ebf908001..7f7e95d2d 100644 --- a/userapi/storage/sqlite3/deltas/20201001204705_last_seen_ts_ip.go +++ b/userapi/storage/sqlite3/deltas/20201001204705_last_seen_ts_ip.go @@ -1,18 +1,13 @@ package deltas import ( + "context" "database/sql" "fmt" - - 
"github.com/matrix-org/dendrite/internal/sqlutil" ) -func LoadLastSeenTSIP(m *sqlutil.Migrations) { - m.AddMigration(UpLastSeenTSIP, DownLastSeenTSIP) -} - -func UpLastSeenTSIP(tx *sql.Tx) error { - _, err := tx.Exec(` +func UpLastSeenTSIP(ctx context.Context, tx *sql.Tx) error { + _, err := tx.ExecContext(ctx, ` ALTER TABLE device_devices RENAME TO device_devices_tmp; CREATE TABLE device_devices ( access_token TEXT PRIMARY KEY, @@ -39,8 +34,8 @@ func UpLastSeenTSIP(tx *sql.Tx) error { return nil } -func DownLastSeenTSIP(tx *sql.Tx) error { - _, err := tx.Exec(` +func DownLastSeenTSIP(ctx context.Context, tx *sql.Tx) error { + _, err := tx.ExecContext(ctx, ` ALTER TABLE device_devices RENAME TO device_devices_tmp; CREATE TABLE IF NOT EXISTS device_devices ( access_token TEXT PRIMARY KEY, diff --git a/userapi/storage/sqlite3/deltas/2022021012490600_add_account_type.go b/userapi/storage/sqlite3/deltas/2022021012490600_add_account_type.go index 9b058dedd..46532698c 100644 --- a/userapi/storage/sqlite3/deltas/2022021012490600_add_account_type.go +++ b/userapi/storage/sqlite3/deltas/2022021012490600_add_account_type.go @@ -1,26 +1,15 @@ package deltas import ( + "context" "database/sql" "fmt" - - "github.com/pressly/goose" - - "github.com/matrix-org/dendrite/internal/sqlutil" ) -func init() { - goose.AddMigration(UpAddAccountType, DownAddAccountType) -} - -func LoadAddAccountType(m *sqlutil.Migrations) { - m.AddMigration(UpAddAccountType, DownAddAccountType) -} - -func UpAddAccountType(tx *sql.Tx) error { +func UpAddAccountType(ctx context.Context, tx *sql.Tx) error { // initially set every account to useraccount, change appservice and guest accounts afterwards // (user = 1, guest = 2, admin = 3, appservice = 4) - _, err := tx.Exec(`ALTER TABLE account_accounts RENAME TO account_accounts_tmp; + _, err := tx.ExecContext(ctx, `ALTER TABLE account_accounts RENAME TO account_accounts_tmp; CREATE TABLE account_accounts ( localpart TEXT NOT NULL PRIMARY KEY, created_ts BIGINT NOT NULL, @@ -45,8 +34,8 @@ DROP TABLE account_accounts_tmp;`) return nil } -func DownAddAccountType(tx *sql.Tx) error { - _, err := tx.Exec(`ALTER TABLE account_accounts DROP COLUMN account_type;`) +func DownAddAccountType(ctx context.Context, tx *sql.Tx) error { + _, err := tx.ExecContext(ctx, `ALTER TABLE account_accounts DROP COLUMN account_type;`) if err != nil { return fmt.Errorf("failed to execute downgrade: %w", err) } diff --git a/userapi/storage/sqlite3/devices_table.go b/userapi/storage/sqlite3/devices_table.go index 93291e6ad..27a7524d6 100644 --- a/userapi/storage/sqlite3/devices_table.go +++ b/userapi/storage/sqlite3/devices_table.go @@ -23,6 +23,7 @@ import ( "github.com/matrix-org/dendrite/internal" "github.com/matrix-org/dendrite/internal/sqlutil" "github.com/matrix-org/dendrite/userapi/api" + "github.com/matrix-org/dendrite/userapi/storage/sqlite3/deltas" "github.com/matrix-org/dendrite/userapi/storage/tables" "github.com/matrix-org/dendrite/clientapi/userutil" @@ -107,6 +108,15 @@ func NewSQLiteDevicesTable(db *sql.DB, serverName gomatrixserverlib.ServerName) if err != nil { return nil, err } + m := sqlutil.NewMigrator(db) + m.AddMigrations(sqlutil.Migration{ + Version: "userapi: add last_seen_ts", + Up: deltas.UpLastSeenTSIP, + }) + if err = m.Up(context.Background()); err != nil { + return nil, err + } + return s, sqlutil.StatementList{ {&s.insertDeviceStmt, insertDeviceSQL}, {&s.selectDevicesCountStmt, selectDevicesCountSQL}, diff --git a/userapi/storage/sqlite3/storage.go 
b/userapi/storage/sqlite3/storage.go index a822f687d..78b7ce588 100644 --- a/userapi/storage/sqlite3/storage.go +++ b/userapi/storage/sqlite3/storage.go @@ -25,10 +25,6 @@ import ( "github.com/matrix-org/dendrite/setup/config" "github.com/matrix-org/dendrite/userapi/storage/shared" - "github.com/matrix-org/dendrite/userapi/storage/sqlite3/deltas" - - // Import the postgres database driver. - _ "github.com/lib/pq" ) // NewDatabase creates a new accounts and profiles database @@ -38,19 +34,6 @@ func NewDatabase(base *base.BaseDendrite, dbProperties *config.DatabaseOptions, return nil, err } - m := sqlutil.NewMigrations() - if _, err = db.Exec(accountsSchema); err != nil { - // do this so that the migration can and we don't fail on - // preparing statements for columns that don't exist yet - return nil, err - } - deltas.LoadIsActive(m) - //deltas.LoadLastSeenTSIP(m) - deltas.LoadAddAccountType(m) - if err = m.RunDeltas(db, dbProperties); err != nil { - return nil, err - } - accountDataTable, err := NewSQLiteAccountDataTable(db) if err != nil { return nil, fmt.Errorf("NewSQLiteAccountDataTable: %w", err) From b836243a24b7928d9986c97d0ae66be57f71ab42 Mon Sep 17 00:00:00 2001 From: Jonathan Bartlett <34320158+Jonnobrow@users.noreply.github.com> Date: Mon, 25 Jul 2022 10:39:57 +0100 Subject: [PATCH 16/18] Add .well-known/matrix/client to clientapi (#2551) Signed-off-by: Jonathan Bartlett Co-authored-by: Neil Alexander --- clientapi/clientapi.go | 1 + clientapi/routing/routing.go | 22 +++++++++++++++++++++- dendrite-sample.monolith.yaml | 4 ++++ dendrite-sample.polylith.yaml | 4 ++++ setup/config/config_global.go | 3 +++ setup/config/config_test.go | 1 + 6 files changed, 34 insertions(+), 1 deletion(-) diff --git a/clientapi/clientapi.go b/clientapi/clientapi.go index f550c29bb..557355604 100644 --- a/clientapi/clientapi.go +++ b/clientapi/clientapi.go @@ -59,6 +59,7 @@ func AddPublicRoutes( routing.Setup( base.PublicClientAPIMux, + base.PublicWellKnownAPIMux, base.SynapseAdminMux, base.DendriteAdminMux, cfg, rsAPI, asAPI, diff --git a/clientapi/routing/routing.go b/clientapi/routing/routing.go index 0460850ef..ced4fdbcf 100644 --- a/clientapi/routing/routing.go +++ b/clientapi/routing/routing.go @@ -48,7 +48,7 @@ import ( // applied: // nolint: gocyclo func Setup( - publicAPIMux, synapseAdminRouter, dendriteAdminRouter *mux.Router, + publicAPIMux, wkMux, synapseAdminRouter, dendriteAdminRouter *mux.Router, cfg *config.ClientAPI, rsAPI roomserverAPI.ClientRoomserverAPI, asAPI appserviceAPI.AppServiceInternalAPI, @@ -74,6 +74,26 @@ func Setup( unstableFeatures["org.matrix."+msc] = true } + if cfg.Matrix.WellKnownClientName != "" { + logrus.Infof("Setting m.homeserver base_url as %s at /.well-known/matrix/client", cfg.Matrix.WellKnownClientName) + wkMux.Handle("/client", httputil.MakeExternalAPI("wellknown", func(r *http.Request) util.JSONResponse { + return util.JSONResponse{ + Code: http.StatusOK, + JSON: struct { + HomeserverName struct { + BaseUrl string `json:"base_url"` + } `json:"m.homeserver"` + }{ + HomeserverName: struct { + BaseUrl string `json:"base_url"` + }{ + BaseUrl: cfg.Matrix.WellKnownClientName, + }, + }, + } + })).Methods(http.MethodGet, http.MethodOptions) + } + publicAPIMux.Handle("/versions", httputil.MakeExternalAPI("versions", func(req *http.Request) util.JSONResponse { return util.JSONResponse{ diff --git a/dendrite-sample.monolith.yaml b/dendrite-sample.monolith.yaml index c6050e407..cc6c173e8 100644 --- a/dendrite-sample.monolith.yaml +++ b/dendrite-sample.monolith.yaml 
@@ -64,6 +64,10 @@ global: # e.g. localhost:443 well_known_server_name: "" + # The server name to delegate client-server communications to, with optional port + # e.g. localhost:443 + well_known_client_name: "" + # Lists of domains that the server will trust as identity servers to verify third # party identifiers such as phone numbers and email addresses. trusted_third_party_id_servers: diff --git a/dendrite-sample.polylith.yaml b/dendrite-sample.polylith.yaml index 723547f55..92cab19b1 100644 --- a/dendrite-sample.polylith.yaml +++ b/dendrite-sample.polylith.yaml @@ -54,6 +54,10 @@ global: # e.g. localhost:443 well_known_server_name: "" + # The server name to delegate client-server communications to, with optional port + # e.g. localhost:443 + well_known_client_name: "" + # Lists of domains that the server will trust as identity servers to verify third # party identifiers such as phone numbers and email addresses. trusted_third_party_id_servers: diff --git a/setup/config/config_global.go b/setup/config/config_global.go index 10827bb21..d4e54e203 100644 --- a/setup/config/config_global.go +++ b/setup/config/config_global.go @@ -46,6 +46,9 @@ type Global struct { // The server name to delegate server-server communications to, with optional port WellKnownServerName string `yaml:"well_known_server_name"` + // The server name to delegate client-server communications to, with optional port + WellKnownClientName string `yaml:"well_known_client_name"` + // Disables federation. Dendrite will not be able to make any outbound HTTP requests // to other servers and the federation API will not be exposed. DisableFederation bool `yaml:"disable_federation"` diff --git a/setup/config/config_test.go b/setup/config/config_test.go index b9b1e7bb5..ee7e7389c 100644 --- a/setup/config/config_test.go +++ b/setup/config/config_test.go @@ -42,6 +42,7 @@ global: key_id: ed25519:auto key_validity_period: 168h0m0s well_known_server_name: "localhost:443" + well_known_client_name: "localhost:443" trusted_third_party_id_servers: - matrix.org - vector.im From ccea23cd4083aba534b96aead02c6cd10f95f833 Mon Sep 17 00:00:00 2001 From: Neil Alexander Date: Mon, 25 Jul 2022 11:42:22 +0100 Subject: [PATCH 17/18] Remove `room_id` field from MSC2946 stripped events (closes #2588) --- go.mod | 2 +- go.sum | 4 ++-- setup/mscs/msc2946/msc2946.go | 1 - 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 79216f0fa..377a156fe 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,7 @@ require ( github.com/matrix-org/dugong v0.0.0-20210921133753-66e6b1c67e2e github.com/matrix-org/go-sqlite3-js v0.0.0-20220419092513-28aa791a1c91 github.com/matrix-org/gomatrix v0.0.0-20210324163249-be2af5ef2e16 - github.com/matrix-org/gomatrixserverlib v0.0.0-20220718085240-f08f98af7d2d + github.com/matrix-org/gomatrixserverlib v0.0.0-20220725104114-b6003e522771 github.com/matrix-org/pinecone v0.0.0-20220708135211-1ce778fcde6a github.com/matrix-org/util v0.0.0-20200807132607-55161520e1d4 github.com/mattn/go-sqlite3 v1.14.13 diff --git a/go.sum b/go.sum index 7107d4ff6..10b1638a5 100644 --- a/go.sum +++ b/go.sum @@ -341,8 +341,8 @@ github.com/matrix-org/go-sqlite3-js v0.0.0-20220419092513-28aa791a1c91/go.mod h1 github.com/matrix-org/gomatrix v0.0.0-20190528120928-7df988a63f26/go.mod h1:3fxX6gUjWyI/2Bt7J1OLhpCzOfO/bB3AiX0cJtEKud0= github.com/matrix-org/gomatrix v0.0.0-20210324163249-be2af5ef2e16 h1:ZtO5uywdd5dLDCud4r0r55eP4j9FuUNpl60Gmntcop4= github.com/matrix-org/gomatrix v0.0.0-20210324163249-be2af5ef2e16/go.mod 
h1:/gBX06Kw0exX1HrwmoBibFA98yBk/jxKpGVeyQbff+s= -github.com/matrix-org/gomatrixserverlib v0.0.0-20220718085240-f08f98af7d2d h1:BWInUURXVOW+OiifMapoRIS7i122KWdEKj6fnDFXgBo= -github.com/matrix-org/gomatrixserverlib v0.0.0-20220718085240-f08f98af7d2d/go.mod h1:jX38yp3SSLJNftBg3PXU1ayd0PCLIiDHQ4xAc9DIixk= +github.com/matrix-org/gomatrixserverlib v0.0.0-20220725104114-b6003e522771 h1:ZIPHFIPNDS9dmEbPEiJbNmyCGJtn9exfpLC7JOcn/bE= +github.com/matrix-org/gomatrixserverlib v0.0.0-20220725104114-b6003e522771/go.mod h1:jX38yp3SSLJNftBg3PXU1ayd0PCLIiDHQ4xAc9DIixk= github.com/matrix-org/pinecone v0.0.0-20220708135211-1ce778fcde6a h1:DdG8vXMlZ65EAtc4V+3t7zHZ2Gqs24pSnyXS+4BRHUs= github.com/matrix-org/pinecone v0.0.0-20220708135211-1ce778fcde6a/go.mod h1:ulJzsVOTssIVp1j/m5eI//4VpAGDkMt5NrRuAVX7wpc= github.com/matrix-org/util v0.0.0-20190711121626-527ce5ddefc7/go.mod h1:vVQlW/emklohkZnOPwD3LrZUBqdfsbiyO3p1lNV8F6U= diff --git a/setup/mscs/msc2946/msc2946.go b/setup/mscs/msc2946/msc2946.go index 61520d50e..4cffa82ad 100644 --- a/setup/mscs/msc2946/msc2946.go +++ b/setup/mscs/msc2946/msc2946.go @@ -708,7 +708,6 @@ func stripped(ev *gomatrixserverlib.Event) *gomatrixserverlib.MSC2946StrippedEve StateKey: *ev.StateKey(), Content: ev.Content(), Sender: ev.Sender(), - RoomID: ev.RoomID(), OriginServerTS: ev.OriginServerTS(), } } From 497ab4e1b7cb4225f57b87932a3dccfbbec74463 Mon Sep 17 00:00:00 2001 From: Neil Alexander Date: Mon, 25 Jul 2022 14:46:35 +0100 Subject: [PATCH 18/18] Remove `goose` from Dockerfiles --- build/docker/Dockerfile.monolith | 1 - build/docker/Dockerfile.polylith | 1 - 2 files changed, 2 deletions(-) diff --git a/build/docker/Dockerfile.monolith b/build/docker/Dockerfile.monolith index 891a3a9e0..bb02934cd 100644 --- a/build/docker/Dockerfile.monolith +++ b/build/docker/Dockerfile.monolith @@ -8,7 +8,6 @@ COPY . /build RUN mkdir -p bin RUN go build -trimpath -o bin/ ./cmd/dendrite-monolith-server -RUN go build -trimpath -o bin/ ./cmd/goose RUN go build -trimpath -o bin/ ./cmd/create-account RUN go build -trimpath -o bin/ ./cmd/generate-keys diff --git a/build/docker/Dockerfile.polylith b/build/docker/Dockerfile.polylith index ffdc35586..166ea99cb 100644 --- a/build/docker/Dockerfile.polylith +++ b/build/docker/Dockerfile.polylith @@ -8,7 +8,6 @@ COPY . /build RUN mkdir -p bin RUN go build -trimpath -o bin/ ./cmd/dendrite-polylith-multi -RUN go build -trimpath -o bin/ ./cmd/goose RUN go build -trimpath -o bin/ ./cmd/create-account RUN go build -trimpath -o bin/ ./cmd/generate-keys
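
Editor's note: across the storage patches above, the old goose-backed `sqlutil.Migrations`/`RunDeltas` flow is replaced by a per-table `sqlutil.Migrator` whose deltas take a `context.Context` and run from inside each table constructor; that is also why the final patch can drop `cmd/goose` from the Dockerfiles. The following is a minimal sketch of that pattern, assuming only the `sqlutil.NewMigrator`, `sqlutil.Migration`, `AddMigrations` and `Up` APIs exactly as they appear in the diffs; the table name, column and migration version string here are purely illustrative and not part of the patches.

package example

import (
	"context"
	"database/sql"
	"fmt"

	"github.com/matrix-org/dendrite/internal/sqlutil"
)

// UpAddExampleColumn is a delta in the new style: it receives a context and the
// migrator's transaction, and uses ExecContext rather than Exec.
func UpAddExampleColumn(ctx context.Context, tx *sql.Tx) error {
	_, err := tx.ExecContext(ctx, `ALTER TABLE example_table ADD COLUMN IF NOT EXISTS example_column TEXT;`)
	if err != nil {
		return fmt.Errorf("failed to execute upgrade: %w", err)
	}
	return nil
}

// NewExampleTable shows where the migrator now lives: inside the table
// constructor, after the schema is created but before statements are prepared,
// so each table owns and runs its own deltas.
func NewExampleTable(db *sql.DB) error {
	m := sqlutil.NewMigrator(db)
	m.AddMigrations(sqlutil.Migration{
		Version: "example: add example column",
		Up:      UpAddExampleColumn,
	})
	return m.Up(context.Background())
}

The design choice this series makes is that migrations become self-contained per table: a constructor registers only the deltas that affect its own table, versioned by a human-readable string, which removes the need for a central `RunDeltas` call in the database setup code and for the standalone goose binary.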