Merge branch 'main' of github.com:matrix-org/dendrite into s7evink/usagestats
Commit 6948542cf1

@@ -101,6 +101,11 @@ func (s *OutputRoomEventConsumer) onMessage(
     log.WithField("appservice", state.ID).Tracef("Appservice worker received %d message(s) from roomserver", len(msgs))
     events := make([]*gomatrixserverlib.HeaderedEvent, 0, len(msgs))
     for _, msg := range msgs {
+        // Only handle events we care about
+        receivedType := api.OutputType(msg.Header.Get(jetstream.RoomEventType))
+        if receivedType != api.OutputTypeNewRoomEvent && receivedType != api.OutputTypeNewInviteEvent {
+            continue
+        }
         // Parse out the event JSON
         var output api.OutputEvent
         if err := json.Unmarshal(msg.Data, &output); err != nil {

@@ -11,6 +11,41 @@ permalink: /installation/start/optimisation
 Now that you have Dendrite running, the following tweaks will improve the reliability
 and performance of your installation.
 
+## PostgreSQL connection limit
+
+A PostgreSQL database engine is configured to allow only a certain number of connections.
+This is typically controlled by the `max_connections` and `superuser_reserved_connections`
+configuration items in `postgresql.conf`. Once these limits are exceeded, **PostgreSQL will
+immediately stop accepting new connections** until some of the existing connections are closed.
+This is a common source of misconfiguration and requires particular care.
+
+If your PostgreSQL `max_connections` is set to `100` and `superuser_reserved_connections` is
+set to `3` then you have an effective connection limit of 97 database connections. It is
+therefore important to ensure that Dendrite doesn't exceed that limit, otherwise database
+queries will unexpectedly fail and this will cause problems both within Dendrite and for users.
+
+If you are also running other software that uses the same PostgreSQL database engine, then you
+must also take into account that some connections will already be used by your other software
+and therefore will not be available to Dendrite. Check the configuration of any other software
+using the same database engine for their configured connection limits and adjust your calculations
+accordingly.
+
+Dendrite has a `max_open_conns` configuration item in each `database` block to control how many
+connections it will open to the database.
+
+**If you are using the `global` database pool** then you only need to configure the
+`max_open_conns` setting once in the `global` section.
+
+**If you are defining a `database` config per component** then you will need to ensure that
+the **sum total** of all configured `max_open_conns` to a given database server does not exceed
+the connection limit. If you configure a total that adds up to more connections than are available
+then this will cause database queries to fail.
+
+You may wish to raise the `max_connections` limit on your PostgreSQL server to accommodate
+additional connections, in which case you should also update the `max_open_conns` in your
+Dendrite configuration accordingly. However, be aware that this is only advisable on particularly
+powerful servers that can handle the concurrent load of additional queries running at one time.
+
 ## File descriptor limit
 
 Most platforms have a limit on how many file descriptors a single process can open. All
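
For reference, the two PostgreSQL limits discussed above can be read straight from `pg_settings`; these are the same queries that the startup sanity check added later in this diff runs. Run them in `psql` against any database on the server:

```sql
-- Connection budget available to non-superuser clients such as Dendrite:
--   max_connections - superuser_reserved_connections   (e.g. 100 - 3 = 97)
SELECT setting::integer AS max_connections
  FROM pg_settings WHERE name = 'max_connections';
SELECT setting::integer AS superuser_reserved_connections
  FROM pg_settings WHERE name = 'superuser_reserved_connections';
```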
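To illustrate where `max_open_conns` sits when the single `global` pool is in use, here is a hedged sketch of the relevant part of a Dendrite configuration file. Key names follow recent sample `dendrite.yaml` files and the prose above; the connection string and numbers are purely illustrative, so check them against your own configuration:

```yaml
global:
  database:
    connection_string: postgresql://dendrite:itsasecret@localhost/dendrite?sslmode=disable
    max_open_conns: 90    # keep this below max_connections - superuser_reserved_connections
    max_idle_conns: 5
    conn_max_lifetime: -1
```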
@@ -79,6 +79,13 @@ func (s *OutputRoomEventConsumer) Start() error {
 // realises that it cannot update the room state using the deltas.
 func (s *OutputRoomEventConsumer) onMessage(ctx context.Context, msgs []*nats.Msg) bool {
     msg := msgs[0] // Guaranteed to exist if onMessage is called
+    receivedType := api.OutputType(msg.Header.Get(jetstream.RoomEventType))
+
+    // Only handle events we care about
+    if receivedType != api.OutputTypeNewRoomEvent && receivedType != api.OutputTypeNewInboundPeek {
+        return true
+    }
+
     // Parse out the event JSON
     var output api.OutputEvent
     if err := json.Unmarshal(msg.Data, &output); err != nil {

@@ -21,6 +21,29 @@ import (
     "strings"
 
     "github.com/blevesearch/bleve/v2"
+
+    // side effect imports to allow all possible languages
+    _ "github.com/blevesearch/bleve/v2/analysis/lang/ar"
+    _ "github.com/blevesearch/bleve/v2/analysis/lang/cjk"
+    _ "github.com/blevesearch/bleve/v2/analysis/lang/ckb"
+    _ "github.com/blevesearch/bleve/v2/analysis/lang/da"
+    _ "github.com/blevesearch/bleve/v2/analysis/lang/de"
+    _ "github.com/blevesearch/bleve/v2/analysis/lang/en"
+    _ "github.com/blevesearch/bleve/v2/analysis/lang/es"
+    _ "github.com/blevesearch/bleve/v2/analysis/lang/fa"
+    _ "github.com/blevesearch/bleve/v2/analysis/lang/fi"
+    _ "github.com/blevesearch/bleve/v2/analysis/lang/fr"
+    _ "github.com/blevesearch/bleve/v2/analysis/lang/hi"
+    _ "github.com/blevesearch/bleve/v2/analysis/lang/hr"
+    _ "github.com/blevesearch/bleve/v2/analysis/lang/hu"
+    _ "github.com/blevesearch/bleve/v2/analysis/lang/it"
+    _ "github.com/blevesearch/bleve/v2/analysis/lang/nl"
+    _ "github.com/blevesearch/bleve/v2/analysis/lang/no"
+    _ "github.com/blevesearch/bleve/v2/analysis/lang/pt"
+    _ "github.com/blevesearch/bleve/v2/analysis/lang/ro"
+    _ "github.com/blevesearch/bleve/v2/analysis/lang/ru"
+    _ "github.com/blevesearch/bleve/v2/analysis/lang/sv"
+    _ "github.com/blevesearch/bleve/v2/analysis/lang/tr"
     "github.com/blevesearch/bleve/v2/mapping"
     "github.com/matrix-org/gomatrixserverlib"
 

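These blank imports exist purely for their side effects: each `lang/xx` package registers its analyzer with bleve's registry at `init()` time, so it can later be referenced by name in an index mapping. A minimal standalone sketch of what that enables (not code from this commit; it assumes the usual bleve convention that the registered analyzer name is the language code, e.g. `"de"`):

```go
package main

import (
    "log"

    "github.com/blevesearch/bleve/v2"
    // Without this blank import, requesting the "de" analyzer below would fail.
    _ "github.com/blevesearch/bleve/v2/analysis/lang/de"
)

func main() {
    // Analyse the "content" field of "message" documents with the German analyzer.
    content := bleve.NewTextFieldMapping()
    content.Analyzer = "de"

    doc := bleve.NewDocumentMapping()
    doc.AddFieldMappingsAt("content", content)

    indexMapping := bleve.NewIndexMapping()
    indexMapping.AddDocumentMapping("message", doc)

    index, err := bleve.New("example.bleve", indexMapping)
    if err != nil {
        log.Fatal(err)
    }
    defer index.Close()
}
```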
@@ -2,6 +2,7 @@ package sqlutil
 
 import (
     "database/sql"
+    "flag"
     "fmt"
     "regexp"
 
@@ -9,6 +10,8 @@ import (
     "github.com/sirupsen/logrus"
 )
 
+var skipSanityChecks = flag.Bool("skip-db-sanity", false, "Ignore sanity checks on the database connections (NOT RECOMMENDED!)")
+
 // Open opens a database specified by its database driver name and a driver-specific data source name,
 // usually consisting of at least a database name and connection information. Includes tracing driver
 // if DENDRITE_TRACE_SQL=1
@@ -37,15 +40,39 @@ func Open(dbProperties *config.DatabaseOptions, writer Writer) (*sql.DB, error)
         return nil, err
     }
     if driverName != "sqlite3" {
-        logrus.WithFields(logrus.Fields{
-            "MaxOpenConns":    dbProperties.MaxOpenConns(),
-            "MaxIdleConns":    dbProperties.MaxIdleConns(),
-            "ConnMaxLifetime": dbProperties.ConnMaxLifetime(),
-            "dataSourceName":  regexp.MustCompile(`://[^@]*@`).ReplaceAllLiteralString(dsn, "://"),
-        }).Debug("Setting DB connection limits")
+        logger := logrus.WithFields(logrus.Fields{
+            "max_open_conns":    dbProperties.MaxOpenConns(),
+            "max_idle_conns":    dbProperties.MaxIdleConns(),
+            "conn_max_lifetime": dbProperties.ConnMaxLifetime(),
+            "data_source_name":  regexp.MustCompile(`://[^@]*@`).ReplaceAllLiteralString(dsn, "://"),
+        })
+        logger.Debug("Setting DB connection limits")
         db.SetMaxOpenConns(dbProperties.MaxOpenConns())
         db.SetMaxIdleConns(dbProperties.MaxIdleConns())
         db.SetConnMaxLifetime(dbProperties.ConnMaxLifetime())
+
+        if !*skipSanityChecks {
+            if dbProperties.MaxOpenConns() == 0 {
+                logrus.Warnf("WARNING: Configuring 'max_open_conns' to be unlimited is not recommended. This can result in bad performance or deadlocks.")
+            }
+
+            switch driverName {
+            case "postgres":
+                // Perform a quick sanity check if possible that we aren't trying to use more database
+                // connections than PostgreSQL is willing to give us.
+                var max, reserved int
+                if err := db.QueryRow("SELECT setting::integer FROM pg_settings WHERE name='max_connections';").Scan(&max); err != nil {
+                    return nil, fmt.Errorf("failed to find maximum connections: %w", err)
+                }
+                if err := db.QueryRow("SELECT setting::integer FROM pg_settings WHERE name='superuser_reserved_connections';").Scan(&reserved); err != nil {
+                    return nil, fmt.Errorf("failed to find reserved connections: %w", err)
+                }
+                if configured, allowed := dbProperties.MaxOpenConns(), max-reserved; configured > allowed {
+                    logrus.Errorf("ERROR: The configured 'max_open_conns' is greater than the %d non-superuser connections that PostgreSQL is configured to allow. This can result in bad performance or deadlocks. Please pay close attention to your configured database connection counts. If you REALLY know what you are doing and want to override this error, pass the --skip-db-sanity option to Dendrite.", allowed)
+                    return nil, fmt.Errorf("database sanity checks failed")
+                }
+            }
+        }
     }
     return db, nil
 }

@@ -424,7 +424,7 @@ func (u *DeviceListUpdater) processServer(serverName gomatrixserverlib.ServerNam
             "succeeded": successCount,
             "failed":    len(userIDs) - successCount,
             "wait_time": waitTime,
-        }).Warn("Failed to query device keys for some users")
+        }).Debug("Failed to query device keys for some users")
     }
     return waitTime, !allUsersSucceeded
 }

@@ -278,6 +278,7 @@ type QuerySharedUsersRequest struct {
     OtherUserIDs   []string
     ExcludeRoomIDs []string
     IncludeRoomIDs []string
+    LocalOnly      bool
 }
 
 type QuerySharedUsersResponse struct {

@@ -799,7 +799,7 @@ func (r *Queryer) QuerySharedUsers(ctx context.Context, req *api.QuerySharedUser
     }
     roomIDs = roomIDs[:j]
 
-    users, err := r.DB.JoinedUsersSetInRooms(ctx, roomIDs, req.OtherUserIDs)
+    users, err := r.DB.JoinedUsersSetInRooms(ctx, roomIDs, req.OtherUserIDs, req.LocalOnly)
     if err != nil {
         return err
     }

@@ -17,12 +17,13 @@ package producers
 import (
     "encoding/json"
 
-    "github.com/matrix-org/dendrite/roomserver/acls"
-    "github.com/matrix-org/dendrite/roomserver/api"
-    "github.com/matrix-org/dendrite/setup/jetstream"
     "github.com/nats-io/nats.go"
     log "github.com/sirupsen/logrus"
     "github.com/tidwall/gjson"
+
+    "github.com/matrix-org/dendrite/roomserver/acls"
+    "github.com/matrix-org/dendrite/roomserver/api"
+    "github.com/matrix-org/dendrite/setup/jetstream"
 )
 
 var keyContentFields = map[string]string{
@@ -40,10 +41,8 @@ type RoomEventProducer struct {
 func (r *RoomEventProducer) ProduceRoomEvents(roomID string, updates []api.OutputEvent) error {
     var err error
     for _, update := range updates {
-        msg := &nats.Msg{
-            Subject: r.Topic,
-            Header:  nats.Header{},
-        }
+        msg := nats.NewMsg(r.Topic)
+        msg.Header.Set(jetstream.RoomEventType, string(update.Type))
         msg.Header.Set(jetstream.RoomID, roomID)
         msg.Data, err = json.Marshal(update)
         if err != nil {

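The producer change above pairs with the consumer changes earlier in this diff: the event type now travels in a message header, so consumers can discard uninteresting messages before paying for `json.Unmarshal`. A small standalone sketch of that pattern with plain NATS (subject name and payload type are made up for the example; the header key matches the `output_room_event_type` constant added in this commit):

```go
package main

import (
    "encoding/json"
    "log"

    "github.com/nats-io/nats.go"
)

const roomEventTypeHeader = "output_room_event_type"

type outputEvent struct {
    Type string `json:"type"`
}

func main() {
    nc, err := nats.Connect(nats.DefaultURL)
    if err != nil {
        log.Fatal(err)
    }
    defer nc.Close()

    // Consumer side: filter on the header before unmarshalling the payload.
    _, err = nc.Subscribe("output.room.event", func(m *nats.Msg) {
        if m.Header.Get(roomEventTypeHeader) != "NewRoomEvent" {
            return // not an event we care about
        }
        var out outputEvent
        if err := json.Unmarshal(m.Data, &out); err != nil {
            log.Println("message parse failure:", err)
        }
    })
    if err != nil {
        log.Fatal(err)
    }

    // Producer side: nats.NewMsg pre-initialises msg.Header, unlike a bare &nats.Msg{}.
    msg := nats.NewMsg("output.room.event")
    msg.Header.Set(roomEventTypeHeader, "NewRoomEvent")
    if msg.Data, err = json.Marshal(outputEvent{Type: "NewRoomEvent"}); err != nil {
        log.Fatal(err)
    }
    if err := nc.PublishMsg(msg); err != nil {
        log.Fatal(err)
    }
    nc.Flush()
}
```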
@@ -157,7 +157,7 @@ type Database interface {
     // If a tuple has the StateKey of '*' and allowWildcards=true then all state events with the EventType should be returned.
     GetBulkStateContent(ctx context.Context, roomIDs []string, tuples []gomatrixserverlib.StateKeyTuple, allowWildcards bool) ([]tables.StrippedEvent, error)
     // JoinedUsersSetInRooms returns how many times each of the given users appears across the given rooms.
-    JoinedUsersSetInRooms(ctx context.Context, roomIDs, userIDs []string) (map[string]int, error)
+    JoinedUsersSetInRooms(ctx context.Context, roomIDs, userIDs []string, localOnly bool) (map[string]int, error)
     // GetLocalServerInRoom returns true if we think we're in a given room or false otherwise.
     GetLocalServerInRoom(ctx context.Context, roomNID types.RoomNID) (bool, error)
     // GetServerInRoom returns true if we think a server is in a given room or false otherwise.

@@ -68,14 +68,18 @@ CREATE TABLE IF NOT EXISTS roomserver_membership (
 
 var selectJoinedUsersSetForRoomsAndUserSQL = "" +
     "SELECT target_nid, COUNT(room_nid) FROM roomserver_membership" +
-    " WHERE room_nid = ANY($1) AND target_nid = ANY($2) AND" +
-    " membership_nid = " + fmt.Sprintf("%d", tables.MembershipStateJoin) + " and forgotten = false" +
+    " WHERE (target_local OR $1 = false)" +
+    " AND room_nid = ANY($2) AND target_nid = ANY($3)" +
+    " AND membership_nid = " + fmt.Sprintf("%d", tables.MembershipStateJoin) +
+    " AND forgotten = false" +
     " GROUP BY target_nid"
 
 var selectJoinedUsersSetForRoomsSQL = "" +
     "SELECT target_nid, COUNT(room_nid) FROM roomserver_membership" +
-    " WHERE room_nid = ANY($1) AND" +
-    " membership_nid = " + fmt.Sprintf("%d", tables.MembershipStateJoin) + " and forgotten = false" +
+    " WHERE (target_local OR $1 = false) " +
+    " AND room_nid = ANY($2)" +
+    " AND membership_nid = " + fmt.Sprintf("%d", tables.MembershipStateJoin) +
+    " AND forgotten = false" +
     " GROUP BY target_nid"
 
 // Insert a row in to membership table so that it can be locked by the
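
The new `(target_local OR $1 = false)` guard folds the `localOnly` flag into the query itself rather than branching in Go. A sketch of how it resolves, using the PostgreSQL parameter order above (`$1` = localOnly, `$2` = room NIDs, `$3` = user NIDs; the membership and forgotten filters are left out for brevity):

```sql
-- localOnly = true:   (target_local OR false)  ->  only rows where target_local is true count
-- localOnly = false:  (target_local OR true)   ->  every joined membership row counts
SELECT target_nid, COUNT(room_nid)
  FROM roomserver_membership
 WHERE (target_local OR $1 = false)
   AND room_nid = ANY($2)
   AND target_nid = ANY($3)
 GROUP BY target_nid;
```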
@@ -334,6 +338,7 @@ func (s *membershipStatements) SelectJoinedUsersSetForRooms(
     ctx context.Context, txn *sql.Tx,
     roomNIDs []types.RoomNID,
     userNIDs []types.EventStateKeyNID,
+    localOnly bool,
 ) (map[types.EventStateKeyNID]int, error) {
     var (
         rows *sql.Rows
@@ -342,9 +347,9 @@ func (s *membershipStatements) SelectJoinedUsersSetForRooms(
     stmt := sqlutil.TxStmt(txn, s.selectJoinedUsersSetForRoomsStmt)
     if len(userNIDs) > 0 {
         stmt = sqlutil.TxStmt(txn, s.selectJoinedUsersSetForRoomsAndUserStmt)
-        rows, err = stmt.QueryContext(ctx, pq.Array(roomNIDs), pq.Array(userNIDs))
+        rows, err = stmt.QueryContext(ctx, localOnly, pq.Array(roomNIDs), pq.Array(userNIDs))
     } else {
-        rows, err = stmt.QueryContext(ctx, pq.Array(roomNIDs))
+        rows, err = stmt.QueryContext(ctx, localOnly, pq.Array(roomNIDs))
     }
 
     if err != nil {

@@ -1280,7 +1280,7 @@ func (d *Database) GetBulkStateContent(ctx context.Context, roomIDs []string, tu
 }
 
 // JoinedUsersSetInRooms returns a map of how many times the given users appear in the specified rooms.
-func (d *Database) JoinedUsersSetInRooms(ctx context.Context, roomIDs, userIDs []string) (map[string]int, error) {
+func (d *Database) JoinedUsersSetInRooms(ctx context.Context, roomIDs, userIDs []string, localOnly bool) (map[string]int, error) {
     roomNIDs, err := d.RoomsTable.BulkSelectRoomNIDs(ctx, nil, roomIDs)
     if err != nil {
         return nil, err
@@ -1295,7 +1295,7 @@ func (d *Database) JoinedUsersSetInRooms(ctx context.Context, roomIDs, userIDs [
         userNIDs = append(userNIDs, nid)
         nidToUserID[nid] = id
     }
-    userNIDToCount, err := d.MembershipTable.SelectJoinedUsersSetForRooms(ctx, nil, roomNIDs, userNIDs)
+    userNIDToCount, err := d.MembershipTable.SelectJoinedUsersSetForRooms(ctx, nil, roomNIDs, userNIDs, localOnly)
     if err != nil {
         return nil, err
     }

@@ -44,14 +44,18 @@ const membershipSchema = `
 
 var selectJoinedUsersSetForRoomsAndUserSQL = "" +
     "SELECT target_nid, COUNT(room_nid) FROM roomserver_membership" +
-    " WHERE room_nid IN ($1) AND target_nid IN ($2) AND" +
-    " membership_nid = " + fmt.Sprintf("%d", tables.MembershipStateJoin) + " and forgotten = false" +
+    " WHERE (target_local OR $1 = false)" +
+    " AND room_nid IN ($2) AND target_nid IN ($3)" +
+    " AND membership_nid = " + fmt.Sprintf("%d", tables.MembershipStateJoin) +
+    " AND forgotten = false" +
     " GROUP BY target_nid"
 
 var selectJoinedUsersSetForRoomsSQL = "" +
     "SELECT target_nid, COUNT(room_nid) FROM roomserver_membership" +
-    " WHERE room_nid IN ($1) AND " +
-    " membership_nid = " + fmt.Sprintf("%d", tables.MembershipStateJoin) + " and forgotten = false" +
+    " WHERE (target_local OR $1 = false)" +
+    " AND room_nid IN ($2)" +
+    " AND membership_nid = " + fmt.Sprintf("%d", tables.MembershipStateJoin) +
+    " AND forgotten = false" +
     " GROUP BY target_nid"
 
 // Insert a row in to membership table so that it can be locked by the
@@ -305,8 +309,9 @@ func (s *membershipStatements) SelectRoomsWithMembership(
     return roomNIDs, nil
 }
 
-func (s *membershipStatements) SelectJoinedUsersSetForRooms(ctx context.Context, txn *sql.Tx, roomNIDs []types.RoomNID, userNIDs []types.EventStateKeyNID) (map[types.EventStateKeyNID]int, error) {
-    params := make([]interface{}, 0, len(roomNIDs)+len(userNIDs))
+func (s *membershipStatements) SelectJoinedUsersSetForRooms(ctx context.Context, txn *sql.Tx, roomNIDs []types.RoomNID, userNIDs []types.EventStateKeyNID, localOnly bool) (map[types.EventStateKeyNID]int, error) {
+    params := make([]interface{}, 0, 1+len(roomNIDs)+len(userNIDs))
+    params = append(params, localOnly)
     for _, v := range roomNIDs {
         params = append(params, v)
     }
@@ -314,10 +319,10 @@ func (s *membershipStatements) SelectJoinedUsersSetForRooms(ctx context.Context,
         params = append(params, v)
     }
 
-    query := strings.Replace(selectJoinedUsersSetForRoomsSQL, "($1)", sqlutil.QueryVariadic(len(roomNIDs)), 1)
+    query := strings.Replace(selectJoinedUsersSetForRoomsSQL, "($2)", sqlutil.QueryVariadicOffset(len(roomNIDs), 1), 1)
     if len(userNIDs) > 0 {
-        query = strings.Replace(selectJoinedUsersSetForRoomsAndUserSQL, "($1)", sqlutil.QueryVariadic(len(roomNIDs)), 1)
-        query = strings.Replace(query, "($2)", sqlutil.QueryVariadicOffset(len(userNIDs), len(roomNIDs)), 1)
+        query = strings.Replace(selectJoinedUsersSetForRoomsAndUserSQL, "($2)", sqlutil.QueryVariadicOffset(len(roomNIDs), 1), 1)
+        query = strings.Replace(query, "($3)", sqlutil.QueryVariadicOffset(len(userNIDs), len(roomNIDs)+1), 1)
     }
     var rows *sql.Rows
     var err error

@@ -137,7 +137,7 @@ type Membership interface {
     UpdateMembership(ctx context.Context, txn *sql.Tx, roomNID types.RoomNID, targetUserNID types.EventStateKeyNID, senderUserNID types.EventStateKeyNID, membership MembershipState, eventNID types.EventNID, forgotten bool) (bool, error)
     SelectRoomsWithMembership(ctx context.Context, txn *sql.Tx, userID types.EventStateKeyNID, membershipState MembershipState) ([]types.RoomNID, error)
     // SelectJoinedUsersSetForRooms returns how many times each of the given users appears across the given rooms.
-    SelectJoinedUsersSetForRooms(ctx context.Context, txn *sql.Tx, roomNIDs []types.RoomNID, userNIDs []types.EventStateKeyNID) (map[types.EventStateKeyNID]int, error)
+    SelectJoinedUsersSetForRooms(ctx context.Context, txn *sql.Tx, roomNIDs []types.RoomNID, userNIDs []types.EventStateKeyNID, localOnly bool) (map[types.EventStateKeyNID]int, error)
     SelectKnownUsers(ctx context.Context, txn *sql.Tx, userID types.EventStateKeyNID, searchString string, limit int) ([]string, error)
     UpdateForgetMembership(ctx context.Context, txn *sql.Tx, roomNID types.RoomNID, targetUserNID types.EventStateKeyNID, forget bool) error
     SelectLocalServerInRoom(ctx context.Context, txn *sql.Tx, roomNID types.RoomNID) (bool, error)

@@ -79,7 +79,7 @@ func TestMembershipTable(t *testing.T) {
         assert.NoError(t, err)
         assert.True(t, inRoom)
 
-        userJoinedToRooms, err := tab.SelectJoinedUsersSetForRooms(ctx, nil, []types.RoomNID{1}, userNIDs)
+        userJoinedToRooms, err := tab.SelectJoinedUsersSetForRooms(ctx, nil, []types.RoomNID{1}, userNIDs, false)
         assert.NoError(t, err)
         assert.Equal(t, 1, len(userJoinedToRooms))
 

@@ -9,9 +9,10 @@ import (
 )
 
 const (
-    UserID  = "user_id"
-    RoomID  = "room_id"
-    EventID = "event_id"
+    UserID        = "user_id"
+    RoomID        = "room_id"
+    EventID       = "event_id"
+    RoomEventType = "output_room_event_type"
 )
 
 var (

@@ -111,7 +111,8 @@ func (s *OutputKeyChangeEventConsumer) onDeviceKeyMessage(m api.DeviceMessage, d
     // work out who we need to notify about the new key
     var queryRes roomserverAPI.QuerySharedUsersResponse
     err := s.rsAPI.QuerySharedUsers(s.ctx, &roomserverAPI.QuerySharedUsersRequest{
-        UserID: output.UserID,
+        UserID:    output.UserID,
+        LocalOnly: true,
     }, &queryRes)
     if err != nil {
         logrus.WithError(err).Error("syncapi: failed to QuerySharedUsers for key change event from key server")
@@ -135,7 +136,8 @@ func (s *OutputKeyChangeEventConsumer) onCrossSigningMessage(m api.DeviceMessage
     // work out who we need to notify about the new key
     var queryRes roomserverAPI.QuerySharedUsersResponse
     err := s.rsAPI.QuerySharedUsers(s.ctx, &roomserverAPI.QuerySharedUsersRequest{
-        UserID: output.UserID,
+        UserID:    output.UserID,
+        LocalOnly: true,
     }, &queryRes)
     if err != nil {
         logrus.WithError(err).Error("syncapi: failed to QuerySharedUsers for key change event from key server")

@@ -170,9 +170,12 @@ func joinResponseWithRooms(syncResponse *types.Response, userID string, roomIDs
                 Content: []byte(`{"membership":"join"}`),
             },
         }
 
-        jr := syncResponse.Rooms.Join[roomID]
-        jr.State.Events = roomEvents
+        jr, ok := syncResponse.Rooms.Join[roomID]
+        if !ok {
+            jr = types.NewJoinResponse()
+        }
+        jr.Timeline = &types.Timeline{}
+        jr.State = &types.ClientEvents{Events: roomEvents}
         syncResponse.Rooms.Join[roomID] = jr
     }
     return syncResponse
@@ -191,8 +194,11 @@ func leaveResponseWithRooms(syncResponse *types.Response, userID string, roomIDs
             },
         }
 
-        lr := syncResponse.Rooms.Leave[roomID]
-        lr.Timeline.Events = roomEvents
+        lr, ok := syncResponse.Rooms.Leave[roomID]
+        if !ok {
+            lr = types.NewLeaveResponse()
+        }
+        lr.Timeline = &types.Timeline{Events: roomEvents}
         syncResponse.Rooms.Leave[roomID] = lr
     }
     return syncResponse
@@ -328,9 +334,13 @@ func TestKeyChangeCatchupNoNewJoinsButMessages(t *testing.T) {
         },
     }
 
-    jr := syncResponse.Rooms.Join[roomID]
-    jr.State.Events = roomStateEvents
-    jr.Timeline.Events = roomTimelineEvents
+    jr, ok := syncResponse.Rooms.Join[roomID]
+    if !ok {
+        jr = types.NewJoinResponse()
+    }
+
+    jr.State = &types.ClientEvents{Events: roomStateEvents}
+    jr.Timeline = &types.Timeline{Events: roomTimelineEvents}
     syncResponse.Rooms.Join[roomID] = jr
 
     rsAPI := &mockRoomserverAPI{
@@ -442,8 +452,11 @@ func TestKeyChangeCatchupChangeAndLeftSameRoom(t *testing.T) {
         },
     }
 
-    lr := syncResponse.Rooms.Leave[roomID]
-    lr.Timeline.Events = roomEvents
+    lr, ok := syncResponse.Rooms.Leave[roomID]
+    if !ok {
+        lr = types.NewLeaveResponse()
+    }
+    lr.Timeline = &types.Timeline{Events: roomEvents}
     syncResponse.Rooms.Leave[roomID] = lr
 
     rsAPI := &mockRoomserverAPI{

@@ -90,9 +90,9 @@ func (p *AccountDataStreamProvider) IncrementalSync(
             }
         } else {
             if roomData, ok := dataRes.RoomAccountData[roomID][dataType]; ok {
-                joinData := *types.NewJoinResponse()
-                if existing, ok := req.Response.Rooms.Join[roomID]; ok {
-                    joinData = existing
+                joinData, ok := req.Response.Rooms.Join[roomID]
+                if !ok {
+                    joinData = types.NewJoinResponse()
                 }
                 joinData.AccountData.Events = append(
                     joinData.AccountData.Events,

@@ -65,7 +65,7 @@ func (p *InviteStreamProvider) IncrementalSync(
             continue
         }
         ir := types.NewInviteResponse(inviteEvent)
-        req.Response.Rooms.Invite[roomID] = *ir
+        req.Response.Rooms.Invite[roomID] = ir
     }
 
     // When doing an initial sync, we don't want to add retired invites, as this
@@ -87,7 +87,7 @@ func (p *InviteStreamProvider) IncrementalSync(
             Type:    "m.room.member",
             Content: gomatrixserverlib.RawJSON(`{"membership":"leave"}`),
         })
-        req.Response.Rooms.Leave[roomID] = *lr
+        req.Response.Rooms.Leave[roomID] = lr
     }
 }
 

@@ -106,7 +106,7 @@ func (p *PDUStreamProvider) CompleteSync(
             }
             continue
         }
-        req.Response.Rooms.Join[roomID] = *jr
+        req.Response.Rooms.Join[roomID] = jr
         req.Rooms[roomID] = gomatrixserverlib.Join
     }
 
@@ -129,7 +129,7 @@ func (p *PDUStreamProvider) CompleteSync(
             }
             continue
         }
-        req.Response.Rooms.Peek[peek.RoomID] = *jr
+        req.Response.Rooms.Peek[peek.RoomID] = jr
     }
 }
 
@@ -320,7 +320,7 @@ func (p *PDUStreamProvider) addRoomDeltaToResponse(
         // didn't "remove" events, return that the response is limited.
         jr.Timeline.Limited = limited && len(events) == len(recentEvents)
         jr.State.Events = gomatrixserverlib.HeaderedToClientEvents(delta.StateEvents, gomatrixserverlib.FormatSync)
-        res.Rooms.Join[delta.RoomID] = *jr
+        res.Rooms.Join[delta.RoomID] = jr
 
     case gomatrixserverlib.Peek:
         jr := types.NewJoinResponse()
@@ -329,7 +329,7 @@ func (p *PDUStreamProvider) addRoomDeltaToResponse(
         jr.Timeline.Events = gomatrixserverlib.HeaderedToClientEvents(recentEvents, gomatrixserverlib.FormatSync)
         jr.Timeline.Limited = limited
        jr.State.Events = gomatrixserverlib.HeaderedToClientEvents(delta.StateEvents, gomatrixserverlib.FormatSync)
-        res.Rooms.Peek[delta.RoomID] = *jr
+        res.Rooms.Peek[delta.RoomID] = jr
 
     case gomatrixserverlib.Leave:
         fallthrough // transitions to leave are the same as ban
@@ -342,7 +342,7 @@ func (p *PDUStreamProvider) addRoomDeltaToResponse(
         // didn't "remove" events, return that the response is limited.
         lr.Timeline.Limited = limited && len(events) == len(recentEvents)
         lr.State.Events = gomatrixserverlib.HeaderedToClientEvents(delta.StateEvents, gomatrixserverlib.FormatSync)
-        res.Rooms.Leave[delta.RoomID] = *lr
+        res.Rooms.Leave[delta.RoomID] = lr
     }
 
     return latestPosition, nil

@@ -4,9 +4,10 @@ import (
     "context"
     "encoding/json"
 
+    "github.com/matrix-org/gomatrixserverlib"
+
     "github.com/matrix-org/dendrite/syncapi/storage"
     "github.com/matrix-org/dendrite/syncapi/types"
-    "github.com/matrix-org/gomatrixserverlib"
 )
 
 type ReceiptStreamProvider struct {
@@ -76,7 +77,7 @@ func (p *ReceiptStreamProvider) IncrementalSync(
             continue
         }
 
-        jr := *types.NewJoinResponse()
+        jr := types.NewJoinResponse()
         if existing, ok := req.Response.Rooms.Join[roomID]; ok {
             jr = existing
         }

@@ -4,10 +4,11 @@ import (
     "context"
     "encoding/json"
 
+    "github.com/matrix-org/gomatrixserverlib"
+
     "github.com/matrix-org/dendrite/internal/caching"
     "github.com/matrix-org/dendrite/syncapi/storage"
     "github.com/matrix-org/dendrite/syncapi/types"
-    "github.com/matrix-org/gomatrixserverlib"
 )
 
 type TypingStreamProvider struct {
@@ -35,9 +36,9 @@ func (p *TypingStreamProvider) IncrementalSync(
             continue
         }
 
-        jr := *types.NewJoinResponse()
-        if existing, ok := req.Response.Rooms.Join[roomID]; ok {
-            jr = existing
+        jr, ok := req.Response.Rooms.Join[roomID]
+        if !ok {
+            jr = types.NewJoinResponse()
         }
 
         if users, updated := p.EDUCache.GetTypingUsersIfUpdatedAfter(

@@ -327,29 +327,57 @@ type PrevEventRef struct {
     PrevSender string `json:"prev_sender"`
 }
 
+type DeviceLists struct {
+    Changed []string `json:"changed,omitempty"`
+    Left    []string `json:"left,omitempty"`
+}
+
+type RoomsResponse struct {
+    Join   map[string]*JoinResponse   `json:"join,omitempty"`
+    Peek   map[string]*JoinResponse   `json:"peek,omitempty"`
+    Invite map[string]*InviteResponse `json:"invite,omitempty"`
+    Leave  map[string]*LeaveResponse  `json:"leave,omitempty"`
+}
+
+type ToDeviceResponse struct {
+    Events []gomatrixserverlib.SendToDeviceEvent `json:"events,omitempty"`
+}
+
 // Response represents a /sync API response. See https://matrix.org/docs/spec/client_server/r0.2.0.html#get-matrix-client-r0-sync
 type Response struct {
-    NextBatch   StreamingToken `json:"next_batch"`
-    AccountData struct {
-        Events []gomatrixserverlib.ClientEvent `json:"events,omitempty"`
-    } `json:"account_data,omitempty"`
-    Presence struct {
-        Events []gomatrixserverlib.ClientEvent `json:"events,omitempty"`
-    } `json:"presence,omitempty"`
-    Rooms struct {
-        Join   map[string]JoinResponse   `json:"join,omitempty"`
-        Peek   map[string]JoinResponse   `json:"peek,omitempty"`
-        Invite map[string]InviteResponse `json:"invite,omitempty"`
-        Leave  map[string]LeaveResponse  `json:"leave,omitempty"`
-    } `json:"rooms,omitempty"`
-    ToDevice struct {
-        Events []gomatrixserverlib.SendToDeviceEvent `json:"events,omitempty"`
-    } `json:"to_device,omitempty"`
-    DeviceLists struct {
-        Changed []string `json:"changed,omitempty"`
-        Left    []string `json:"left,omitempty"`
-    } `json:"device_lists,omitempty"`
-    DeviceListsOTKCount map[string]int `json:"device_one_time_keys_count,omitempty"`
+    NextBatch           StreamingToken    `json:"next_batch"`
+    AccountData         *ClientEvents     `json:"account_data,omitempty"`
+    Presence            *ClientEvents     `json:"presence,omitempty"`
+    Rooms               *RoomsResponse    `json:"rooms,omitempty"`
+    ToDevice            *ToDeviceResponse `json:"to_device,omitempty"`
+    DeviceLists         *DeviceLists      `json:"device_lists,omitempty"`
+    DeviceListsOTKCount map[string]int    `json:"device_one_time_keys_count,omitempty"`
 }
 
+func (r Response) MarshalJSON() ([]byte, error) {
+    type alias Response
+    a := alias(r)
+    if r.AccountData != nil && len(r.AccountData.Events) == 0 {
+        a.AccountData = nil
+    }
+    if r.Presence != nil && len(r.Presence.Events) == 0 {
+        a.Presence = nil
+    }
+    if r.DeviceLists != nil {
+        if len(r.DeviceLists.Left) == 0 && len(r.DeviceLists.Changed) == 0 {
+            a.DeviceLists = nil
+        }
+    }
+    if r.Rooms != nil {
+        if len(r.Rooms.Join) == 0 && len(r.Rooms.Peek) == 0 &&
+            len(r.Rooms.Invite) == 0 && len(r.Rooms.Leave) == 0 {
+            a.Rooms = nil
+        }
+    }
+    if r.ToDevice != nil && len(r.ToDevice.Events) == 0 {
+        a.ToDevice = nil
+    }
+    return json.Marshal(a)
+}
+
 func (r *Response) HasUpdates() bool {
@@ -370,18 +398,21 @@ func NewResponse() *Response {
     res := Response{}
     // Pre-initialise the maps. Synapse will return {} even if there are no rooms under a specific section,
     // so let's do the same thing. Bonus: this means we can't get dreaded 'assignment to entry in nil map' errors.
-    res.Rooms.Join = map[string]JoinResponse{}
-    res.Rooms.Peek = map[string]JoinResponse{}
-    res.Rooms.Invite = map[string]InviteResponse{}
-    res.Rooms.Leave = map[string]LeaveResponse{}
+    res.Rooms = &RoomsResponse{
+        Join:   map[string]*JoinResponse{},
+        Peek:   map[string]*JoinResponse{},
+        Invite: map[string]*InviteResponse{},
+        Leave:  map[string]*LeaveResponse{},
+    }
 
     // Also pre-intialise empty slices or else we'll insert 'null' instead of '[]' for the value.
     // TODO: We really shouldn't have to do all this to coerce encoding/json to Do The Right Thing. We should
     // really be using our own Marshal/Unmarshal implementations otherwise this may prove to be a CPU bottleneck.
     // This also applies to NewJoinResponse, NewInviteResponse and NewLeaveResponse.
-    res.AccountData.Events = []gomatrixserverlib.ClientEvent{}
-    res.Presence.Events = []gomatrixserverlib.ClientEvent{}
-    res.ToDevice.Events = []gomatrixserverlib.SendToDeviceEvent{}
+    res.AccountData = &ClientEvents{}
+    res.Presence = &ClientEvents{}
+    res.DeviceLists = &DeviceLists{}
+    res.ToDevice = &ToDeviceResponse{}
    res.DeviceListsOTKCount = map[string]int{}
 
     return &res
@@ -403,38 +434,73 @@ type UnreadNotifications struct {
     NotificationCount int `json:"notification_count"`
 }
 
+type ClientEvents struct {
+    Events []gomatrixserverlib.ClientEvent `json:"events,omitempty"`
+}
+
+type Timeline struct {
+    Events    []gomatrixserverlib.ClientEvent `json:"events"`
+    Limited   bool                            `json:"limited"`
+    PrevBatch *TopologyToken                  `json:"prev_batch,omitempty"`
+}
+
+type Summary struct {
+    Heroes             []string `json:"m.heroes,omitempty"`
+    JoinedMemberCount  *int     `json:"m.joined_member_count,omitempty"`
+    InvitedMemberCount *int     `json:"m.invited_member_count,omitempty"`
+}
+
 // JoinResponse represents a /sync response for a room which is under the 'join' or 'peek' key.
 type JoinResponse struct {
-    Summary struct {
-        Heroes             []string `json:"m.heroes,omitempty"`
-        JoinedMemberCount  *int     `json:"m.joined_member_count,omitempty"`
-        InvitedMemberCount *int     `json:"m.invited_member_count,omitempty"`
-    } `json:"summary"`
-    State struct {
-        Events []gomatrixserverlib.ClientEvent `json:"events"`
-    } `json:"state"`
-    Timeline struct {
-        Events    []gomatrixserverlib.ClientEvent `json:"events"`
-        Limited   bool                            `json:"limited"`
-        PrevBatch *TopologyToken                  `json:"prev_batch,omitempty"`
-    } `json:"timeline"`
-    Ephemeral struct {
-        Events []gomatrixserverlib.ClientEvent `json:"events"`
-    } `json:"ephemeral"`
-    AccountData struct {
-        Events []gomatrixserverlib.ClientEvent `json:"events"`
-    } `json:"account_data"`
+    Summary              *Summary      `json:"summary,omitempty"`
+    State                *ClientEvents `json:"state,omitempty"`
+    Timeline             *Timeline     `json:"timeline,omitempty"`
+    Ephemeral            *ClientEvents `json:"ephemeral,omitempty"`
+    AccountData          *ClientEvents `json:"account_data,omitempty"`
     *UnreadNotifications `json:"unread_notifications,omitempty"`
 }
 
+func (jr JoinResponse) MarshalJSON() ([]byte, error) {
+    type alias JoinResponse
+    a := alias(jr)
+    if jr.State != nil && len(jr.State.Events) == 0 {
+        a.State = nil
+    }
+    if jr.Ephemeral != nil && len(jr.Ephemeral.Events) == 0 {
+        a.Ephemeral = nil
+    }
+    if jr.AccountData != nil && len(jr.AccountData.Events) == 0 {
+        a.AccountData = nil
+    }
+    if jr.Timeline != nil && len(jr.Timeline.Events) == 0 {
+        a.Timeline = nil
+    }
+    if jr.Summary != nil {
+        var nilPtr int
+        joinedEmpty := jr.Summary.JoinedMemberCount == nil || jr.Summary.JoinedMemberCount == &nilPtr
+        invitedEmpty := jr.Summary.InvitedMemberCount == nil || jr.Summary.InvitedMemberCount == &nilPtr
+        if joinedEmpty && invitedEmpty && len(jr.Summary.Heroes) == 0 {
+            a.Summary = nil
+        }
+
+    }
+    if jr.UnreadNotifications != nil &&
+        jr.UnreadNotifications.NotificationCount == 0 && jr.UnreadNotifications.HighlightCount == 0 {
+        a.UnreadNotifications = nil
+    }
+    return json.Marshal(a)
+}
+
 // NewJoinResponse creates an empty response with initialised arrays.
 func NewJoinResponse() *JoinResponse {
-    res := JoinResponse{}
-    res.State.Events = []gomatrixserverlib.ClientEvent{}
-    res.Timeline.Events = []gomatrixserverlib.ClientEvent{}
-    res.Ephemeral.Events = []gomatrixserverlib.ClientEvent{}
-    res.AccountData.Events = []gomatrixserverlib.ClientEvent{}
-    return &res
+    return &JoinResponse{
+        Summary:             &Summary{},
+        State:               &ClientEvents{},
+        Timeline:            &Timeline{},
+        Ephemeral:           &ClientEvents{},
+        AccountData:         &ClientEvents{},
+        UnreadNotifications: &UnreadNotifications{},
+    }
 }
 
 // InviteResponse represents a /sync response for a room which is under the 'invite' key.
@@ -469,21 +535,28 @@ func NewInviteResponse(event *gomatrixserverlib.HeaderedEvent) *InviteResponse {
 
 // LeaveResponse represents a /sync response for a room which is under the 'leave' key.
 type LeaveResponse struct {
-    State struct {
-        Events []gomatrixserverlib.ClientEvent `json:"events"`
-    } `json:"state"`
-    Timeline struct {
-        Events    []gomatrixserverlib.ClientEvent `json:"events"`
-        Limited   bool                            `json:"limited"`
-        PrevBatch *TopologyToken                  `json:"prev_batch,omitempty"`
-    } `json:"timeline"`
+    State    *ClientEvents `json:"state,omitempty"`
+    Timeline *Timeline     `json:"timeline,omitempty"`
 }
 
+func (lr LeaveResponse) MarshalJSON() ([]byte, error) {
+    type alias LeaveResponse
+    a := alias(lr)
+    if lr.State != nil && len(lr.State.Events) == 0 {
+        a.State = nil
+    }
+    if lr.Timeline != nil && len(lr.Timeline.Events) == 0 {
+        a.Timeline = nil
+    }
+    return json.Marshal(a)
+}
+
 // NewLeaveResponse creates an empty response with initialised arrays.
 func NewLeaveResponse() *LeaveResponse {
-    res := LeaveResponse{}
-    res.State.Events = []gomatrixserverlib.ClientEvent{}
-    res.Timeline.Events = []gomatrixserverlib.ClientEvent{}
+    res := LeaveResponse{
+        State:    &ClientEvents{},
+        Timeline: &Timeline{},
+    }
     return &res
 }
 
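The refactor above applies one pattern repeatedly: make each sub-section a pointer with `omitempty`, and use a `type alias T` inside `MarshalJSON` so the custom marshaller can blank out empty sections without recursing into itself. A minimal standalone sketch of just that pattern (illustrative types, not the sync API's own):

```go
package main

import (
    "encoding/json"
    "fmt"
)

type Events struct {
    Events []string `json:"events,omitempty"`
}

type Room struct {
    State    *Events `json:"state,omitempty"`
    Timeline *Events `json:"timeline,omitempty"`
}

func (r Room) MarshalJSON() ([]byte, error) {
    // "alias" has the same fields but none of Room's methods, so marshalling it
    // does not call this method again and recurse forever.
    type alias Room
    a := alias(r)
    if r.State != nil && len(r.State.Events) == 0 {
        a.State = nil // initialised but empty: drop the whole section
    }
    if r.Timeline != nil && len(r.Timeline.Events) == 0 {
        a.Timeline = nil
    }
    return json.Marshal(a)
}

func main() {
    r := Room{State: &Events{}, Timeline: &Events{Events: []string{"$event1"}}}
    b, _ := json.Marshal(r)
    fmt.Println(string(b)) // {"timeline":{"events":["$event1"]}}
}
```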
@@ -4,10 +4,11 @@ import (
     "encoding/json"
     "testing"
 
+    "github.com/nats-io/nats.go"
+
     "github.com/matrix-org/dendrite/roomserver/api"
     "github.com/matrix-org/dendrite/setup/base"
     "github.com/matrix-org/dendrite/setup/jetstream"
-    "github.com/nats-io/nats.go"
 )
 
 func MustPublishMsgs(t *testing.T, jsctx nats.JetStreamContext, msgs ...*nats.Msg) {
@@ -21,10 +22,8 @@ func MustPublishMsgs(t *testing.T, jsctx nats.JetStreamContext, msgs ...*nats.Ms
 
 func NewOutputEventMsg(t *testing.T, base *base.BaseDendrite, roomID string, update api.OutputEvent) *nats.Msg {
     t.Helper()
-    msg := &nats.Msg{
-        Subject: base.Cfg.Global.JetStream.Prefixed(jetstream.OutputRoomEvent),
-        Header:  nats.Header{},
-    }
+    msg := nats.NewMsg(base.Cfg.Global.JetStream.Prefixed(jetstream.OutputRoomEvent))
+    msg.Header.Set(jetstream.RoomEventType, string(update.Type))
     msg.Header.Set(jetstream.RoomID, roomID)
     var err error
     msg.Data, err = json.Marshal(update)

@@ -84,15 +84,16 @@ func (s *OutputRoomEventConsumer) Start() error {
 
 func (s *OutputRoomEventConsumer) onMessage(ctx context.Context, msgs []*nats.Msg) bool {
     msg := msgs[0] // Guaranteed to exist if onMessage is called
+    // Only handle events we care about
+    if rsapi.OutputType(msg.Header.Get(jetstream.RoomEventType)) != rsapi.OutputTypeNewRoomEvent {
+        return true
+    }
     var output rsapi.OutputEvent
     if err := json.Unmarshal(msg.Data, &output); err != nil {
         // If the message was invalid, log it and move on to the next message in the stream
         log.WithError(err).Errorf("roomserver output log: message parse failure")
         return true
     }
-    if output.Type != rsapi.OutputTypeNewRoomEvent {
-        return true
-    }
     event := output.NewRoomEvent.Event
     if event == nil {
         log.Errorf("userapi consumer: expected event")