Merge branch 'master' into matthew/peeking

commit d0d5f70105
Author: Matthew Hodgson
Date:   2020-09-01 19:11:51 +03:00
15 changed files with 32 additions and 26 deletions


@@ -48,6 +48,7 @@ func NewOutputRoomEventConsumer(
 	workerStates []types.ApplicationServiceWorkerState,
 ) *OutputRoomEventConsumer {
 	consumer := internal.ContinualConsumer{
+		ComponentName:  "appservice/roomserver",
 		Topic:          cfg.Global.Kafka.TopicFor(config.TopicOutputRoomEvent),
 		Consumer:       kafkaConsumer,
 		PartitionStore: appserviceDB,


@@ -36,6 +36,7 @@ type OutputRoomEventConsumer struct {

 func NewOutputRoomEventConsumer(topicName string, kafkaConsumer sarama.Consumer, store storage.Database, acls *acls.ServerACLs) *OutputRoomEventConsumer {
 	consumer := &internal.ContinualConsumer{
+		ComponentName:  "currentstateserver/roomserver",
 		Topic:          topicName,
 		Consumer:       kafkaConsumer,
 		PartitionStore: store,


@@ -83,7 +83,6 @@ const selectKnownUsersSQL = "" +

 type currentRoomStateStatements struct {
 	db                              *sql.DB
-	writer                          sqlutil.Writer
 	upsertRoomStateStmt             *sql.Stmt
 	deleteRoomStateByEventIDStmt    *sql.Stmt
 	selectRoomIDsWithMembershipStmt *sql.Stmt
@@ -95,8 +94,7 @@ type currentRoomStateStatements struct {
 func NewSqliteCurrentRoomStateTable(db *sql.DB) (tables.CurrentRoomState, error) {
 	s := &currentRoomStateStatements{
 		db: db,
-		writer: sqlutil.NewExclusiveWriter(),
 	}
 	_, err := db.Exec(currentRoomStateSchema)
 	if err != nil {
@@ -177,11 +175,9 @@ func (s *currentRoomStateStatements) SelectRoomIDsWithMembership(
 func (s *currentRoomStateStatements) DeleteRoomStateByEventID(
 	ctx context.Context, txn *sql.Tx, eventID string,
 ) error {
-	return s.writer.Do(s.db, txn, func(txn *sql.Tx) error {
-		stmt := sqlutil.TxStmt(txn, s.deleteRoomStateByEventIDStmt)
-		_, err := stmt.ExecContext(ctx, eventID)
-		return err
-	})
+	stmt := sqlutil.TxStmt(txn, s.deleteRoomStateByEventIDStmt)
+	_, err := stmt.ExecContext(ctx, eventID)
+	return err
 }

 func (s *currentRoomStateStatements) UpsertRoomState(
@@ -194,20 +190,18 @@ func (s *currentRoomStateStatements) UpsertRoomState(
 	}

 	// upsert state event
-	return s.writer.Do(s.db, txn, func(txn *sql.Tx) error {
-		stmt := sqlutil.TxStmt(txn, s.upsertRoomStateStmt)
-		_, err = stmt.ExecContext(
-			ctx,
-			event.RoomID(),
-			event.EventID(),
-			event.Type(),
-			event.Sender(),
-			*event.StateKey(),
-			headeredJSON,
-			contentVal,
-		)
-		return err
-	})
+	stmt := sqlutil.TxStmt(txn, s.upsertRoomStateStmt)
+	_, err = stmt.ExecContext(
+		ctx,
+		event.RoomID(),
+		event.EventID(),
+		event.Type(),
+		event.Sender(),
+		*event.StateKey(),
+		headeredJSON,
+		contentVal,
+	)
+	return err
 }

 func (s *currentRoomStateStatements) SelectEventsWithEventIDs(
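The two methods above drop the per-table writer and run their prepared statements directly. For context, a minimal sketch of the sqlutil.TxStmt pattern they rely on (this mirrors the helper's likely shape; treat it as an illustration, not the exact upstream source):

	func txStmt(txn *sql.Tx, stmt *sql.Stmt) *sql.Stmt {
		// If a transaction is in progress, rebind the prepared statement to it
		// so the write joins the caller's transaction; otherwise the bare
		// statement runs against the connection pool.
		if txn != nil {
			stmt = txn.Stmt(stmt)
		}
		return stmt
	}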


@@ -50,11 +50,13 @@ func NewOutputEDUConsumer(
 ) *OutputEDUConsumer {
 	c := &OutputEDUConsumer{
 		typingConsumer: &internal.ContinualConsumer{
+			ComponentName:  "eduserver/typing",
 			Topic:          string(cfg.Matrix.Kafka.TopicFor(config.TopicOutputTypingEvent)),
 			Consumer:       kafkaConsumer,
 			PartitionStore: store,
 		},
 		sendToDeviceConsumer: &internal.ContinualConsumer{
+			ComponentName:  "eduserver/sendtodevice",
 			Topic:          string(cfg.Matrix.Kafka.TopicFor(config.TopicOutputSendToDeviceEvent)),
 			Consumer:       kafkaConsumer,
 			PartitionStore: store,


@@ -49,6 +49,7 @@ func NewKeyChangeConsumer(
 ) *KeyChangeConsumer {
 	c := &KeyChangeConsumer{
 		consumer: &internal.ContinualConsumer{
+			ComponentName:  "federationsender/keychange",
 			Topic:          string(cfg.Matrix.Kafka.TopicFor(config.TopicOutputKeyChangeEvent)),
 			Consumer:       kafkaConsumer,
 			PartitionStore: store,


@@ -48,6 +48,7 @@ func NewOutputRoomEventConsumer(
 	rsAPI api.RoomserverInternalAPI,
 ) *OutputRoomEventConsumer {
 	consumer := internal.ContinualConsumer{
+		ComponentName:  "federationsender/roomserver",
 		Topic:          string(cfg.Matrix.Kafka.TopicFor(config.TopicOutputRoomEvent)),
 		Consumer:       kafkaConsumer,
 		PartitionStore: store,


@@ -33,6 +33,7 @@ type PartitionStorer interface {
 // A ContinualConsumer continually consumes logs even across restarts. It requires a PartitionStorer to
 // remember the offset it reached.
 type ContinualConsumer struct {
+	ComponentName string
 	// The kafkaesque topic to consume events from.
 	// This is the name used in kafka to identify the stream to consume events from.
 	Topic string
@@ -111,7 +112,7 @@ func (c *ContinualConsumer) consumePartition(pc sarama.PartitionConsumer) {
 		msgErr := c.ProcessMessage(message)
 		// Advance our position in the stream so that we will start at the right position after a restart.
 		if err := c.PartitionStore.SetPartitionOffset(context.TODO(), c.Topic, message.Partition, message.Offset); err != nil {
-			panic(fmt.Errorf("the ContinualConsumer failed to SetPartitionOffset: %w", err))
+			panic(fmt.Errorf("the ContinualConsumer in %q failed to SetPartitionOffset: %w", c.ComponentName, err))
 		}
 		// Shutdown if we were told to do so.
 		if msgErr == ErrShutdown {
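With the new ComponentName field, every component tags its consumer so the panic above can identify which pipeline failed to record its offset. A minimal sketch of the wiring, assuming the ProcessMessage hook and Start method this file implies (the tag and handler here are hypothetical):

	consumer := internal.ContinualConsumer{
		ComponentName:  "example/component", // hypothetical tag; real ones follow the "component/source" pattern
		Topic:          topic,
		Consumer:       kafkaConsumer,
		PartitionStore: store,
	}
	consumer.ProcessMessage = func(msg *sarama.ConsumerMessage) error {
		// Handle one Kafka message; returning ErrShutdown stops consumption.
		return nil
	}
	if err := consumer.Start(); err != nil {
		panic(err)
	}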


@@ -122,7 +122,7 @@ func Open(dbProperties *config.DatabaseOptions, cache caching.RoomServerCaches)
 	d.Database = shared.Database{
 		DB:                  d.db,
 		Cache:               cache,
-		Writer:              sqlutil.NewExclusiveWriter(),
+		Writer:              d.writer,
 		EventsTable:         d.events,
 		EventTypesTable:     d.eventTypes,
 		EventStateKeysTable: d.eventStateKeys,
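This Writer swap (like the matching sync API changes further down) replaces writers constructed inline with a single writer owned by the datasource, so all tables and the shared.Database funnel writes through one place. Presumably d.writer is created once earlier in Open, along the lines of this sketch (assumed, not shown in this hunk; the Do signature matches the s.writer.Do calls visible above):

	d.writer = sqlutil.NewExclusiveWriter()
	// Any write can then be serialized through the shared writer:
	err := d.writer.Do(d.db, nil, func(txn *sql.Tx) error {
		// All SQLite writes funnel through here one at a time, avoiding
		// errors from SQLite's single-writer limitation.
		return nil
	})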


@@ -44,6 +44,7 @@ func NewOutputClientDataConsumer(
 ) *OutputClientDataConsumer {
 	consumer := internal.ContinualConsumer{
+		ComponentName:  "syncapi/clientapi",
 		Topic:          string(cfg.Matrix.Kafka.TopicFor(config.TopicOutputClientData)),
 		Consumer:       kafkaConsumer,
 		PartitionStore: store,


@@ -48,6 +48,7 @@ func NewOutputSendToDeviceEventConsumer(
 ) *OutputSendToDeviceEventConsumer {
 	consumer := internal.ContinualConsumer{
+		ComponentName:  "syncapi/eduserver/sendtodevice",
 		Topic:          string(cfg.Matrix.Kafka.TopicFor(config.TopicOutputSendToDeviceEvent)),
 		Consumer:       kafkaConsumer,
 		PartitionStore: store,


@@ -44,6 +44,7 @@ func NewOutputTypingEventConsumer(
 ) *OutputTypingEventConsumer {
 	consumer := internal.ContinualConsumer{
+		ComponentName:  "syncapi/eduserver/typing",
 		Topic:          string(cfg.Matrix.Kafka.TopicFor(config.TopicOutputTypingEvent)),
 		Consumer:       kafkaConsumer,
 		PartitionStore: store,


@@ -56,6 +56,7 @@ func NewOutputKeyChangeEventConsumer(
 ) *OutputKeyChangeEventConsumer {
 	consumer := internal.ContinualConsumer{
+		ComponentName:  "syncapi/keychange",
 		Topic:          topic,
 		Consumer:       kafkaConsumer,
 		PartitionStore: store,


@@ -49,6 +49,7 @@ func NewOutputRoomEventConsumer(
 ) *OutputRoomEventConsumer {
 	consumer := internal.ContinualConsumer{
+		ComponentName:  "syncapi/roomserver",
 		Topic:          string(cfg.Matrix.Kafka.TopicFor(config.TopicOutputRoomEvent)),
 		Consumer:       kafkaConsumer,
 		PartitionStore: store,


@@ -80,7 +80,7 @@ func NewDatabase(dbProperties *config.DatabaseOptions) (*SyncServerDatasource, e
 	}
 	d.Database = shared.Database{
 		DB:           d.db,
-		Writer:       sqlutil.NewDummyWriter(),
+		Writer:       d.writer,
 		Invites:      invites,
 		AccountData:  accountData,
 		OutputEvents: events,


@@ -97,7 +97,7 @@ func (d *SyncServerDatasource) prepare() (err error) {
 	}
 	d.Database = shared.Database{
 		DB:          d.db,
-		Writer:      sqlutil.NewExclusiveWriter(),
+		Writer:      d.writer,
 		Invites:     invites,
 		Peeks:       peeks,
 		AccountData: accountData,
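For context on the two writer flavours replaced in the last two hunks: the Postgres sync database previously used sqlutil.NewDummyWriter, while the SQLite one used sqlutil.NewExclusiveWriter. A dummy writer can run the supplied function inline because Postgres handles concurrent writers natively, whereas an exclusive writer serializes everything because SQLite allows only one writer at a time. A hedged sketch of what the dummy variant amounts to (illustrative, not the exact upstream source):

	type dummyWriter struct{}

	// Do just invokes f, reusing the caller's transaction if one was supplied.
	// An exclusive writer would instead take a lock before doing the same.
	func (w dummyWriter) Do(db *sql.DB, txn *sql.Tx, f func(txn *sql.Tx) error) error {
		return f(txn)
	}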