diff --git a/dendrite-config.yaml b/dendrite-config.yaml
index e5cbd4781..25503692b 100644
--- a/dendrite-config.yaml
+++ b/dendrite-config.yaml
@@ -76,6 +76,12 @@ global:
     # Kafka.
     use_naffka: true
 
+    # The maximum size a Kafka message is allowed to have.
+    # You only need to change this value if you encounter issues with oversized messages.
+    # Must be less than or equal to "max.message.bytes" configured in Kafka.
+    # Defaults to 8388608 bytes.
+    # max_message_bytes: 8388608
+
     # Naffka database options. Not required when using Kafka.
     naffka_database:
       connection_string: file:naffka.db
diff --git a/internal/config/config_kafka.go b/internal/config/config_kafka.go
index e2bd6538e..707c92a71 100644
--- a/internal/config/config_kafka.go
+++ b/internal/config/config_kafka.go
@@ -24,6 +24,9 @@ type Kafka struct {
     UseNaffka bool `yaml:"use_naffka"`
     // The Naffka database is used internally by the naffka library, if used.
     Database DatabaseOptions `yaml:"naffka_database"`
+    // The maximum size of a Kafka message passed between consumer and producer.
+    // Corresponds roughly to max.message.bytes / fetch.message.max.bytes in Kafka.
+    MaxMessageBytes *int `yaml:"max_message_bytes"`
 }
 
 func (k *Kafka) TopicFor(name string) string {
@@ -36,6 +39,9 @@ func (c *Kafka) Defaults() {
     c.Addresses = []string{"localhost:2181"}
     c.Database.ConnectionString = DataSource("file:naffka.db")
     c.TopicPrefix = "Dendrite"
+
+    maxBytes := 1024 * 1024 * 8 // 8MB
+    c.MaxMessageBytes = &maxBytes
 }
 
 func (c *Kafka) Verify(configErrs *ConfigErrors, isMonolith bool) {
@@ -50,4 +56,5 @@ func (c *Kafka) Verify(configErrs *ConfigErrors, isMonolith bool) {
         checkNotZero(configErrs, "global.kafka.addresses", int64(len(c.Addresses)))
     }
     checkNotEmpty(configErrs, "global.kafka.topic_prefix", string(c.TopicPrefix))
+    checkPositive(configErrs, "global.kafka.max_message_bytes", int64(*c.MaxMessageBytes))
 }
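The *int field is what lets Defaults() and the YAML loader cooperate: the default is seeded first, and unmarshalling replaces it only when the key is actually present in the file. A minimal sketch of that pattern, assuming a trimmed-down stand-in struct (kafkaConfig here is illustrative, not Dendrite's real type):

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// kafkaConfig is a trimmed-down stand-in for the real Kafka config struct.
type kafkaConfig struct {
	MaxMessageBytes *int `yaml:"max_message_bytes"`
}

// defaults seeds the pointer so a missing YAML key still yields a usable value.
func (c *kafkaConfig) defaults() {
	maxBytes := 1024 * 1024 * 8 // 8MB, matching the documented default
	c.MaxMessageBytes = &maxBytes
}

func main() {
	var c kafkaConfig
	c.defaults()

	// An absent key leaves the seeded default untouched.
	if err := yaml.Unmarshal([]byte("{}"), &c); err != nil {
		panic(err)
	}
	fmt.Println(*c.MaxMessageBytes) // 8388608

	// An explicit key overrides it.
	if err := yaml.Unmarshal([]byte("max_message_bytes: 1048576"), &c); err != nil {
		panic(err)
	}
	fmt.Println(*c.MaxMessageBytes) // 1048576
}

This is also why Verify() can dereference *c.MaxMessageBytes unconditionally: by the time it runs, Defaults() is expected to have seeded the pointer.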
diff --git a/internal/setup/kafka/kafka.go b/internal/setup/kafka/kafka.go
index 9855ae156..091025ec7 100644
--- a/internal/setup/kafka/kafka.go
+++ b/internal/setup/kafka/kafka.go
@@ -17,12 +17,17 @@ func SetupConsumerProducer(cfg *config.Kafka) (sarama.Consumer, sarama.SyncProdu
 
 // setupKafka creates kafka consumer/producer pair from the config.
 func setupKafka(cfg *config.Kafka) (sarama.Consumer, sarama.SyncProducer) {
-    consumer, err := sarama.NewConsumer(cfg.Addresses, nil)
+    sCfg := sarama.NewConfig()
+    sCfg.Producer.MaxMessageBytes = *cfg.MaxMessageBytes
+    sCfg.Producer.Return.Successes = true
+    sCfg.Consumer.Fetch.Default = int32(*cfg.MaxMessageBytes)
+
+    consumer, err := sarama.NewConsumer(cfg.Addresses, sCfg)
     if err != nil {
         logrus.WithError(err).Panic("failed to start kafka consumer")
     }
 
-    producer, err := sarama.NewSyncProducer(cfg.Addresses, nil)
+    producer, err := sarama.NewSyncProducer(cfg.Addresses, sCfg)
     if err != nil {
         logrus.WithError(err).Panic("failed to setup kafka producers")
     }
diff --git a/roomserver/internal/input/input.go b/roomserver/internal/input/input.go
index d340ac218..99c15f77a 100644
--- a/roomserver/internal/input/input.go
+++ b/roomserver/internal/input/input.go
@@ -102,7 +102,13 @@ func (r *Inputer) WriteOutputEvents(roomID string, updates []api.OutputEvent) er
             Value: sarama.ByteEncoder(value),
         }
     }
-    return r.Producer.SendMessages(messages)
+    errs := r.Producer.SendMessages(messages)
+    if errs != nil {
+        for _, err := range errs.(sarama.ProducerErrors) {
+            log.WithError(err).WithField("message_bytes", err.Msg.Value.Length()).Error("Write to kafka failed")
+        }
+    }
+    return errs
 }
 
 // InputRoomEvents implements api.RoomserverInternalAPI
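Sarama documents that SyncProducer.SendMessages returns an error of type sarama.ProducerErrors when any message in the batch fails, which is what makes the bare type assertion in WriteOutputEvents hold under that contract; logging err.Msg.Value.Length() then makes it visible when an oversized payload tripped the new limit.

The two sides of the limit have to move together: Producer.MaxMessageBytes caps what the producer will send, while Consumer.Fetch.Default must be at least as large so that fetch requests can hold such a message. A standalone sketch of the resulting sarama config, runnable outside Dendrite (the 8MB value mirrors the default introduced above):

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	maxBytes := 1024 * 1024 * 8 // mirrors the Dendrite default of 8388608

	cfg := sarama.NewConfig()
	// Producer side: refuse to send anything larger than maxBytes.
	cfg.Producer.MaxMessageBytes = maxBytes
	// SyncProducer requires Return.Successes, as in setupKafka above.
	cfg.Producer.Return.Successes = true
	// Consumer side: fetch requests must be big enough to hold such a
	// message, otherwise oversized messages can stall a partition.
	cfg.Consumer.Fetch.Default = int32(maxBytes)

	// Validate runs sarama's own consistency checks before any broker
	// connection is attempted.
	if err := cfg.Validate(); err != nil {
		fmt.Println("invalid sarama config:", err)
		return
	}
	fmt.Println("sarama config OK")
}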