// Copyright 2017 Vector Creations Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package input contains the code that processes new room events.
package input

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"sync"
	"time"

	userapi "github.com/matrix-org/dendrite/userapi/api"
	"github.com/matrix-org/gomatrixserverlib/fclient"
	"github.com/matrix-org/gomatrixserverlib/spec"

	"github.com/Arceliar/phony"
	"github.com/getsentry/sentry-go"
	"github.com/matrix-org/gomatrixserverlib"
	"github.com/nats-io/nats.go"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/sirupsen/logrus"

	fedapi "github.com/matrix-org/dendrite/federationapi/api"
	"github.com/matrix-org/dendrite/roomserver/acls"
	"github.com/matrix-org/dendrite/roomserver/api"
	"github.com/matrix-org/dendrite/roomserver/internal/query"
	"github.com/matrix-org/dendrite/roomserver/producers"
	"github.com/matrix-org/dendrite/roomserver/storage"
	"github.com/matrix-org/dendrite/roomserver/types"
	"github.com/matrix-org/dendrite/setup/config"
	"github.com/matrix-org/dendrite/setup/jetstream"
	"github.com/matrix-org/dendrite/setup/process"
)

// Inputer is responsible for consuming from the roomserver input
// streams and processing the events. All input events are queued
// into a single NATS stream and the order is preserved strictly.
// The `room_id` message header will contain the room ID, which will
// be used to assign the pending event to a per-room worker.
//
// The input API maintains an ephemeral headers-only consumer. It
// will speed through the stream working out which room IDs are
// pending and create durable consumers for them. The durable
// consumer will then be used for each room worker goroutine to
// fetch events one by one and process them. Each room having a
// durable consumer of its own means there is no head-of-line
// blocking between rooms. Filtering ensures that each durable
// consumer only receives events for the room it is interested in.
//
// The ephemeral consumer closely tracks the newest events. The
// per-room durable consumers will only progress through the stream
// as events are processed.
//
//	      A BC *   -> positions of each consumer (* = ephemeral)
//	      ⌄ ⌄⌄ ⌄
//	ABAABCAABCAA   -> newest (letter = subject for each message)
//
// In this example, A is still processing an event but has two
// pending events to process afterwards. Both B and C are caught
// up, so they will do nothing until a new event comes in for B
// or C.
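//
// As a rough illustration (the exact prefix and tokenised form come from
// the JetStream config via Prefixed and Tokenise), each event is published
// to a per-room subject along the lines of:
//
//	<prefix>InputRoomEvent.<tokenised room ID>
//
// so a durable consumer filtering on that subject only ever receives
// events for its own room.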
type Inputer struct {
	Cfg                 *config.RoomServer
	ProcessContext      *process.ProcessContext
	DB                  storage.RoomDatabase
	NATSClient          *nats.Conn
	JetStream           nats.JetStreamContext
	Durable             nats.SubOpt
	ServerName          spec.ServerName
	SigningIdentity     func(ctx context.Context, roomID spec.RoomID, senderID spec.UserID) (fclient.SigningIdentity, error)
	FSAPI               fedapi.RoomserverFederationAPI
	RSAPI               api.RoomserverInternalAPI
	KeyRing             gomatrixserverlib.JSONVerifier
	ACLs                *acls.ServerACLs
	InputRoomEventTopic string
	OutputProducer      *producers.RoomEventProducer
	workers             sync.Map // room ID -> *worker

	Queryer       *query.Queryer
	UserAPI       userapi.RoomserverUserAPI
	EnableMetrics bool
}

// If a room consumer is inactive for a while then we will allow NATS
// to clean it up. This stops us from holding onto durable consumers
// indefinitely for rooms that might no longer be active, since each one
// carries some interest-tracking overhead in the NATS server. If the
// room becomes active again then we'll recreate the consumer anyway.
const inactiveThreshold = time.Hour * 24
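
// worker is an actor (via the embedded phony.Inbox) that owns the durable
// pull subscription for a single room and processes that room's events one
// at a time. The embedded mutex guards the subscription field, which is set
// to nil whenever the worker unsubscribes due to inactivity or errors.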
type worker struct {
	phony.Inbox
	sync.Mutex
	r            *Inputer
	roomID       string
	subscription *nats.Subscription
}

func (r *Inputer) startWorkerForRoom(roomID string) {
	v, loaded := r.workers.LoadOrStore(roomID, &worker{
		r:      r,
		roomID: roomID,
	})
	w := v.(*worker)
	w.Lock()
	defer w.Unlock()
	if !loaded || w.subscription == nil {
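		// Only create the consumer and subscription if the worker is new,
		// or if an existing worker's subscription was previously torn down
		// after the room went idle.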
		streamName := r.Cfg.Matrix.JetStream.Prefixed(jetstream.InputRoomEvent)
		consumer := r.Cfg.Matrix.JetStream.Prefixed("RoomInput" + jetstream.Tokenise(w.roomID))
		subject := r.Cfg.Matrix.JetStream.Prefixed(jetstream.InputRoomEventSubj(w.roomID))

		logger := logrus.WithFields(logrus.Fields{
			"stream_name": streamName,
			"consumer":    consumer,
		})

		// Create the consumer. We do this as a specific step rather than
		// letting PullSubscribe create it for us, because we need the consumer
		// to outlive the subscription. If we do it this way, we can Bind in the
		// next step, and when we Unsubscribe, the consumer continues to live. If
		// we leave PullSubscribe to create the durable consumer, Unsubscribe will
		// delete it because it thinks it "owns" it, which in turn breaks the
		// interest-based retention storage policy.
		// If the durable consumer already exists, this is effectively a no-op.
		// Another interesting tidbit: the ACK policy is now explicit rather
		// than "all"; consumers created with the old policy are migrated below.
		info, err := w.r.JetStream.ConsumerInfo(streamName, consumer)
		if err != nil && !errors.Is(err, nats.ErrConsumerNotFound) {
			// Log and return; we will retry on the next room activity anyway.
			logger.WithError(err).Error("Failed to get consumer info")
			return
		}

		consumerConfig := &nats.ConsumerConfig{
			Durable:           consumer,
			AckPolicy:         nats.AckExplicitPolicy,
			DeliverPolicy:     nats.DeliverAllPolicy,
			FilterSubject:     subject,
			AckWait:           MaximumMissingProcessingTime + (time.Second * 10),
			InactiveThreshold: inactiveThreshold,
		}

		// The consumer already exists, so try to update it if necessary.
		if info != nil {
			// We're not using reflect.DeepEqual here, since consumerConfig
			// does not explicitly set every field (e.g. the consumer name,
			// which NATS adds later), so that would result in constantly
			// updating or recreating the consumer.
			switch {
			case info.Config.AckWait.Nanoseconds() != consumerConfig.AckWait.Nanoseconds():
				// Initially we had an AckWait of 2m 10s, now we have 5m 10s, so
				// we need to update existing consumers.
				fallthrough
			case info.Config.AckPolicy != consumerConfig.AckPolicy:
				// We've changed the AckPolicy from AckAll to AckExplicit, which
				// requires recreating the consumer. (Note: only a few kinds of
				// change actually require a recreation.)
				logger.Warn("Consumer already exists, trying to update it.")
				// Try updating the consumer first.
				if _, err = w.r.JetStream.UpdateConsumer(streamName, consumerConfig); err != nil {
					// We failed to update the consumer, so recreate it.
					logger.WithError(err).Warn("Unable to update consumer, recreating...")
					if err = w.r.JetStream.DeleteConsumer(streamName, consumer); err != nil {
						logger.WithError(err).Fatal("Unable to delete consumer")
						return
					}
					// Set info to nil, so that the consumer is recreated below
					// with the correct config.
					info = nil
				}
			}
		}

		if info == nil {
			// Create the consumer with the correct config.
			if _, err = w.r.JetStream.AddConsumer(streamName, consumerConfig); err != nil {
				logger.WithError(err).Errorf("Failed to create consumer for room %q", w.roomID)
				return
			}
		}

		// Bind to our durable consumer. We want to receive all messages waiting
		// for this subject and we want to manually acknowledge them, so that we
		// can ensure they are only cleaned up when we are done processing them.
		sub, err := w.r.JetStream.PullSubscribe(
			subject, consumer,
			nats.ManualAck(),
			nats.DeliverAll(),
			nats.AckWait(MaximumMissingProcessingTime+(time.Second*10)),
			nats.Bind(r.InputRoomEventTopic, consumer),
			nats.InactiveThreshold(inactiveThreshold),
		)
		if err != nil {
			logger.WithError(err).Errorf("Failed to subscribe to stream for room %q", w.roomID)
			return
		}

		// Go and start pulling messages off the queue.
		w.subscription = sub
		w.Act(nil, w._next)
	}
}

// Start creates an ephemeral non-durable consumer on the roomserver
// input topic. It is configured to deliver us headers only because we
// don't actually care about the contents of the message at this point,
// we only care about the `room_id` field. Once a message arrives, we
// will look to see if we have a worker for that room which has its
// own consumer. If we don't, we'll start one.
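//
// Because this consumer is ephemeral and created with DeliverAll, a restart
// of the process will scan the stream again from the oldest retained
// message, recreating the per-room workers for any rooms that still have
// unprocessed events.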
func (r *Inputer) Start() error {
	if r.EnableMetrics {
		prometheus.MustRegister(roomserverInputBackpressure, processRoomEventDuration)
	}
	_, err := r.JetStream.Subscribe(
		"", // This is blank because we specified it in BindStream.
		func(m *nats.Msg) {
			roomID := m.Header.Get(jetstream.RoomID)
			r.startWorkerForRoom(roomID)
			_ = m.Ack()
		},
		nats.HeadersOnly(),
		nats.DeliverAll(),
		nats.AckExplicit(),
		nats.ReplayInstant(),
		nats.BindStream(r.InputRoomEventTopic),
	)

	// Make sure that the room consumers have the right config.
	stream := r.Cfg.Matrix.JetStream.Prefixed(jetstream.InputRoomEvent)
	for consumer := range r.JetStream.Consumers(stream) {
		switch {
		case consumer.Config.Durable == "":
			continue // Ignore ephemeral consumers
		case consumer.Config.InactiveThreshold != inactiveThreshold:
			consumer.Config.InactiveThreshold = inactiveThreshold
			if _, cerr := r.JetStream.UpdateConsumer(stream, &consumer.Config); cerr != nil {
				logrus.WithError(cerr).Warnf("Failed to update inactive threshold on consumer %q", consumer.Name)
			}
		}
	}

	return err
}

// _next is called by the worker for the room. It must only be called
// by the actor embedded into the worker.
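// The actor inbox runs at most one queued function at a time, so _next
// never runs concurrently with itself for the same room, which is what
// keeps per-room event processing strictly ordered.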
func (w *worker) _next() {
	// Look up what the next event is that's waiting to be processed.
	ctx, cancel := context.WithTimeout(w.r.ProcessContext.Context(), time.Minute)
	defer cancel()
	if scope := sentry.CurrentHub().Scope(); scope != nil {
		scope.SetTag("room_id", w.roomID)
	}
	msgs, err := w.subscription.Fetch(1, nats.Context(ctx))
	switch err {
	case nil:
		// Make sure that once we're done here, we queue up another call
		// to _next in the inbox.
		defer w.Act(nil, w._next)

		// If no error was reported, but we didn't get exactly one message,
		// then skip over this and try again on the next iteration.
		if len(msgs) != 1 {
			return
		}

	case context.DeadlineExceeded, context.Canceled:
		// The context deadline was exceeded, so we've been waiting for more
		// than a minute for activity in this room. At this point we will
		// shut down the subscriber to free up resources. It'll get started
		// again if new activity happens.
		if err = w.subscription.Unsubscribe(); err != nil {
			logrus.WithError(err).Errorf("Failed to unsubscribe from stream for room %q", w.roomID)
		}
		w.Lock()
		w.subscription = nil
		w.Unlock()
		return

	default:
		// Something went wrong while trying to fetch the next event
		// from the queue. In which case, we'll shut down the subscriber
		// and wait to be notified about new room activity again. Maybe
		// the problem will be corrected by then.
		logrus.WithError(err).Errorf("Failed to get next stream message for room %q", w.roomID)
		if err = w.subscription.Unsubscribe(); err != nil {
			logrus.WithError(err).Errorf("Failed to unsubscribe from stream for room %q", w.roomID)
		}
		w.Lock()
		w.subscription = nil
		w.Unlock()
		return
	}

	// Since we either Ack() or Term() the message at this point, we can
	// safely defer decrementing the room backpressure gauge.
	defer roomserverInputBackpressure.With(prometheus.Labels{"room_id": w.roomID}).Dec()

	// Try to unmarshal the input room event. If the JSON unmarshalling
	// fails then we'll terminate the message, which notifies NATS that
	// we are done with the message and never want to see it again.
	msg := msgs[0]
	var inputRoomEvent api.InputRoomEvent
	if err = json.Unmarshal(msg.Data, &inputRoomEvent); err != nil {
		// Using AckWait here makes the call synchronous; 5 seconds is the
		// default value used by NATS.
		_ = msg.Term(nats.AckWait(time.Second * 5))
		return
	}

	if scope := sentry.CurrentHub().Scope(); scope != nil {
		scope.SetTag("event_id", inputRoomEvent.Event.EventID())
	}

	// Process the room event. If something goes wrong then we'll tell
	// NATS to terminate the message. We'll store the error result as
	// a string, because we might want to return that to the caller if
	// it was a synchronous request.
	var errString string
	if err = w.r.processRoomEvent(
		w.r.ProcessContext.Context(),
		spec.ServerName(msg.Header.Get("virtual_host")),
		&inputRoomEvent,
	); err != nil {
		switch err.(type) {
		case types.RejectedError:
			// Don't send events that were rejected to Sentry
			logrus.WithError(err).WithFields(logrus.Fields{
				"room_id":  w.roomID,
				"event_id": inputRoomEvent.Event.EventID(),
				"type":     inputRoomEvent.Event.Type(),
			}).Warn("Roomserver rejected event")
		default:
			if !errors.Is(err, context.DeadlineExceeded) && !errors.Is(err, context.Canceled) {
				sentry.CaptureException(err)
			}
			logrus.WithError(err).WithFields(logrus.Fields{
				"room_id":  w.roomID,
				"event_id": inputRoomEvent.Event.EventID(),
				"type":     inputRoomEvent.Event.Type(),
			}).Warn("Roomserver failed to process event")
		}
		// Even though we failed to process this message (e.g. because
		// Dendrite was restarting and the context was canceled), it may
		// already have been, or soon will be, queued for redelivery. We
		// only Ack if the process context was not yet canceled, so that
		// the message is redelivered and reprocessed after a restart.
		if w.r.ProcessContext.Context().Err() == nil {
			_ = msg.AckSync()
		}
		errString = err.Error()
	} else {
		_ = msg.AckSync()
	}

	// If it was a synchronous input request then the "sync" field
	// will be present in the message. That means that someone is
	// waiting for a response. The temporary inbox name is present in
	// that field, so send back the error string (if any). If there
	// was no error then we'll return a blank message, which means
	// that everything was OK.
	if replyTo := msg.Header.Get("sync"); replyTo != "" {
		if err = w.r.NATSClient.Publish(replyTo, []byte(errString)); err != nil {
			logrus.WithError(err).WithFields(logrus.Fields{
				"room_id":  w.roomID,
				"event_id": inputRoomEvent.Event.EventID(),
				"type":     inputRoomEvent.Event.Type(),
			}).Warn("Roomserver failed to respond for sync event")
		}
	}
}

// queueInputRoomEvents queues events into the roomserver input
// stream in NATS.
func (r *Inputer) queueInputRoomEvents(
	ctx context.Context,
	request *api.InputRoomEventsRequest,
) (replySub *nats.Subscription, err error) {
	// If the request is synchronous then we need to create a
	// temporary inbox to wait for responses on, and then create
	// a subscription to it. If it's asynchronous then we won't
	// bother, so these values will remain empty.
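	// This follows the usual NATS request/reply pattern: the worker will
	// publish the processing result (an error string, or an empty payload
	// on success) back to the temporary inbox named in the "sync" header.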
	var replyTo string
	if !request.Asynchronous {
		replyTo = nats.NewInbox()
		replySub, err = r.NATSClient.SubscribeSync(replyTo)
		if err != nil {
			return nil, fmt.Errorf("r.NATSClient.SubscribeSync: %w", err)
		}
		if replySub == nil {
			// This shouldn't ever happen, but it doesn't hurt to check
			// because we can potentially avoid a nil pointer panic later
			// if it did for some reason.
			return nil, fmt.Errorf("expected a subscription to the temporary inbox")
		}
	}

	// For each event, marshal the input room event and then
	// send it into the input queue.
	for _, e := range request.InputRoomEvents {
		roomID := e.Event.RoomID().String()
		subj := r.Cfg.Matrix.JetStream.Prefixed(jetstream.InputRoomEventSubj(roomID))
		msg := &nats.Msg{
			Subject: subj,
			Header:  nats.Header{},
		}
		msg.Header.Set("room_id", roomID)
		if replyTo != "" {
			msg.Header.Set("sync", replyTo)
		}
		msg.Header.Set("virtual_host", string(request.VirtualHost))
		msg.Data, err = json.Marshal(e)
		if err != nil {
			return nil, fmt.Errorf("json.Marshal: %w", err)
		}
		if _, err = r.JetStream.PublishMsg(msg, nats.Context(ctx)); err != nil {
			logrus.WithError(err).WithFields(logrus.Fields{
				"room_id":  roomID,
				"event_id": e.Event.EventID(),
				"subj":     subj,
			}).Error("Roomserver failed to queue event")
			return nil, fmt.Errorf("r.JetStream.PublishMsg: %w", err)
		}

		// Now that the event is queued, increment the room backpressure.
		roomserverInputBackpressure.With(prometheus.Labels{"room_id": roomID}).Inc()
	}
	return
}

// InputRoomEvents implements api.RoomserverInternalAPI
func (r *Inputer) InputRoomEvents(
	ctx context.Context,
	request *api.InputRoomEventsRequest,
	response *api.InputRoomEventsResponse,
) {
	// Queue up the event into the roomserver.
	replySub, err := r.queueInputRoomEvents(ctx, request)
	if err != nil {
		response.ErrMsg = err.Error()
		return
	}

	// If we aren't waiting for synchronous responses then we can
	// give up here, there is nothing further to do.
	if replySub == nil {
		return
	}

	// Otherwise, we'll want to sit and wait for the responses
	// from the roomserver. There will be one response for every
	// input we submitted. The last error value we receive will
	// be the one returned as the error string.
	defer replySub.Drain() // nolint:errcheck
	for i := 0; i < len(request.InputRoomEvents); i++ {
		msg, err := replySub.NextMsgWithContext(ctx)
		if err != nil {
			response.ErrMsg = err.Error()
			return
		}
		if len(msg.Data) > 0 {
			response.ErrMsg = string(msg.Data)
		}
	}
}
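
// roomserverInputBackpressure reports how many input events are currently
// queued in NATS for each room: the gauge is incremented when an event is
// published to the input stream and decremented once the worker has
// acknowledged or terminated the message.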
var roomserverInputBackpressure = prometheus.NewGaugeVec(
	prometheus.GaugeOpts{
		Namespace: "dendrite",
		Subsystem: "roomserver",
		Name:      "input_backpressure",
		Help:      "How many events are queued for input for a given room",
	},
	[]string{"room_id"},
)