Mirror of https://github.com/matrix-org/dendrite.git
Try refactoring /send concurrency

parent 39afdcfdd3
commit 6f129ca0ee
@@ -22,9 +22,7 @@ import (
	"fmt"
	"net/http"
	"sync"
	"time"

	"github.com/getsentry/sentry-go"
	"github.com/matrix-org/dendrite/clientapi/jsonerror"
	eduserverAPI "github.com/matrix-org/dendrite/eduserver/api"
	federationAPI "github.com/matrix-org/dendrite/federationapi/api"
@@ -36,6 +34,7 @@ import (
	"github.com/matrix-org/util"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/sirupsen/logrus"
	"go.uber.org/atomic"
)

const (
@@ -191,11 +190,26 @@ type txnFederationClient interface {
	roomVersion gomatrixserverlib.RoomVersion) (res gomatrixserverlib.RespMissingEvents, err error)
}

type inputTask struct {
	ctx   context.Context
	t     *txnReq
	event *gomatrixserverlib.Event
	wg    *sync.WaitGroup
	err   error // written back by worker, only safe to read when all tasks are done
}

type inputWorker struct {
	running atomic.Bool
	input   *fifoQueue
}

var inputWorkers sync.Map // room ID -> *inputWorker

func (t *txnReq) processTransaction(ctx context.Context) (*gomatrixserverlib.RespSend, *util.JSONResponse) {
	results := make(map[string]gomatrixserverlib.PDUResult)
	var resultsMutex sync.Mutex
	//var resultsMutex sync.Mutex

	pdus := []*gomatrixserverlib.HeaderedEvent{}
	var wg sync.WaitGroup
	for _, pdu := range t.PDUs {
		pduCountTotal.WithLabelValues("total").Inc()
		var header struct {
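A note on the inputTask contract introduced above: the submitter does wg.Add(1) for each queued event, the worker writes its outcome into err, and, as the field comment says, err is only safe to read once wg.Wait() has returned, because the Done() that follows the write is what orders it. A standalone sketch of that write-back pattern (toy types and names, not the commit's code):

	package main

	import (
		"errors"
		"fmt"
		"sync"
	)

	// task mirrors the shape of inputTask: the worker writes err back,
	// and the producer may only read it after wg.Wait() has returned.
	type task struct {
		payload string
		wg      *sync.WaitGroup
		err     error
	}

	func process(p string) error {
		if p == "bad" {
			return errors.New("rejected")
		}
		return nil
	}

	func workerLoop(in <-chan *task) {
		for t := range in {
			t.err = process(t.payload) // the write happens-before wg.Done()
			t.wg.Done()
		}
	}

	func main() {
		in := make(chan *task, 4)
		go workerLoop(in)

		var wg sync.WaitGroup
		tasks := []*task{
			{payload: "good", wg: &wg},
			{payload: "bad", wg: &wg},
		}
		for _, t := range tasks {
			wg.Add(1) // one Add per queued task, as in processTransaction
			in <- t
		}
		wg.Wait() // after this, reading t.err is race-free
		for _, t := range tasks {
			fmt.Printf("%s: err=%v\n", t.payload, t.err)
		}
	}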
@@ -246,36 +260,55 @@ func (t *txnReq) processTransaction(ctx context.Context) (*gomatrixserverlib.Res
			}
			continue
		}
		pdus = append(pdus, event.Headered(verRes.RoomVersion))
		v, _ := inputWorkers.LoadOrStore(event.RoomID(), &inputWorker{
			input: newFIFOQueue(),
		})
		worker := v.(*inputWorker)
		if !worker.running.Load() {
			go worker.run()
		}
		wg.Add(1)
		worker.input.push(&inputTask{
			ctx:   ctx,
			t:     t,
			event: event,
			wg:    &wg,
		})
	}

	perRoom := map[string]chan *gomatrixserverlib.Event{}
	perCount := map[string]int{}
	for _, e := range pdus {
		perCount[e.RoomID()]++
	}
	for s, c := range perCount {
		perRoom[s] = make(chan *gomatrixserverlib.Event, c)
	}
	for _, e := range pdus {
		perRoom[e.RoomID()] <- e.Unwrap()
	}
	pdus = nil // nolint:ineffassign

	var wg sync.WaitGroup
	wg.Add(len(perRoom) + 1)
	wg.Wait()

	go func() {
		defer wg.Done()
		t.processEDUs(ctx)
	}()

	for _, q := range perRoom {
		go func(q chan *gomatrixserverlib.Event) {
			defer wg.Done()
			for e := range q {
				evStart := time.Now()
				if err := t.processEvent(ctx, e); err != nil {
	wg.Wait()

	if c := len(results); c > 0 {
		util.GetLogger(ctx).Infof("Processed %d PDUs from transaction %q", c, t.TransactionID)
	}
	return &gomatrixserverlib.RespSend{PDUs: results}, nil
}

func (t *inputWorker) run() {
	if !t.running.CAS(false, true) {
		return
	}
	defer t.running.Store(false)
	for {
		task, ok := t.input.pop()
		if !ok {
			return
		}
		if task == nil {
			continue
		}
		//evStart := time.Now()
		task.err = task.t.processEvent(task.ctx, task.event)
		/*
			if task.err = task.t.processEvent(task.ctx, task.event); task.err != nil {
				err := task.err
				// If the error is due to the event itself being bad then we skip
				// it and move onto the next event. We report an error so that the
				// sender knows that we have skipped processing it.
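This hunk is the heart of the refactor: LoadOrStore gives get-or-create semantics per room, the running.Load() check is deliberately racy, and run()'s CAS(false, true) makes a redundantly spawned goroutine a harmless no-op. A minimal, self-contained sketch of the same lazy-worker pattern, using the standard library's sync/atomic.Bool in place of go.uber.org/atomic (keys and payloads here are illustrative only):

	package main

	import (
		"fmt"
		"sync"
		"sync/atomic"
		"time"
	)

	// worker is a toy stand-in for inputWorker: one consumer per key,
	// started lazily, with an atomic flag ensuring at most one loop runs.
	type worker struct {
		running atomic.Bool // stdlib; the commit uses go.uber.org/atomic
		tasks   chan string
	}

	func (w *worker) run() {
		// Same role as running.CAS(false, true) in the commit: if another
		// goroutine already owns this worker's loop, bail out immediately.
		if !w.running.CompareAndSwap(false, true) {
			return
		}
		defer w.running.Store(false)
		for t := range w.tasks {
			fmt.Println("processed:", t)
		}
	}

	var workers sync.Map // key -> *worker, like inputWorkers (room ID -> *inputWorker)

	func dispatch(key, payload string) {
		v, _ := workers.LoadOrStore(key, &worker{tasks: make(chan string, 16)})
		w := v.(*worker)
		w.tasks <- payload
		// The Load() check is racy on purpose: a duplicate go w.run() is
		// a cheap no-op thanks to the CompareAndSwap above.
		if !w.running.Load() {
			go w.run()
		}
	}

	func main() {
		dispatch("!roomA:example.org", "event 1") // hypothetical room IDs
		dispatch("!roomA:example.org", "event 2")
		dispatch("!roomB:example.org", "event 3")
		time.Sleep(100 * time.Millisecond) // toy-only; the commit tracks completion with a WaitGroup
	}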
@@ -295,7 +328,7 @@ func (t *txnReq) processTransaction(ctx context.Context) (*gomatrixserverlib.Res
				sentry.CaptureException(err)
				// Any other error should be the result of a temporary error in
				// our server so we should bail processing the transaction entirely.
				util.GetLogger(ctx).Warnf("Processing %s failed fatally: %s", e.EventID(), err)
				util.GetLogger(task.ctx).Warnf("Processing %s failed fatally: %s", e.EventID(), err)
				processEventSummary.WithLabelValues(t.work, MetricsOutcomeFatal).Observe(
					float64(time.Since(evStart).Nanoseconds()) / 1000.,
				)
@@ -329,21 +362,8 @@ func (t *txnReq) processTransaction(ctx context.Context) (*gomatrixserverlib.Res
					float64(time.Since(evStart).Nanoseconds()) / 1000.,
				)
			}
		*/
			}
		}(q)
	}

	wg.Wait()

	for k := range perRoom {
		close(perRoom[k])
		perRoom[k] = nil
	}

	if c := len(results); c > 0 {
		util.GetLogger(ctx).Infof("Processed %d PDUs from transaction %q", c, t.TransactionID)
	}
	return &gomatrixserverlib.RespSend{PDUs: results}, nil
}

// isProcessingErrorFatal returns true if the error is really bad and
federationapi/routing/send_fifo.go (new file, 64 lines)

@@ -0,0 +1,64 @@
package routing

import (
	"sync"
)

type fifoQueue struct {
	tasks  []*inputTask
	count  int
	mutex  sync.Mutex
	notifs chan struct{}
}

func newFIFOQueue() *fifoQueue {
	q := &fifoQueue{
		notifs: make(chan struct{}, 1),
	}
	return q
}

func (q *fifoQueue) push(frame *inputTask) {
	q.mutex.Lock()
	defer q.mutex.Unlock()
	q.tasks = append(q.tasks, frame)
	q.count++
	select {
	case q.notifs <- struct{}{}:
	default:
	}
}
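push never blocks on notification: because notifs has capacity 1, the select with a default case queues at most one pending wakeup and lets any further pushes coalesce into it. The same pattern in isolation (toy code, not from the commit):

	package main

	import "fmt"

	// Non-blocking, coalescing notification via a 1-buffered channel.
	func main() {
		notifs := make(chan struct{}, 1)
		notify := func() {
			select {
			case notifs <- struct{}{}: // the single buffered slot was free
			default: // a wakeup is already pending: coalesce, don't block
			}
		}
		for i := 0; i < 5; i++ {
			notify()
		}
		fmt.Println(len(notifs)) // 1: five notifies collapsed into one wakeup
	}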
// pop returns the first item of the queue, if there is one.
// The second return value will indicate if a task was returned.
// You must check this value, even after calling wait().
func (q *fifoQueue) pop() (*inputTask, bool) {
	q.mutex.Lock()
	defer q.mutex.Unlock()
	if q.count == 0 {
		return nil, false
	}
	frame := q.tasks[0]
	q.tasks[0] = nil
	q.tasks = q.tasks[1:]
	q.count--
	if q.count == 0 {
		// Force a GC of the underlying array, since it might have
		// grown significantly if the queue was hammered for some reason
		q.tasks = nil
	}
	return frame, true
}
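The nil writes around the re-slice are deliberate: q.tasks = q.tasks[1:] alone keeps the backing array, and every task it still references, reachable. A toy illustration, independent of the commit:

	package main

	import "fmt"

	// Re-slicing the head off a queue does not free it by itself.
	func main() {
		q := []*string{new(string), new(string), new(string)}
		*q[0] = "task 0"

		head := q[0] // what pop() hands back
		q[0] = nil   // clear the slot: the backing array outlives the re-slice
		q = q[1:]    // a shorter view of the very same array

		fmt.Println(len(q), cap(q), *head) // 2 2 task 0
		// Once the queue drains, pop() sets q.tasks = nil, dropping the
		// whole backing array so the garbage collector can reclaim it.
	}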
// wait returns a channel which can be used to detect when an
// item is waiting in the queue.
func (q *fifoQueue) wait() <-chan struct{} {
	q.mutex.Lock()
	defer q.mutex.Unlock()
	if q.count > 0 && len(q.notifs) == 0 {
		ch := make(chan struct{})
		close(ch)
		return ch
	}
	return q.notifs
}
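wait() pairs with pop() in a consumer loop: since wakeups coalesce and can outlive the tasks they announced, the loop must drain the queue fully and tolerate pop() returning false, exactly as the comment on pop() warns. A hypothetical consumer in the same package (not part of this commit) might look like:

	package routing

	import "context"

	// consume is a hypothetical helper, not in the commit: it shows how
	// wait() and pop() are intended to pair up in a worker loop.
	func consume(ctx context.Context, q *fifoQueue, handle func(*inputTask)) {
		for {
			select {
			case <-ctx.Done():
				return
			case <-q.wait():
			}
			// Wakeups coalesce, so drain fully; pop() may report no task
			// even after a wakeup, hence the mandatory ok check.
			for {
				task, ok := q.pop()
				if !ok {
					break
				}
				handle(task)
			}
		}
	}

The committed inputWorker.run() takes a simpler route, popping until the queue is empty and exiting, since the next push restarts the worker.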