matrix-org/dendrite (mirror of https://github.com/matrix-org/dendrite.git)

Commit b63f699f1b: Use a FIFO queue instead of a channel to reduce backpressure
Parent commit: a6f7e83596
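The backpressure the title refers to comes from the channel send in InputRoomEvents shown below: each room's worker used to read from a channel with a 32-slot buffer, and once that buffer filled, worker.input <- tasks[i] blocked the caller until the worker caught up. A minimal standalone sketch of that blocking behaviour (hypothetical task type, buffer size and timings, not Dendrite code):

package main

import (
    "fmt"
    "time"
)

type task struct{ id int }

func main() {
    // A bounded channel behaves like the old worker.input: once its buffer
    // is full and the consumer is slower than the producer, every further
    // send blocks the producer.
    input := make(chan *task, 2)

    go func() {
        for t := range input {
            time.Sleep(100 * time.Millisecond) // simulate a slow worker
            fmt.Println("processed", t.id)
        }
    }()

    for i := 0; i < 5; i++ {
        start := time.Now()
        input <- &task{id: i} // blocks once the buffer is full
        fmt.Printf("queued %d after %v\n", i, time.Since(start))
    }
    close(input)
    time.Sleep(time.Second) // let the consumer drain before exiting
}

The commit swaps that channel for an unbounded, mutex-guarded FIFO so the producer side never blocks.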
@@ -38,8 +38,7 @@ type Inputer struct {
     ServerName           gomatrixserverlib.ServerName
     ACLs                 *acls.ServerACLs
     OutputRoomEventTopic string

-    workers sync.Map // room ID -> *inputWorker
+    workers sync.Map // room ID -> *inputWorker
 }

 type inputTask struct {
@@ -52,7 +51,7 @@ type inputTask struct {
 type inputWorker struct {
     r       *Inputer
     running atomic.Bool
-    input   chan *inputTask
+    input   *fifoQueue
 }

 // Guarded by a CAS on w.running
@@ -60,7 +59,11 @@ func (w *inputWorker) start() {
     defer w.running.Store(false)
     for {
         select {
-        case task := <-w.input:
+        case <-w.input.wait():
+            task, ok := w.input.pop()
+            if !ok {
+                continue
+            }
             hooks.Run(hooks.KindNewEventReceived, task.event.Event)
             _, task.err = w.r.processRoomEvent(task.ctx, task.event)
             if task.err == nil {
@@ -143,7 +146,7 @@ func (r *Inputer) InputRoomEvents(
         // room - the channel will be quite small as it's just pointer types.
         w, _ := r.workers.LoadOrStore(roomID, &inputWorker{
             r:     r,
-            input: make(chan *inputTask, 32),
+            input: newFIFOQueue(),
         })
         worker := w.(*inputWorker)

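LoadOrStore gives one worker, and therefore one queue, per room: the first event for a room stores a fresh inputWorker, and every later call for the same room ID gets that stored value back. A small illustrative sketch of the same lookup, assuming it sits in the same package as the diff (the perRoomWorkers map and workerFor helper are hypothetical, not part of the commit):

package input

import "sync"

// perRoomWorkers mirrors the Inputer.workers field from the diff: it maps a
// room ID to the single *inputWorker responsible for that room.
var perRoomWorkers sync.Map

// workerFor returns the existing worker for roomID, or stores and returns a
// fresh one; all events for a room therefore share one fifoQueue.
func workerFor(roomID string) *inputWorker {
    w, _ := perRoomWorkers.LoadOrStore(roomID, &inputWorker{input: newFIFOQueue()})
    return w.(*inputWorker)
}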
@@ -160,7 +163,7 @@ func (r *Inputer) InputRoomEvents(
         if worker.running.CAS(false, true) {
             go worker.start()
         }
-        worker.input <- tasks[i]
+        worker.input.push(tasks[i])
     }

     // Wait for all of the workers to return results about our tasks.
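The running flag keeps at most one goroutine alive per worker: CAS(false, true) succeeds for exactly one caller, which then launches start(), and start() hands the flag back with defer w.running.Store(false). A standalone sketch of that compare-and-swap guard using the standard library's sync/atomic.Bool (Go 1.19+, where the method is spelled CompareAndSwap; the atomic.Bool in the diff comes from a non-standard-library atomic package that spells it CAS):

package main

import (
    "fmt"
    "sync"
    "sync/atomic"
)

func main() {
    var running atomic.Bool
    var started atomic.Int32
    var wg sync.WaitGroup

    // Eight goroutines race to start the worker; only the first
    // CompareAndSwap to observe "false" flips the flag, so exactly one of
    // them would launch the worker goroutine.
    for i := 0; i < 8; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            if running.CompareAndSwap(false, true) {
                started.Add(1)
                // In the diff this is where go worker.start() runs, and
                // start() releases the flag via defer w.running.Store(false).
            }
        }()
    }
    wg.Wait()
    fmt.Println("workers started:", started.Load()) // always prints 1
}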
roomserver/internal/input/input_fifo.go (new file, 60 lines)
@@ -0,0 +1,60 @@
+package input
+
+import (
+    "sync"
+)
+
+type fifoQueue struct {
+    frames []*inputTask
+    count  int
+    mutex  sync.Mutex
+    notifs chan struct{}
+}
+
+func newFIFOQueue() *fifoQueue {
+    q := &fifoQueue{
+        notifs: make(chan struct{}),
+    }
+    return q
+}
+
+func (q *fifoQueue) push(frame *inputTask) bool {
+    q.mutex.Lock()
+    defer q.mutex.Unlock()
+    q.frames = append(q.frames, frame)
+    q.count++
+    select {
+    case q.notifs <- struct{}{}:
+    default:
+    }
+    return true
+}
+
+func (q *fifoQueue) pop() (*inputTask, bool) {
+    q.mutex.Lock()
+    defer q.mutex.Unlock()
+    if q.count == 0 {
+        return nil, false
+    }
+    frame := q.frames[0]
+    q.frames[0] = nil
+    q.frames = q.frames[1:]
+    q.count--
+    if q.count == 0 {
+        // Force a GC of the underlying array, since it might have
+        // grown significantly if the queue was hammered for some reason
+        q.frames = nil
+    }
+    return frame, true
+}
+
+func (q *fifoQueue) wait() <-chan struct{} {
+    q.mutex.Lock()
+    defer q.mutex.Unlock()
+    if q.count > 0 {
+        ch := make(chan struct{})
+        close(ch)
+        return ch
+    }
+    return q.notifs
+}
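A sketch of how the queue is meant to be consumed, assuming it sits in the same package as the file above (the drain helper and stop channel are illustrative, not part of the commit):

package input

// drain shows the intended wait()/pop() pairing. wait() returns an
// already-closed channel whenever items are queued, so the select fires
// immediately; only when the queue is empty does it return q.notifs, which
// push() pokes with a non-blocking send.
func drain(q *fifoQueue, stop <-chan struct{}) {
    for {
        select {
        case <-stop:
            return
        case <-q.wait():
            task, ok := q.pop()
            if !ok {
                // Nothing left (for example another consumer got there
                // first); go back to waiting.
                continue
            }
            _ = task // the real worker processes the event here
        }
    }
}

Two details carry the design: push never blocks, so a burst of events for one room accumulates in the worker's slice instead of stalling the caller the way the old 32-slot channel could; and because wait hands back a pre-closed channel whenever count > 0, the notification that push drops when nobody is listening cannot cause a missed wakeup. The trade-off is that the queue is unbounded, with the backing array only released (q.frames = nil) once the queue fully drains.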