Configurable backoff

Neil Alexander 2020-09-02 18:03:21 +01:00
parent c83129a366
commit dd781f666d
4 changed files with 66 additions and 13 deletions


@@ -6,23 +6,28 @@ import (
 	"time"
 
 	"github.com/matrix-org/dendrite/clientapi/jsonerror"
+	"github.com/matrix-org/dendrite/internal/config"
 	"github.com/matrix-org/util"
 )
 
 type rateLimits struct {
-	limits       map[string]chan struct{}
-	limitsMutex  sync.RWMutex
-	maxRequests  int
-	timeInterval time.Duration
+	limits           map[string]chan struct{}
+	limitsMutex      sync.RWMutex
+	enabled          bool
+	requestThreshold int64
+	cooloffDuration  time.Duration
 }
 
-func newRateLimits() *rateLimits {
+func newRateLimits(cfg *config.RateLimiting) *rateLimits {
 	l := &rateLimits{
-		limits:       make(map[string]chan struct{}),
-		maxRequests:  10,
-		timeInterval: 250 * time.Millisecond,
+		limits:           make(map[string]chan struct{}),
+		enabled:          cfg.Enabled,
+		requestThreshold: cfg.Threshold,
+		cooloffDuration:  time.Duration(cfg.Cooloff) * time.Millisecond,
 	}
-	go l.clean()
+	if l.enabled {
+		go l.clean()
+	}
 	return l
 }
@@ -45,6 +50,15 @@ func (l *rateLimits) clean() {
 }
 
 func (l *rateLimits) rateLimit(req *http.Request) *util.JSONResponse {
+	// If rate limiting is disabled then do nothing.
+	if !l.enabled {
+		return nil
+	}
+
+	// Lock the map long enough to check for rate limiting. We hold it
+	// for longer here than we really need to but it makes sure that we
+	// also don't conflict with the cleaner goroutine which might clean
+	// up a channel after we have retrieved it otherwise.
 	l.limitsMutex.RLock()
 	defer l.limitsMutex.RUnlock()
@@ -59,7 +73,7 @@ func (l *rateLimits) rateLimit(req *http.Request) *util.JSONResponse {
 	// let's create one.
 	rateLimit, ok := l.limits[caller]
 	if !ok {
-		l.limits[caller] = make(chan struct{}, l.maxRequests)
+		l.limits[caller] = make(chan struct{}, l.requestThreshold)
 		rateLimit = l.limits[caller]
 	}
@@ -71,14 +85,14 @@ func (l *rateLimits) rateLimit(req *http.Request) *util.JSONResponse {
 		// We hit the rate limit. Tell the client to back off.
 		return &util.JSONResponse{
 			Code: http.StatusTooManyRequests,
-			JSON: jsonerror.LimitExceeded("You are sending too many requests too quickly!", l.timeInterval.Milliseconds()),
+			JSON: jsonerror.LimitExceeded("You are sending too many requests too quickly!", l.cooloffDuration.Milliseconds()),
 		}
 	}
 
 	// After the time interval, drain a resource from the rate limiting
 	// channel. This will free up space in the channel for new requests.
 	go func() {
-		<-time.After(l.timeInterval)
+		<-time.After(l.cooloffDuration)
 		<-rateLimit
 	}()
 	return nil
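
For readers following the hunks above: the limiter treats a buffered channel per caller as a counting semaphore. The channel capacity is the configured threshold, a request occupies one slot by sending into the channel, and a goroutine drains that slot again once the cooloff duration has passed; a full channel means the caller is told to back off with a 429. Below is a minimal, self-contained Go sketch of that pattern for illustration only. The names limiter, newLimiter and allow are made up here, and this is not Dendrite's code.

package main

import (
	"fmt"
	"time"
)

type limiter struct {
	slots   chan struct{}
	cooloff time.Duration
}

func newLimiter(threshold int, cooloff time.Duration) *limiter {
	return &limiter{
		slots:   make(chan struct{}, threshold),
		cooloff: cooloff,
	}
}

// allow reports whether a request may proceed. A free slot is occupied
// immediately and released again after the cooloff period has elapsed.
func (l *limiter) allow() bool {
	select {
	case l.slots <- struct{}{}:
		go func() {
			<-time.After(l.cooloff)
			<-l.slots
		}()
		return true
	default:
		// Every slot is taken: the caller should back off.
		return false
	}
}

func main() {
	l := newLimiter(5, 500*time.Millisecond)
	for i := 1; i <= 7; i++ {
		fmt.Printf("request %d allowed: %v\n", i, l.allow())
	}
	// Requests 6 and 7 are rejected; roughly 500ms later the slots free up.
}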


@@ -60,7 +60,7 @@ func Setup(
 	keyAPI keyserverAPI.KeyInternalAPI,
 	extRoomsProvider api.ExtraPublicRoomsProvider,
 ) {
-	rateLimits := newRateLimits()
+	rateLimits := newRateLimits(&cfg.RateLimiting)
 	userInteractiveAuth := auth.NewUserInteractive(accountDB.GetAccountByPassword, cfg)
 
 	publicAPIMux.Handle("/versions",


@@ -133,6 +133,14 @@ client_api:
     turn_username: ""
     turn_password: ""
 
+  # Settings for rate-limited endpoints. Rate limiting will kick in after the
+  # threshold number of "slots" have been taken by requests from a specific
+  # host. Each "slot" will be released after the cooloff time in milliseconds.
+  rate_limiting:
+    enabled: true
+    threshold: 5
+    cooloff_ms: 500
+
 # Configuration for the Current State Server.
 current_state_server:
   internal_api:
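
A quick worked example of the defaults above: a single host can take all 5 slots in a burst, and each slot is released 500ms after it was taken, so the sustained ceiling comes out to 5 / 0.5s = 10 rate-limited requests per second per host. Anything past that is answered with a 429, with the cooloff (500ms here) passed to LimitExceeded as the suggested backoff. This reading follows the rateLimit logic in the first file.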


@@ -34,6 +34,9 @@ type ClientAPI struct {
 
 	// TURN options
 	TURN TURN `yaml:"turn"`
+
+	// Rate-limiting options
+	RateLimiting RateLimiting `yaml:"rate_limiting"`
 }
 
 func (c *ClientAPI) Defaults() {
@@ -47,6 +50,7 @@ func (c *ClientAPI) Defaults() {
 	c.RecaptchaBypassSecret = ""
 	c.RecaptchaSiteVerifyAPI = ""
 	c.RegistrationDisabled = false
+	c.RateLimiting.Defaults()
 }
 
 func (c *ClientAPI) Verify(configErrs *ConfigErrors, isMonolith bool) {
@@ -61,6 +65,7 @@ func (c *ClientAPI) Verify(configErrs *ConfigErrors, isMonolith bool) {
 		checkNotEmpty(configErrs, "client_api.recaptcha_siteverify_api", string(c.RecaptchaSiteVerifyAPI))
 	}
 	c.TURN.Verify(configErrs)
+	c.RateLimiting.Verify(configErrs)
 }
 
 type TURN struct {
@@ -90,3 +95,29 @@ func (c *TURN) Verify(configErrs *ConfigErrors) {
 		}
 	}
 }
+
+type RateLimiting struct {
+	// Is rate limiting enabled or disabled?
+	Enabled bool `yaml:"enabled"`
+	// How many "slots" a user can occupy sending requests to a rate-limited
+	// endpoint before we apply rate-limiting
+	Threshold int64 `yaml:"threshold"`
+	// The cooloff period in milliseconds after a request before the "slot"
+	// is freed again
+	Cooloff int64 `yaml:"cooloff_ms"`
+}
+
+func (r *RateLimiting) Verify(configErrs *ConfigErrors) {
+	if r.Enabled {
+		checkPositive(configErrs, "client_api.rate_limiting.threshold", r.Threshold)
+		checkPositive(configErrs, "client_api.rate_limiting.cooloff_ms", r.Cooloff)
+	}
+}
+
+func (r *RateLimiting) Defaults() {
+	r.Enabled = true
+	r.Threshold = 5
+	r.Cooloff = 500