Mirror of https://github.com/matrix-org/dendrite.git (synced 2025-12-29 01:33:10 -06:00)
Removed some meaningless type conversions and fixed some comments to Go style
parent e08942fb00
commit 5d61c057cd
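Before the hunks themselves: the conversions removed below are ones whose operand already has the target type (for example `string(v)` where `v` is already a plain `string`), the parentheses dropped are redundant around a simple comparison, and the comment fixes add the space after `//` that idiomatic Go comments use. What follows is a small, self-contained Go sketch of those same patterns; the names and the simplified `checkNotEmpty` signature are illustrative only and are not taken from the Dendrite sources.

package main

import "fmt"

// checkNotEmpty mirrors the shape of the config checks touched below;
// the name and signature here are hypothetical, not Dendrite's.
func checkNotEmpty(name, value string) {
	//no space after the marker: the style the commit fixes
	// space after the marker: idiomatic Go comment style
	if value == "" {
		fmt.Printf("%s must not be empty\n", name)
	}
}

func main() {
	publicKey := "recaptcha-public-key" // already a plain string

	// Before: string(publicKey) converts a string to a string -- a no-op.
	checkNotEmpty("client_api.recaptcha_public_key", string(publicKey))

	// After: pass the value directly; the conversion adds nothing.
	checkNotEmpty("client_api.recaptcha_public_key", publicKey)

	// The commit also drops redundant parentheses around comparisons.
	dir := "b"
	backwardOrdering := dir == "b" // rather than (dir == "b")
	_ = backwardOrdering
}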
@@ -29,7 +29,7 @@ import (
 	"github.com/matrix-org/gomatrixserverlib"
 	"github.com/sirupsen/logrus"
 	"golang.org/x/crypto/ed25519"
-	yaml "gopkg.in/yaml.v2"
+	"gopkg.in/yaml.v2"
 
 	jaegerconfig "github.com/uber/jaeger-client-go/config"
 	jaegermetrics "github.com/uber/jaeger-lib/metrics"
@@ -62,9 +62,9 @@ func (c *ClientAPI) Verify(configErrs *ConfigErrors, isMonolith bool) {
 		checkURL(configErrs, "client_api.external_api.listen", string(c.ExternalAPI.Listen))
 	}
 	if c.RecaptchaEnabled {
-		checkNotEmpty(configErrs, "client_api.recaptcha_public_key", string(c.RecaptchaPublicKey))
-		checkNotEmpty(configErrs, "client_api.recaptcha_private_key", string(c.RecaptchaPrivateKey))
-		checkNotEmpty(configErrs, "client_api.recaptcha_siteverify_api", string(c.RecaptchaSiteVerifyAPI))
+		checkNotEmpty(configErrs, "client_api.recaptcha_public_key", c.RecaptchaPublicKey)
+		checkNotEmpty(configErrs, "client_api.recaptcha_private_key", c.RecaptchaPrivateKey)
+		checkNotEmpty(configErrs, "client_api.recaptcha_siteverify_api", c.RecaptchaSiteVerifyAPI)
 	}
 	c.TURN.Verify(configErrs)
 	c.RateLimiting.Verify(configErrs)
@@ -50,7 +50,7 @@ func NewOutputClientDataConsumer(
 	consumer := internal.ContinualConsumer{
 		Process:        process,
 		ComponentName:  "syncapi/clientapi",
-		Topic:          string(cfg.Matrix.Kafka.TopicFor(config.TopicOutputClientData)),
+		Topic:          cfg.Matrix.Kafka.TopicFor(config.TopicOutputClientData),
 		Consumer:       kafkaConsumer,
 		PartitionStore: store,
 	}
@@ -117,7 +117,7 @@ func OnIncomingMessagesRequest(
 	}
 	// A boolean is easier to handle in this case, especially since dir is sure
 	// to have one of the two accepted values (so dir == "f" <=> !backwardOrdering).
-	backwardOrdering := (dir == "b")
+	backwardOrdering := dir == "b"
 
 	// Pagination tokens. To is optional, and its default value depends on the
 	// direction ("b" or "f").
@@ -463,12 +463,12 @@ func (r *messagesReq) handleNonEmptyEventsSlice(streamEvents []types.StreamEvent
 			if r.wasToProvided {
 				// The condition in the SQL query is a strict "greater than" so
 				// we need to check against to-1.
-				streamPos := types.StreamPosition(streamEvents[len(streamEvents)-1].StreamPosition)
-				isSetLargeEnough = (r.to.PDUPosition-1 == streamPos)
+				streamPos := streamEvents[len(streamEvents)-1].StreamPosition
+				isSetLargeEnough = r.to.PDUPosition-1 == streamPos
 			}
 		} else {
-			streamPos := types.StreamPosition(streamEvents[0].StreamPosition)
-			isSetLargeEnough = (r.from.PDUPosition-1 == streamPos)
+			streamPos := streamEvents[0].StreamPosition
+			isSetLargeEnough = r.from.PDUPosition-1 == streamPos
 		}
 	}
 
@@ -595,7 +595,7 @@ func (d *Database) fetchStateEvents(
 	if len(missingEvents) > 0 {
 		// This happens when add_state_ids has an event ID which is not in the provided range.
 		// We need to explicitly fetch them.
-		allMissingEventIDs := []string{}
+		var allMissingEventIDs []string
 		for _, missingEvIDs := range missingEvents {
 			allMissingEventIDs = append(allMissingEventIDs, missingEvIDs...)
 		}
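On the declaration change in the hunk above: `var allMissingEventIDs []string` declares a nil slice, whereas `allMissingEventIDs := []string{}` allocates an empty non-nil one; both have length zero and behave identically under append, and the `var` form is the one Go code-review conventions generally prefer when a slice starts empty. A tiny standalone illustration (not Dendrite code):

package main

import "fmt"

func main() {
	// Nil slice: the declaration form the hunk above switches to.
	var ids []string
	fmt.Println(ids == nil, len(ids)) // true 0

	// Empty slice literal: allocated, non-nil, still zero-length.
	literal := []string{}
	fmt.Println(literal == nil, len(literal)) // false 0

	// append works identically on both, growing them on first use.
	ids = append(ids, "a", "b")
	literal = append(literal, "a", "b")
	fmt.Println(ids, literal) // [a b] [a b]
}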
@@ -651,7 +651,7 @@ func (d *Database) fetchMissingStateEvents(
 		// this error again when we work out what it is and fix it, otherwise we
 		// just end up returning lots of 500s to the client and that breaks
 		// pretty much everything, rather than just sending what we have.
-		//return nil, fmt.Errorf("failed to map all event IDs to events: (got %d, wanted %d)", len(stateEvents), len(missing))
+		// return nil, fmt.Errorf("failed to map all event IDs to events: (got %d, wanted %d)", len(stateEvents), len(missing))
 	}
 	events = append(events, stateEvents...)
 	return events, nil
@@ -65,7 +65,7 @@ func AddPublicRoutes(
 	requestPool := sync.NewRequestPool(syncDB, cfg, userAPI, keyAPI, rsAPI, streams, notifier)
 
 	keyChangeConsumer := consumers.NewOutputKeyChangeEventConsumer(
-		process, cfg.Matrix.ServerName, string(cfg.Matrix.Kafka.TopicFor(config.TopicOutputKeyChangeEvent)),
+		process, cfg.Matrix.ServerName, cfg.Matrix.Kafka.TopicFor(config.TopicOutputKeyChangeEvent),
 		consumer, keyAPI, rsAPI, syncDB, notifier, streams.DeviceListStreamProvider,
 	)
 	if err = keyChangeConsumer.Start(); err != nil {