Tweak log levels of some federation logging

This commit is contained in:
Neil Alexander 2020-08-07 13:40:32 +01:00
parent 6ce7af8a3e
commit 9d1affaa9f
No known key found for this signature in database
GPG key ID: A02A2019A2BB0944
3 changed files with 9 additions and 7 deletions

View file

@@ -218,7 +218,9 @@ func (t *txnReq) processTransaction() (*gomatrixserverlib.RespSend, *util.JSONRe
 	}
 	t.processEDUs(t.EDUs)
-	util.GetLogger(t.context).Infof("Processed %d PDUs from transaction %q", len(results), t.TransactionID)
+	if c := len(results); c > 0 {
+		util.GetLogger(t.context).Infof("Processed %d PDUs from transaction %q", c, t.TransactionID)
+	}
 	return &gomatrixserverlib.RespSend{PDUs: results}, nil
 }
@@ -315,7 +317,7 @@ func (t *txnReq) processEDUs(edus []gomatrixserverlib.EDU) {
 		case gomatrixserverlib.MDeviceListUpdate:
 			t.processDeviceListUpdate(e)
 		default:
-			util.GetLogger(t.context).WithField("type", e.Type).Warn("unhandled edu")
+			util.GetLogger(t.context).WithField("type", e.Type).Debug("Unhandled EDU")
 		}
 	}
 }

View file

@@ -255,7 +255,7 @@ func (oq *destinationQueue) backgroundSend() {
 			// The worker is idle so stop the goroutine. It'll get
 			// restarted automatically the next time we have an event to
 			// send.
-			log.Infof("Queue %q has been idle for %s, going to sleep", oq.destination, queueIdleTimeout)
+			log.Debugf("Queue %q has been idle for %s, going to sleep", oq.destination, queueIdleTimeout)
 			return
 		}
@@ -263,12 +263,12 @@
 		// backoff duration to complete first, or until explicitly
 		// told to retry.
 		if backoff, duration := oq.statistics.BackoffDuration(); backoff {
-			log.WithField("duration", duration).Infof("Backing off %s", oq.destination)
+			log.WithField("duration", duration).Debugf("Backing off %s", oq.destination)
 			oq.backingOff.Store(true)
 			select {
 			case <-time.After(duration):
 			case <-oq.interruptBackoff:
-				log.Infof("Interrupting backoff for %q", oq.destination)
+				log.Debugf("Interrupting backoff for %q", oq.destination)
 			}
 			oq.backingOff.Store(false)
 		}
@@ -414,7 +414,7 @@ func (oq *destinationQueue) nextTransaction() (bool, error) {
 		t.EDUs = append(t.EDUs, *edu)
 	}
-	logrus.WithField("server_name", oq.destination).Infof("Sending transaction %q containing %d PDUs, %d EDUs", t.TransactionID, len(t.PDUs), len(t.EDUs))
+	logrus.WithField("server_name", oq.destination).Debugf("Sending transaction %q containing %d PDUs, %d EDUs", t.TransactionID, len(t.PDUs), len(t.EDUs))
 	// Try to send the transaction to the destination server.
 	// TODO: we should check for 500-ish fails vs 400-ish here,

View file

@@ -136,7 +136,7 @@ func (oqs *OutgoingQueues) SendEvent(
 	log.WithFields(log.Fields{
 		"destinations": destinations, "event": ev.EventID(),
-	}).Info("Sending event")
+	}).Infof("Sending event")
 	headeredJSON, err := json.Marshal(ev)
 	if err != nil {