Mirror of https://github.com/matrix-org/dendrite.git

Merge branch 'main' into neilalexander/purgeroom

Commit 40f00abd19
@@ -22,6 +22,7 @@ import (
 	"math"
 	"net/http"
 	"net/url"
+	"strconv"
 	"time"

 	"github.com/matrix-org/gomatrixserverlib"
@@ -151,10 +152,17 @@ func (s *OutputRoomEventConsumer) onMessage(
 		return true
 	}

+	txnID := ""
+	// Try to get the message metadata, if we're able to, use the timestamp as the txnID
+	metadata, err := msgs[0].Metadata()
+	if err == nil {
+		txnID = strconv.Itoa(int(metadata.Timestamp.UnixNano()))
+	}
+
 	// Send event to any relevant application services. If we hit
 	// an error here, return false, so that we negatively ack.
 	log.WithField("appservice", state.ID).Debugf("Appservice worker sending %d events(s) from roomserver", len(events))
-	return s.sendEvents(ctx, state, events) == nil
+	return s.sendEvents(ctx, state, events, txnID) == nil
 }

 // sendEvents passes events to the appservice by using the transactions
@@ -162,6 +170,7 @@ func (s *OutputRoomEventConsumer) onMessage(
 func (s *OutputRoomEventConsumer) sendEvents(
 	ctx context.Context, state *appserviceState,
 	events []*gomatrixserverlib.HeaderedEvent,
+	txnID string,
 ) error {
 	// Create the transaction body.
 	transaction, err := json.Marshal(
@@ -173,13 +182,14 @@ func (s *OutputRoomEventConsumer) sendEvents(
 		return err
 	}

-	// TODO: We should probably be more intelligent and pick something not
-	// in the control of the event. A NATS timestamp header or something maybe.
-	txnID := events[0].Event.OriginServerTS()
+	// If txnID is not defined, generate one from the events.
+	if txnID == "" {
+		txnID = fmt.Sprintf("%d_%d", events[0].Event.OriginServerTS(), len(transaction))
+	}

 	// Send the transaction to the appservice.
 	// https://matrix.org/docs/spec/application_service/r0.1.2#put-matrix-app-v1-transactions-txnid
-	address := fmt.Sprintf("%s/transactions/%d?access_token=%s", state.URL, txnID, url.QueryEscape(state.HSToken))
+	address := fmt.Sprintf("%s/transactions/%s?access_token=%s", state.URL, txnID, url.QueryEscape(state.HSToken))
 	req, err := http.NewRequestWithContext(ctx, "PUT", address, bytes.NewBuffer(transaction))
 	if err != nil {
 		return err
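To see the whole transaction-ID scheme in one place, here is a small standalone sketch (not Dendrite's actual code) of the logic introduced above, assuming the nats.go client: prefer the JetStream metadata timestamp when it is available, otherwise fall back to a value derived from the first event's origin timestamp and the size of the marshalled transaction. The helper name and the placeholder arguments are illustrative only.

```go
package main

import (
	"fmt"
	"strconv"

	"github.com/nats-io/nats.go"
)

// deriveTxnID prefers the JetStream delivery timestamp, which is stable
// across redeliveries of the same message, and falls back to a value built
// from the event timestamp and payload size. originServerTS and payload are
// placeholders standing in for the first event's timestamp and the
// marshalled transaction body.
func deriveTxnID(msg *nats.Msg, originServerTS int64, payload []byte) string {
	if meta, err := msg.Metadata(); err == nil {
		return strconv.Itoa(int(meta.Timestamp.UnixNano()))
	}
	return fmt.Sprintf("%d_%d", originServerTS, len(payload))
}

func main() {
	// A message that was not delivered by JetStream has no metadata,
	// so the fallback branch is used here.
	msg := nats.NewMsg("example.subject")
	fmt.Println(deriveTxnID(msg, 1663200000000, []byte(`{"events":[]}`)))
}
```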
@@ -255,7 +255,7 @@ func (m *DendriteMonolith) Start() {
 	m.logger.SetOutput(BindLogger{})
 	logrus.SetOutput(BindLogger{})

-	m.PineconeRouter = pineconeRouter.NewRouter(logrus.WithField("pinecone", "router"), sk, false)
+	m.PineconeRouter = pineconeRouter.NewRouter(logrus.WithField("pinecone", "router"), sk)
 	m.PineconeQUIC = pineconeSessions.NewSessions(logrus.WithField("pinecone", "sessions"), m.PineconeRouter, []string{"matrix"})
 	m.PineconeMulticast = pineconeMulticast.NewMulticast(logrus.WithField("pinecone", "multicast"), m.PineconeRouter)
 	m.PineconeManager = pineconeConnections.NewConnectionManager(m.PineconeRouter, nil)
@@ -151,7 +151,7 @@ func main() {
 	base := base.NewBaseDendrite(cfg, "Monolith")
 	defer base.Close() // nolint: errcheck

-	pRouter := pineconeRouter.NewRouter(logrus.WithField("pinecone", "router"), sk, false)
+	pRouter := pineconeRouter.NewRouter(logrus.WithField("pinecone", "router"), sk)
 	pQUIC := pineconeSessions.NewSessions(logrus.WithField("pinecone", "sessions"), pRouter, []string{"matrix"})
 	pMulticast := pineconeMulticast.NewMulticast(logrus.WithField("pinecone", "multicast"), pRouter)
 	pManager := pineconeConnections.NewConnectionManager(pRouter, nil)
@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"log"
 	"strings"
+	"time"

 	"github.com/matrix-org/gomatrix"
 	"github.com/matrix-org/gomatrixserverlib"
@@ -81,11 +82,14 @@ func runTests(baseURL, branchName string) error {
 			client: users[1].client, text: "4: " + branchName,
 		},
 	}
+	wantEventIDs := make(map[string]struct{}, 8)
 	for _, msg := range msgs {
-		_, err = msg.client.SendText(dmRoomID, msg.text)
+		var resp *gomatrix.RespSendEvent
+		resp, err = msg.client.SendText(dmRoomID, msg.text)
 		if err != nil {
 			return fmt.Errorf("failed to send text in dm room: %s", err)
 		}
+		wantEventIDs[resp.EventID] = struct{}{}
 	}

 	// attempt to create/join the shared public room
@@ -113,11 +117,48 @@ func runTests(baseURL, branchName string) error {
 	}
 	// send messages
 	for _, msg := range msgs {
-		_, err = msg.client.SendText(publicRoomID, "public "+msg.text)
+		resp, err := msg.client.SendText(publicRoomID, "public "+msg.text)
 		if err != nil {
 			return fmt.Errorf("failed to send text in public room: %s", err)
 		}
+		wantEventIDs[resp.EventID] = struct{}{}
 	}

+	// Sync until we have all expected messages
+	doneCh := make(chan struct{})
+	go func() {
+		syncClient := users[0].client
+		since := ""
+		for len(wantEventIDs) > 0 {
+			select {
+			case <-doneCh:
+				return
+			default:
+			}
+			syncResp, err := syncClient.SyncRequest(1000, since, "1", false, "")
+			if err != nil {
+				continue
+			}
+			for _, room := range syncResp.Rooms.Join {
+				for _, ev := range room.Timeline.Events {
+					if ev.Type != "m.room.message" {
+						continue
+					}
+					delete(wantEventIDs, ev.ID)
+				}
+			}
+			since = syncResp.NextBatch
+		}
+		close(doneCh)
+	}()
+
+	select {
+	case <-time.After(time.Second * 10):
+		close(doneCh)
+		return fmt.Errorf("failed to receive all expected messages: %+v", wantEventIDs)
+	case <-doneCh:
+	}
+
 	log.Printf("OK! rooms(public=%s, dm=%s) users(%s, %s)\n", publicRoomID, dmRoomID, users[0].userID, users[1].userID)
 	return nil
 }
@@ -11,8 +11,8 @@ possible to get started.

 ## Sign off

-We ask that everyone who contributes to the project signs off their contributions
-in accordance with the [DCO](https://github.com/matrix-org/matrix-spec/blob/main/CONTRIBUTING.rst#sign-off).
+We require that everyone who contributes to the project signs off their contributions
+in accordance with the [Developer Certificate of Origin](https://github.com/matrix-org/matrix-spec/blob/main/CONTRIBUTING.rst#sign-off).
 In effect, this means adding a statement to your pull requests or commit messages
 along the lines of:

@@ -20,7 +20,18 @@ along the lines of:
 Signed-off-by: Full Name <email address>
 ```

-Unfortunately we can't accept contributions without it.
+Unfortunately we can't accept contributions without a sign-off.
+
+Please note that we can only accept contributions under a legally identifiable name,
+such as your name as it appears on government-issued documentation or common-law names
+(claimed by legitimate usage or repute). We cannot accept sign-offs from a pseudonym or
+alias and cannot accept anonymous contributions.
+
+If you would prefer to sign off privately instead (so as to not reveal your full
+name on a public pull request), you can do so by emailing a sign-off declaration
+and a link to your pull request directly to the [Matrix.org Foundation](https://matrix.org/foundation/)
+at `dco@matrix.org`. Once a private sign-off has been made, you will not be required
+to do so for future contributions.

 ## Getting up and running

@@ -10,24 +10,6 @@ permalink: /installation/database
 Dendrite uses SQL databases to store data. Depending on the database engine being used, you
 may need to perform some manual steps outlined below.

-## SQLite
-
-SQLite deployments do not require manual database creation. Simply configure the database
-filenames in the Dendrite configuration file and start Dendrite. The databases will be created
-and populated automatically.
-
-Note that Dendrite **cannot share a single SQLite database across multiple components**. Each
-component must be configured with its own SQLite database filename. You will have to remove
-the `global.database` section from your Dendrite config and add it to each individual section
-instead in order to use SQLite.
-
-### Connection strings
-
-Connection strings for SQLite databases take the following forms:
-
-* Current working directory path: `file:dendrite_component.db`
-* Full specified path: `file:///path/to/dendrite_component.db`
-
 ## PostgreSQL

 Dendrite can automatically populate the database with the relevant tables and indexes, but
@@ -106,3 +88,25 @@ for i in appservice federationapi mediaapi mscs roomserver syncapi keyserver use
     sudo -u postgres createdb -O dendrite dendrite_$i
 done
 ```
+
+## SQLite
+
+**WARNING:** The Dendrite SQLite backend is slower, less reliable and not recommended for
+production usage. You should use PostgreSQL instead. We may not be able to provide support if
+you run into issues with your deployment while using the SQLite backend.
+
+SQLite deployments do not require manual database creation. Simply configure the database
+filenames in the Dendrite configuration file and start Dendrite. The databases will be created
+and populated automatically.
+
+Note that Dendrite **cannot share a single SQLite database across multiple components**. Each
+component must be configured with its own SQLite database filename. You will have to remove
+the `global.database` section from your Dendrite config and add it to each individual section
+instead in order to use SQLite.
+
+### Connection strings
+
+Connection strings for SQLite databases take the following forms:
+
+* Current working directory path: `file:dendrite_component.db`
+* Full specified path: `file:///path/to/dendrite_component.db`
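As an aside, a minimal sketch of how a connection string of the forms listed above could be opened with the `mattn/go-sqlite3` driver that Dendrite already depends on; the component filename is the hypothetical `dendrite_component.db` from the examples, not a real Dendrite database name.

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/mattn/go-sqlite3" // registers the "sqlite3" driver
)

func main() {
	// Current working directory form; a full path would be
	// "file:///path/to/dendrite_component.db" instead.
	db, err := sql.Open("sqlite3", "file:dendrite_component.db")
	if err != nil {
		log.Fatalf("failed to open database: %v", err)
	}
	defer db.Close()

	// Each component gets its own file, so a second component would use
	// a different filename rather than sharing this one.
	if err := db.Ping(); err != nil {
		log.Fatalf("failed to connect: %v", err)
	}
	log.Println("connected to dendrite_component.db")
}
```

With PostgreSQL the same code would only need a different driver name and connection string.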
@@ -163,10 +163,10 @@ func (p *SyncAPIProducer) SendPresence(
 }

 func (p *SyncAPIProducer) SendDeviceListUpdate(
-	ctx context.Context, deviceListUpdate gomatrixserverlib.RawJSON, origin string,
+	ctx context.Context, deviceListUpdate gomatrixserverlib.RawJSON, origin gomatrixserverlib.ServerName,
 ) (err error) {
 	m := nats.NewMsg(p.TopicDeviceListUpdate)
-	m.Header.Set("origin", origin)
+	m.Header.Set("origin", string(origin))
 	m.Data = deviceListUpdate
 	log.Debugf("Sending device list update: %+v", m.Header)
 	_, err = p.JetStream.PublishMsg(m, nats.Context(ctx))
@@ -359,7 +359,7 @@ func (t *txnReq) processEDUs(ctx context.Context) {
 			}
 		}
 	case gomatrixserverlib.MDeviceListUpdate:
-		if err := t.producer.SendDeviceListUpdate(ctx, e.Content, e.Origin); err != nil {
+		if err := t.producer.SendDeviceListUpdate(ctx, e.Content, t.Origin); err != nil {
 			util.GetLogger(ctx).WithError(err).Error("failed to InputDeviceListUpdate")
 		}
 	case gomatrixserverlib.MReceipt:
go.mod

@@ -23,7 +23,7 @@ require (
 	github.com/matrix-org/go-sqlite3-js v0.0.0-20220419092513-28aa791a1c91
 	github.com/matrix-org/gomatrix v0.0.0-20210324163249-be2af5ef2e16
 	github.com/matrix-org/gomatrixserverlib v0.0.0-20220912142654-7b96db48888a
-	github.com/matrix-org/pinecone v0.0.0-20220912093434-b215925d5534
+	github.com/matrix-org/pinecone v0.0.0-20220914131522-608215eb1b29
 	github.com/matrix-org/util v0.0.0-20200807132607-55161520e1d4
 	github.com/mattn/go-sqlite3 v1.14.13
 	github.com/nats-io/nats-server/v2 v2.9.0
go.sum

@@ -390,8 +390,8 @@ github.com/matrix-org/gomatrix v0.0.0-20210324163249-be2af5ef2e16 h1:ZtO5uywdd5d
 github.com/matrix-org/gomatrix v0.0.0-20210324163249-be2af5ef2e16/go.mod h1:/gBX06Kw0exX1HrwmoBibFA98yBk/jxKpGVeyQbff+s=
 github.com/matrix-org/gomatrixserverlib v0.0.0-20220912142654-7b96db48888a h1:XeGBDZZsUe4kgj3myl0EiuDNVWxszJecMTrON3Wn9sI=
 github.com/matrix-org/gomatrixserverlib v0.0.0-20220912142654-7b96db48888a/go.mod h1:jX38yp3SSLJNftBg3PXU1ayd0PCLIiDHQ4xAc9DIixk=
-github.com/matrix-org/pinecone v0.0.0-20220912093434-b215925d5534 h1:XuJYAJNkdG3zj9cO0yQSvL+Sp2xogsTOuZRx7PwdtoA=
-github.com/matrix-org/pinecone v0.0.0-20220912093434-b215925d5534/go.mod h1:K0N1ixHQxXoCyqolDqVxPM3ArrDtcMs8yegOx2Lfv9k=
+github.com/matrix-org/pinecone v0.0.0-20220914131522-608215eb1b29 h1:/AIaqhK1BBi2sMEVQdgZRV8H8sNloAGCgztLZhsPqD0=
+github.com/matrix-org/pinecone v0.0.0-20220914131522-608215eb1b29/go.mod h1:K0N1ixHQxXoCyqolDqVxPM3ArrDtcMs8yegOx2Lfv9k=
 github.com/matrix-org/util v0.0.0-20190711121626-527ce5ddefc7/go.mod h1:vVQlW/emklohkZnOPwD3LrZUBqdfsbiyO3p1lNV8F6U=
 github.com/matrix-org/util v0.0.0-20200807132607-55161520e1d4 h1:eCEHXWDv9Rm335MSuB49mFUK44bwZPFSDde3ORE3syk=
 github.com/matrix-org/util v0.0.0-20200807132607-55161520e1d4/go.mod h1:vVQlW/emklohkZnOPwD3LrZUBqdfsbiyO3p1lNV8F6U=
@@ -439,6 +439,7 @@ type QueryMembershipAtEventRequest struct {

 // QueryMembershipAtEventResponse is the response to QueryMembershipAtEventRequest.
 type QueryMembershipAtEventResponse struct {
-	// Memberships is a map from eventID to a list of events (if any).
+	// Memberships is a map from eventID to a list of events (if any). Events that
+	// do not have known state will return an empty array here.
 	Memberships map[string][]*gomatrixserverlib.HeaderedEvent `json:"memberships"`
 }
@@ -176,7 +176,8 @@ func (r *Inputer) Start() error {
 		},
 		nats.HeadersOnly(),
+		nats.DeliverAll(),
-		nats.AckAll(),
+		nats.AckExplicit(),
 		nats.ReplayInstant(),
 		nats.BindStream(r.InputRoomEventTopic),
 	)
 	return err
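For context, a minimal sketch of a JetStream subscription using the same delivery and acknowledgement options as the hunk above, written against the nats.go client. The stream and subject names are made up for illustration, and `nats.ManualAck()` is added here only so the example can acknowledge messages itself.

```go
package main

import (
	"log"

	"github.com/nats-io/nats.go"
)

func main() {
	nc, err := nats.Connect(nats.DefaultURL)
	if err != nil {
		log.Fatal(err)
	}
	defer nc.Close()

	js, err := nc.JetStream()
	if err != nil {
		log.Fatal(err)
	}

	// Mirrors the option set above: deliver everything already in the stream,
	// require an explicit ack per message, and deliver headers only.
	// "ExampleStream" and "example.subject" are illustrative names.
	_, err = js.Subscribe("example.subject", func(m *nats.Msg) {
		log.Printf("headers: %+v", m.Header)
		if err := m.Ack(); err != nil {
			log.Printf("ack failed: %v", err)
		}
	},
		nats.HeadersOnly(),
		nats.DeliverAll(),
		nats.AckExplicit(),
		nats.ManualAck(),
		nats.ReplayInstant(),
		nats.BindStream("ExampleStream"),
	)
	if err != nil {
		log.Fatal(err)
	}

	select {} // block so the subscription keeps receiving
}
```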
@@ -264,16 +264,27 @@ func (u *latestEventsUpdater) latestState() error {
 		return fmt.Errorf("roomState.CalculateAndStoreStateAfterEvents: %w", err)
 	}

-	// Now that we have a new state snapshot based on the latest events,
-	// we can compare that new snapshot to the previous one and see what
-	// has changed. This gives us one list of removed state events and
-	// another list of added ones. Replacing a value for a state-key tuple
-	// will result one removed (the old event) and one added (the new event).
-	u.removed, u.added, err = roomState.DifferenceBetweeenStateSnapshots(
-		ctx, u.oldStateNID, u.newStateNID,
-	)
-	if err != nil {
-		return fmt.Errorf("roomState.DifferenceBetweenStateSnapshots: %w", err)
+	// Include information about what changed in the state transition. If the
+	// event rewrites the state (i.e. is a federated join) then we will simply
+	// include the entire state snapshot as added events, as the "RewritesState"
+	// flag in the output event signals downstream components to purge their
+	// room state first. If it doesn't rewrite the state then we will work out
+	// what the difference is between the state snapshots and send that. In all
+	// cases where a state event is being replaced, the old state event will
+	// appear in "removed" and the replacement will appear in "added".
+	if u.rewritesState {
+		u.removed = []types.StateEntry{}
+		u.added, err = roomState.LoadStateAtSnapshot(ctx, u.newStateNID)
+		if err != nil {
+			return fmt.Errorf("roomState.LoadStateAtSnapshot: %w", err)
+		}
+	} else {
+		u.removed, u.added, err = roomState.DifferenceBetweeenStateSnapshots(
+			ctx, u.oldStateNID, u.newStateNID,
+		)
+		if err != nil {
+			return fmt.Errorf("roomState.DifferenceBetweenStateSnapshots: %w", err)
+		}
 	}

 	if removed := len(u.removed) - len(u.added); !u.rewritesState && removed > 0 {
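To make the comment above concrete, here is an illustrative sketch of the difference computation it describes, using simplified made-up types rather than the roomserver's real ones: replacing the event for a state-key tuple yields one entry in `removed` and one in `added`, while a state-rewriting event would instead report the entire new snapshot as added.

```go
package main

import "fmt"

// stateEntry is a simplified stand-in for a roomserver state entry:
// a (type, state_key) tuple mapped to the event currently holding it.
type stateEntry struct {
	Tuple   string
	EventID string
}

// difference compares two snapshots. Anything present in old but not in new
// is removed; anything present in new but not in old is added. A replaced
// tuple therefore contributes one removed entry and one added entry.
func difference(oldState, newState []stateEntry) (removed, added []stateEntry) {
	oldSet := make(map[stateEntry]struct{}, len(oldState))
	for _, e := range oldState {
		oldSet[e] = struct{}{}
	}
	newSet := make(map[stateEntry]struct{}, len(newState))
	for _, e := range newState {
		newSet[e] = struct{}{}
	}
	for _, e := range oldState {
		if _, ok := newSet[e]; !ok {
			removed = append(removed, e)
		}
	}
	for _, e := range newState {
		if _, ok := oldSet[e]; !ok {
			added = append(added, e)
		}
	}
	return removed, added
}

func main() {
	oldState := []stateEntry{{"m.room.name", "$event1"}, {"m.room.topic", "$event2"}}
	newState := []stateEntry{{"m.room.name", "$event3"}, {"m.room.topic", "$event2"}}
	removed, added := difference(oldState, newState)
	fmt.Println("removed:", removed) // the old m.room.name event
	fmt.Println("added:  ", added)   // the new m.room.name event
}
```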
@@ -208,6 +208,9 @@ func (r *Queryer) QueryMembershipForUser(
 	return err
 }

+// QueryMembershipAtEvent returns the known memberships at a given event.
+// If the state before an event is not known, an empty list will be returned
+// for that event instead.
 func (r *Queryer) QueryMembershipAtEvent(
 	ctx context.Context,
 	request *api.QueryMembershipAtEventRequest,
@@ -237,7 +240,11 @@ func (r *Queryer) QueryMembershipAtEvent(
 	}

 	for _, eventID := range request.EventIDs {
-		stateEntry := stateEntries[eventID]
+		stateEntry, ok := stateEntries[eventID]
+		if !ok {
+			response.Memberships[eventID] = []*gomatrixserverlib.HeaderedEvent{}
+			continue
+		}
 		memberships, err := helpers.GetMembershipsAtState(ctx, r.DB, stateEntry, false)
 		if err != nil {
 			return fmt.Errorf("unable to get memberships at state: %w", err)
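Purely as an illustration of the behaviour described above, with a simplified stand-in for the real response type, a caller would treat an empty list for an event as "state unknown at this event" rather than as an empty room:

```go
package main

import "fmt"

// membershipResponse is a simplified stand-in for the real response type:
// a map from event ID to the membership events known at that point.
type membershipResponse struct {
	Memberships map[string][]string
}

func main() {
	resp := membershipResponse{
		Memberships: map[string][]string{
			"$event_with_known_state":   {"$alice_join", "$bob_join"},
			"$event_with_unknown_state": {}, // state before this event was not known
		},
	}
	for eventID, members := range resp.Memberships {
		if len(members) == 0 {
			// An empty list means no state snapshot was known for this event,
			// not that the room had no members.
			fmt.Printf("%s: state unknown, skipping\n", eventID)
			continue
		}
		fmt.Printf("%s: %d membership events\n", eventID, len(members))
	}
}
```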
@@ -18,6 +18,7 @@ package state

 import (
 	"context"
+	"database/sql"
 	"fmt"
 	"sort"
 	"sync"
@@ -134,11 +135,14 @@ func (v *StateResolution) LoadMembershipAtEvent(
 	for i := range eventIDs {
 		eventID := eventIDs[i]
 		snapshotNID, err := v.db.SnapshotNIDFromEventID(ctx, eventID)
-		if err != nil {
+		if err != nil && err != sql.ErrNoRows {
 			return nil, fmt.Errorf("LoadStateAtEvent.SnapshotNIDFromEventID failed for event %s : %w", eventID, err)
 		}
 		if snapshotNID == 0 {
-			return nil, fmt.Errorf("LoadStateAtEvent.SnapshotNIDFromEventID(%s) returned 0 NID, was this event stored?", eventID)
+			// If we don't know a state snapshot for this event then we can't calculate
+			// memberships at the time of the event, so skip over it. This means that
+			// it isn't guaranteed that the response map will contain every single event.
+			continue
 		}
 		snapshotNIDMap[snapshotNID] = append(snapshotNIDMap[snapshotNID], eventID)
 	}
@@ -388,7 +388,7 @@ func applyHistoryVisibilityFilter(
 	if err != nil {
 		// Not a fatal error, we can continue without the stateEvents,
 		// they are only needed if there are state events in the timeline.
-		logrus.WithError(err).Warnf("failed to get current room state")
+		logrus.WithError(err).Warnf("Failed to get current room state for history visibility")
 	}
 	alwaysIncludeIDs := make(map[string]struct{}, len(stateEvents))
 	for _, ev := range stateEvents {
@@ -397,7 +397,6 @@ func applyHistoryVisibilityFilter(
 	startTime := time.Now()
 	events, err := internal.ApplyHistoryVisibilityFilter(ctx, db, rsAPI, recentEvents, alwaysIncludeIDs, userID, "sync")
 	if err != nil {
-
 		return nil, err
 	}
 	logrus.WithFields(logrus.Fields{
@@ -405,7 +404,7 @@ func applyHistoryVisibilityFilter(
 		"room_id": roomID,
 		"before":  len(recentEvents),
 		"after":   len(events),
-	}).Debug("applied history visibility (sync)")
+	}).Trace("Applied history visibility (sync)")
 	return events, nil
 }
