Merge branch 'master' of https://github.com/matrix-org/dendrite into fix-room-search
This commit is contained in:
commit 3c23f25615
@@ -32,7 +32,7 @@ import (
	roomserverAPI "github.com/matrix-org/dendrite/roomserver/api"
	"github.com/matrix-org/dendrite/setup/base"
	"github.com/matrix-org/dendrite/setup/config"
	"github.com/matrix-org/dendrite/setup/kafka"
	"github.com/matrix-org/dendrite/setup/jetstream"
	userapi "github.com/matrix-org/dendrite/userapi/api"
	"github.com/sirupsen/logrus"
)

@@ -58,7 +58,7 @@ func NewInternalAPI(
			},
		},
	}
	consumer, _ := kafka.SetupConsumerProducer(&base.Cfg.Global.Kafka)
	js, _, _ := jetstream.Prepare(&base.Cfg.Global.JetStream)

	// Create a connection to the appservice postgres DB
	appserviceDB, err := storage.NewDatabase(&base.Cfg.AppServiceAPI.Database)

@@ -97,7 +97,7 @@ func NewInternalAPI(
	// We can't add ASes at runtime so this is safe to do.
	if len(workerStates) > 0 {
		consumer := consumers.NewOutputRoomEventConsumer(
			base.ProcessContext, base.Cfg, consumer, appserviceDB,
			base.ProcessContext, base.Cfg, js, appserviceDB,
			rsAPI, workerStates,
		)
		if err := consumer.Start(); err != nil {
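The hunks above swap the Kafka consumer/producer setup (`kafka.SetupConsumerProducer`) for a NATS JetStream context obtained from `jetstream.Prepare`, which is then passed to the room-event consumer. As a point of reference only, here is a minimal, self-contained sketch of obtaining a JetStream context with the plain nats.go client; the URL is an assumption for illustration, since Dendrite resolves the real addresses from its own configuration.

```go
package main

import (
	"log"

	"github.com/nats-io/nats.go"
)

func main() {
	// Illustrative URL; Dendrite derives the real address list from cfg.Global.JetStream.
	nc, err := nats.Connect("nats://localhost:4222")
	if err != nil {
		log.Fatalf("connect: %v", err)
	}
	defer nc.Close()

	// The JetStream context plays the role of the `js` value returned by
	// jetstream.Prepare in the diff: everything publishes and subscribes through it.
	js, err := nc.JetStream()
	if err != nil {
		log.Fatalf("jetstream: %v", err)
	}
	_ = js
}
```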
|
@ -20,23 +20,26 @@ import (
|
|||
|
||||
"github.com/matrix-org/dendrite/appservice/storage"
|
||||
"github.com/matrix-org/dendrite/appservice/types"
|
||||
"github.com/matrix-org/dendrite/internal"
|
||||
"github.com/matrix-org/dendrite/roomserver/api"
|
||||
"github.com/matrix-org/dendrite/setup/config"
|
||||
"github.com/matrix-org/dendrite/setup/jetstream"
|
||||
"github.com/matrix-org/dendrite/setup/process"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
"github.com/nats-io/nats.go"
|
||||
|
||||
"github.com/Shopify/sarama"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// OutputRoomEventConsumer consumes events that originated in the room server.
|
||||
type OutputRoomEventConsumer struct {
|
||||
roomServerConsumer *internal.ContinualConsumer
|
||||
asDB storage.Database
|
||||
rsAPI api.RoomserverInternalAPI
|
||||
serverName string
|
||||
workerStates []types.ApplicationServiceWorkerState
|
||||
ctx context.Context
|
||||
jetstream nats.JetStreamContext
|
||||
durable nats.SubOpt
|
||||
topic string
|
||||
asDB storage.Database
|
||||
rsAPI api.RoomserverInternalAPI
|
||||
serverName string
|
||||
workerStates []types.ApplicationServiceWorkerState
|
||||
}
|
||||
|
||||
// NewOutputRoomEventConsumer creates a new OutputRoomEventConsumer. Call
|
||||
|
|
@ -44,55 +47,56 @@ type OutputRoomEventConsumer struct {
|
|||
func NewOutputRoomEventConsumer(
|
||||
process *process.ProcessContext,
|
||||
cfg *config.Dendrite,
|
||||
kafkaConsumer sarama.Consumer,
|
||||
js nats.JetStreamContext,
|
||||
appserviceDB storage.Database,
|
||||
rsAPI api.RoomserverInternalAPI,
|
||||
workerStates []types.ApplicationServiceWorkerState,
|
||||
) *OutputRoomEventConsumer {
|
||||
consumer := internal.ContinualConsumer{
|
||||
Process: process,
|
||||
ComponentName: "appservice/roomserver",
|
||||
Topic: cfg.Global.Kafka.TopicFor(config.TopicOutputRoomEvent),
|
||||
Consumer: kafkaConsumer,
|
||||
PartitionStore: appserviceDB,
|
||||
return &OutputRoomEventConsumer{
|
||||
ctx: process.Context(),
|
||||
jetstream: js,
|
||||
durable: cfg.Global.JetStream.Durable("AppserviceRoomserverConsumer"),
|
||||
topic: cfg.Global.JetStream.TopicFor(jetstream.OutputRoomEvent),
|
||||
asDB: appserviceDB,
|
||||
rsAPI: rsAPI,
|
||||
serverName: string(cfg.Global.ServerName),
|
||||
workerStates: workerStates,
|
||||
}
|
||||
s := &OutputRoomEventConsumer{
|
||||
roomServerConsumer: &consumer,
|
||||
asDB: appserviceDB,
|
||||
rsAPI: rsAPI,
|
||||
serverName: string(cfg.Global.ServerName),
|
||||
workerStates: workerStates,
|
||||
}
|
||||
consumer.ProcessMessage = s.onMessage
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
// Start consuming from room servers
|
||||
func (s *OutputRoomEventConsumer) Start() error {
|
||||
return s.roomServerConsumer.Start()
|
||||
_, err := s.jetstream.Subscribe(s.topic, s.onMessage, s.durable)
|
||||
return err
|
||||
}
|
||||
|
||||
// onMessage is called when the appservice component receives a new event from
|
||||
// the room server output log.
|
||||
func (s *OutputRoomEventConsumer) onMessage(msg *sarama.ConsumerMessage) error {
|
||||
// Parse out the event JSON
|
||||
var output api.OutputEvent
|
||||
if err := json.Unmarshal(msg.Value, &output); err != nil {
|
||||
// If the message was invalid, log it and move on to the next message in the stream
|
||||
log.WithError(err).Errorf("roomserver output log: message parse failure")
|
||||
return nil
|
||||
}
|
||||
func (s *OutputRoomEventConsumer) onMessage(msg *nats.Msg) {
|
||||
jetstream.WithJetStreamMessage(msg, func(msg *nats.Msg) bool {
|
||||
// Parse out the event JSON
|
||||
var output api.OutputEvent
|
||||
if err := json.Unmarshal(msg.Data, &output); err != nil {
|
||||
// If the message was invalid, log it and move on to the next message in the stream
|
||||
log.WithError(err).Errorf("roomserver output log: message parse failure")
|
||||
return true
|
||||
}
|
||||
|
||||
if output.Type != api.OutputTypeNewRoomEvent {
|
||||
return nil
|
||||
}
|
||||
if output.Type != api.OutputTypeNewRoomEvent {
|
||||
return true
|
||||
}
|
||||
|
||||
events := []*gomatrixserverlib.HeaderedEvent{output.NewRoomEvent.Event}
|
||||
events = append(events, output.NewRoomEvent.AddStateEvents...)
|
||||
events := []*gomatrixserverlib.HeaderedEvent{output.NewRoomEvent.Event}
|
||||
events = append(events, output.NewRoomEvent.AddStateEvents...)
|
||||
|
||||
// Send event to any relevant application services
|
||||
return s.filterRoomserverEvents(context.TODO(), events)
|
||||
// Send event to any relevant application services
|
||||
if err := s.filterRoomserverEvents(context.TODO(), events); err != nil {
|
||||
log.WithError(err).Errorf("roomserver output log: filter error")
|
||||
return true
|
||||
}
|
||||
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
// filterRoomserverEvents takes in events and decides whether any of them need
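In the rewritten consumer, `Start()` now creates a durable JetStream subscription and `onMessage` runs inside `jetstream.WithJetStreamMessage`, where returning `true` acknowledges the message so that unparseable events are skipped rather than redelivered. The following standalone sketch approximates that subscribe-and-acknowledge pattern using only the public nats.go API; the subject and durable name are invented for the example and are not Dendrite's real values.

```go
package main

import (
	"log"

	"github.com/nats-io/nats.go"
)

// startConsumer mirrors the shape of the new Start()/onMessage pair above: a durable
// push subscription whose handler decides whether to acknowledge each message.
func startConsumer(js nats.JetStreamContext) error {
	_, err := js.Subscribe("OutputRoomEvent", func(msg *nats.Msg) {
		// Unmarshal and process msg.Data here. Acking on both success and parse
		// failure is what returning true inside jetstream.WithJetStreamMessage
		// amounts to: the message is not redelivered.
		if err := msg.Ack(); err != nil {
			log.Printf("ack failed: %v", err)
		}
	}, nats.Durable("ExampleRoomserverConsumer"), nats.ManualAck())
	return err
}

func main() {
	nc, err := nats.Connect(nats.DefaultURL)
	if err != nil {
		log.Fatal(err)
	}
	defer nc.Close()
	js, err := nc.JetStream()
	if err != nil {
		log.Fatal(err)
	}
	if err := startConsumer(js); err != nil {
		log.Fatal(err)
	}
	select {} // keep the process alive so the subscription keeps receiving
}
```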
|
||||
|
|
|
|||
|
|
@@ -17,12 +17,10 @@ package storage
import (
	"context"

	"github.com/matrix-org/dendrite/internal"
	"github.com/matrix-org/gomatrixserverlib"
)

type Database interface {
	internal.PartitionStorer
	StoreEvent(ctx context.Context, appServiceID string, event *gomatrixserverlib.HeaderedEvent) error
	GetEventsWithAppServiceID(ctx context.Context, appServiceID string, limit int) (int, int, []gomatrixserverlib.HeaderedEvent, bool, error)
	CountEventsWithAppServiceID(ctx context.Context, appServiceID string) (int, error)
|
||||
|
|
|
|||
|
|
@@ -19,7 +19,6 @@ not contain the Go toolchain etc.

There are three sample `docker-compose` files:

- `docker-compose.deps.yml` which runs the Postgres and Kafka prerequisites
- `docker-compose.monolith.yml` which runs a monolith Dendrite deployment
- `docker-compose.polylith.yml` which runs a polylith Dendrite deployment

@@ -28,7 +27,7 @@ There are three sample `docker-compose` files:
The `docker-compose` files refer to the `/etc/dendrite` volume as where the
runtime config should come from. The mounted folder must contain:

- `dendrite.yaml` configuration file (based on the [`dendrite-config.yaml`](https://raw.githubusercontent.com/matrix-org/dendrite/master/dendrite-config.yaml)
- `dendrite.yaml` configuration file (from the [Docker config folder](https://github.com/matrix-org/dendrite/tree/master/build/docker/config)
  sample in the `build/docker/config` folder of this repository.)
- `matrix_key.pem` server key, as generated using `cmd/generate-keys`
- `server.crt` certificate file

@@ -50,15 +49,9 @@ The key files will now exist in your current working directory, and can be mounted

## Starting Dendrite as a monolith deployment

Create your config based on the [`dendrite-config.yaml`](https://raw.githubusercontent.com/matrix-org/dendrite/master/dendrite-config.yaml) configuration file in the `build/docker/config` folder of this repository. And rename the config file to `dendrite.yml` (and put it in your `config` directory).
Create your config based on the [`dendrite.yaml`](https://github.com/matrix-org/dendrite/tree/master/build/docker/config) configuration file in the `build/docker/config` folder of this repository.

Once in place, start the PostgreSQL dependency:

```
docker-compose -f docker-compose.deps.yml up postgres
```

Wait a few seconds for PostgreSQL to finish starting up, and then start a monolith:
Then start the deployment:

```
docker-compose -f docker-compose.monolith.yml up

@@ -66,15 +59,9 @@ docker-compose -f docker-compose.monolith.yml up

## Starting Dendrite as a polylith deployment

Create your config based on the [`dendrite-config.yaml`](https://raw.githubusercontent.com/matrix-org/dendrite/master/dendrite-config.yaml) configuration file in the `build/docker/config` folder of this repository. And rename the config file to `dendrite.yml` (and put it in your `config` directory).
Create your config based on the [`dendrite-config.yaml`](https://github.com/matrix-org/dendrite/tree/master/build/docker/config) configuration file in the `build/docker/config` folder of this repository.

Once in place, start all the dependencies:

```
docker-compose -f docker-compose.deps.yml up
```

Wait a few seconds for PostgreSQL and Kafka to finish starting up, and then start a polylith:
Then start the deployment:

```
docker-compose -f docker-compose.polylith.yml up
|
||||
|
|
|
|||
|
|
@ -28,7 +28,7 @@
|
|||
# connection can be idle in seconds - a negative value is unlimited.
|
||||
|
||||
# The version of the configuration file.
|
||||
version: 1
|
||||
version: 2
|
||||
|
||||
# Global Matrix configuration. This configuration applies to all components.
|
||||
global:
|
||||
|
|
@ -62,29 +62,28 @@ global:
|
|||
- matrix.org
|
||||
- vector.im
|
||||
|
||||
# Configuration for Kafka/Naffka.
|
||||
kafka:
|
||||
# List of Kafka broker addresses to connect to. This is not needed if using
|
||||
# Naffka in monolith mode.
|
||||
# Configuration for NATS JetStream
|
||||
jetstream:
|
||||
# A list of NATS Server addresses to connect to. If none are specified, an
|
||||
# internal NATS server will be started automatically when running Dendrite
|
||||
# in monolith mode. It is required to specify the address of at least one
|
||||
# NATS Server node if running in polylith mode.
|
||||
addresses:
|
||||
- kafka:9092
|
||||
- jetstream:4222
|
||||
|
||||
# The prefix to use for Kafka topic names for this homeserver. Change this only if
|
||||
# you are running more than one Dendrite homeserver on the same Kafka deployment.
|
||||
# Keep all NATS streams in memory, rather than persisting them to the storage
# path below. This option is present primarily for integration testing and
# should not be used on a real-world Dendrite deployment.
|
||||
in_memory: false
|
||||
|
||||
# Persistent directory to store JetStream streams in. This directory
|
||||
# should be preserved across Dendrite restarts.
|
||||
storage_path: ./
|
||||
|
||||
# The prefix to use for stream names for this homeserver - really only
|
||||
# useful if running more than one Dendrite on the same NATS deployment.
|
||||
topic_prefix: Dendrite
|
||||
|
||||
# Whether to use Naffka instead of Kafka. This is only available in monolith
|
||||
# mode, but means that you can run a single-process server without requiring
|
||||
# Kafka.
|
||||
use_naffka: false
|
||||
|
||||
# Naffka database options. Not required when using Kafka.
|
||||
naffka_database:
|
||||
connection_string: postgresql://dendrite:itsasecret@postgres/dendrite_naffka?sslmode=disable
|
||||
max_open_conns: 10
|
||||
max_idle_conns: 2
|
||||
conn_max_lifetime: -1
|
||||
|
||||
# Configuration for Prometheus metric collection.
|
||||
metrics:
|
||||
# Whether or not Prometheus metrics are enabled.
|
||||
|
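The new `jetstream` section above says that an internal NATS server is started automatically in monolith mode when no `addresses` are configured, persisting streams under `storage_path`. For illustration only, this is roughly how a JetStream-enabled NATS server can be embedded with the official `nats-server` library; the store directory is a hypothetical path, not a Dendrite default.

```go
package main

import (
	"log"
	"time"

	natsserver "github.com/nats-io/nats-server/v2/server"
)

func main() {
	// Hypothetical store directory, standing in for the storage_path option above.
	opts := &natsserver.Options{
		JetStream: true,
		StoreDir:  "/var/lib/dendrite/jetstream",
	}
	srv, err := natsserver.NewServer(opts)
	if err != nil {
		log.Fatal(err)
	}
	go srv.Start()
	if !srv.ReadyForConnections(5 * time.Second) {
		log.Fatal("embedded NATS server did not become ready in time")
	}
	log.Printf("embedded JetStream-enabled NATS listening at %s", srv.ClientURL())
}
```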
|
@ -266,6 +265,19 @@ media_api:
|
|||
height: 480
|
||||
method: scale
|
||||
|
||||
# Configuration for experimental MSC's
|
||||
mscs:
|
||||
# A list of enabled MSC's
|
||||
# Currently valid values are:
|
||||
# - msc2836 (Threading, see https://github.com/matrix-org/matrix-doc/pull/2836)
|
||||
# - msc2946 (Spaces Summary, see https://github.com/matrix-org/matrix-doc/pull/2946)
|
||||
mscs: []
|
||||
database:
|
||||
connection_string: postgresql://dendrite:itsasecret@postgres/dendrite_mscs?sslmode=disable
|
||||
max_open_conns: 5
|
||||
max_idle_conns: 2
|
||||
conn_max_lifetime: -1
|
||||
|
||||
# Configuration for the Room Server.
|
||||
room_server:
|
||||
internal_api:
|
||||
|
|
@ -1,42 +0,0 @@
|
|||
version: "3.4"
|
||||
services:
|
||||
# PostgreSQL is needed for both polylith and monolith modes.
|
||||
postgres:
|
||||
hostname: postgres
|
||||
image: postgres:14
|
||||
restart: always
|
||||
volumes:
|
||||
- ./postgres/create_db.sh:/docker-entrypoint-initdb.d/20-create_db.sh
|
||||
# To persist your PostgreSQL databases outside of the Docker image, to
|
||||
# prevent data loss, you will need to add something like this:
|
||||
# - ./path/to/persistent/storage:/var/lib/postgresql/data
|
||||
environment:
|
||||
POSTGRES_PASSWORD: itsasecret
|
||||
POSTGRES_USER: dendrite
|
||||
networks:
|
||||
- internal
|
||||
|
||||
# Zookeeper is only needed for polylith mode!
|
||||
zookeeper:
|
||||
hostname: zookeeper
|
||||
image: zookeeper
|
||||
networks:
|
||||
- internal
|
||||
|
||||
# Kafka is only needed for polylith mode!
|
||||
kafka:
|
||||
container_name: dendrite_kafka
|
||||
hostname: kafka
|
||||
image: wurstmeister/kafka
|
||||
environment:
|
||||
KAFKA_ADVERTISED_HOST_NAME: "kafka"
|
||||
KAFKA_DELETE_TOPIC_ENABLE: "true"
|
||||
KAFKA_ZOOKEEPER_CONNECT: "zookeeper:2181"
|
||||
depends_on:
|
||||
- zookeeper
|
||||
networks:
|
||||
- internal
|
||||
|
||||
networks:
|
||||
internal:
|
||||
attachable: true
|
||||
|
|
@ -1,5 +1,25 @@
|
|||
version: "3.4"
|
||||
services:
|
||||
postgres:
|
||||
hostname: postgres
|
||||
image: postgres:14
|
||||
restart: always
|
||||
volumes:
|
||||
- ./postgres/create_db.sh:/docker-entrypoint-initdb.d/20-create_db.sh
|
||||
# To persist your PostgreSQL databases outside of the Docker image,
|
||||
# to prevent data loss, modify the following ./path_to path:
|
||||
- ./path_to/postgresql:/var/lib/postgresql/data
|
||||
environment:
|
||||
POSTGRES_PASSWORD: itsasecret
|
||||
POSTGRES_USER: dendrite
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "pg_isready -U dendrite"]
|
||||
interval: 5s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
networks:
|
||||
- internal
|
||||
|
||||
monolith:
|
||||
hostname: monolith
|
||||
image: matrixdotorg/dendrite-monolith:latest
|
||||
|
|
@ -13,8 +33,11 @@ services:
|
|||
volumes:
|
||||
- ./config:/etc/dendrite
|
||||
- ./media:/var/dendrite/media
|
||||
depends_on:
|
||||
- postgres
|
||||
networks:
|
||||
- internal
|
||||
restart: unless-stopped
|
||||
|
||||
networks:
|
||||
internal:
|
||||
|
|
|
|||
|
|
@ -1,13 +1,51 @@
|
|||
version: "3.4"
|
||||
services:
|
||||
postgres:
|
||||
hostname: postgres
|
||||
image: postgres:14
|
||||
restart: always
|
||||
volumes:
|
||||
- ./postgres/create_db.sh:/docker-entrypoint-initdb.d/20-create_db.sh
|
||||
# To persist your PostgreSQL databases outside of the Docker image,
|
||||
# to prevent data loss, modify the following ./path_to path:
|
||||
- ./path_to/postgresql:/var/lib/postgresql/data
|
||||
environment:
|
||||
POSTGRES_PASSWORD: itsasecret
|
||||
POSTGRES_USER: dendrite
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "pg_isready -U dendrite"]
|
||||
interval: 5s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
networks:
|
||||
- internal
|
||||
|
||||
jetstream:
|
||||
hostname: jetstream
|
||||
image: nats:latest
|
||||
command: |
|
||||
--jetstream
|
||||
--store_dir /var/lib/nats
|
||||
--cluster_name Dendrite
|
||||
volumes:
|
||||
# To persist your NATS JetStream streams outside of the Docker image,
# to prevent data loss, modify the following ./path_to path:
|
||||
- ./path_to/nats:/var/lib/nats
|
||||
networks:
|
||||
- internal
|
||||
|
||||
client_api:
|
||||
hostname: client_api
|
||||
image: matrixdotorg/dendrite-polylith:latest
|
||||
command: clientapi
|
||||
volumes:
|
||||
- ./config:/etc/dendrite
|
||||
depends_on:
|
||||
- jetstream
|
||||
- postgres
|
||||
networks:
|
||||
- internal
|
||||
restart: unless-stopped
|
||||
|
||||
media_api:
|
||||
hostname: media_api
|
||||
|
|
@ -18,6 +56,7 @@ services:
|
|||
- ./media:/var/dendrite/media
|
||||
networks:
|
||||
- internal
|
||||
restart: unless-stopped
|
||||
|
||||
sync_api:
|
||||
hostname: sync_api
|
||||
|
|
@ -25,8 +64,12 @@ services:
|
|||
command: syncapi
|
||||
volumes:
|
||||
- ./config:/etc/dendrite
|
||||
depends_on:
|
||||
- jetstream
|
||||
- postgres
|
||||
networks:
|
||||
- internal
|
||||
restart: unless-stopped
|
||||
|
||||
room_server:
|
||||
hostname: room_server
|
||||
|
|
@ -34,8 +77,12 @@ services:
|
|||
command: roomserver
|
||||
volumes:
|
||||
- ./config:/etc/dendrite
|
||||
depends_on:
|
||||
- jetstream
|
||||
- postgres
|
||||
networks:
|
||||
- internal
|
||||
restart: unless-stopped
|
||||
|
||||
edu_server:
|
||||
hostname: edu_server
|
||||
|
|
@ -43,8 +90,11 @@ services:
|
|||
command: eduserver
|
||||
volumes:
|
||||
- ./config:/etc/dendrite
|
||||
depends_on:
|
||||
- jetstream
|
||||
networks:
|
||||
- internal
|
||||
restart: unless-stopped
|
||||
|
||||
federation_api:
|
||||
hostname: federation_api
|
||||
|
|
@ -52,8 +102,12 @@ services:
|
|||
command: federationapi
|
||||
volumes:
|
||||
- ./config:/etc/dendrite
|
||||
depends_on:
|
||||
- jetstream
|
||||
- postgres
|
||||
networks:
|
||||
- internal
|
||||
restart: unless-stopped
|
||||
|
||||
key_server:
|
||||
hostname: key_server
|
||||
|
|
@ -61,8 +115,12 @@ services:
|
|||
command: keyserver
|
||||
volumes:
|
||||
- ./config:/etc/dendrite
|
||||
depends_on:
|
||||
- jetstream
|
||||
- postgres
|
||||
networks:
|
||||
- internal
|
||||
restart: unless-stopped
|
||||
|
||||
user_api:
|
||||
hostname: user_api
|
||||
|
|
@ -70,8 +128,12 @@ services:
|
|||
command: userapi
|
||||
volumes:
|
||||
- ./config:/etc/dendrite
|
||||
depends_on:
|
||||
- jetstream
|
||||
- postgres
|
||||
networks:
|
||||
- internal
|
||||
restart: unless-stopped
|
||||
|
||||
appservice_api:
|
||||
hostname: appservice_api
|
||||
|
|
@ -82,8 +144,11 @@ services:
|
|||
networks:
|
||||
- internal
|
||||
depends_on:
|
||||
- jetstream
|
||||
- postgres
|
||||
- room_server
|
||||
- user_api
|
||||
restart: unless-stopped
|
||||
|
||||
networks:
|
||||
internal:
|
||||
|
|
|
|||
|
|
@@ -1,5 +1,5 @@
#!/bin/sh

for db in userapi_accounts userapi_devices mediaapi syncapi roomserver keyserver federationapi appservice naffka; do
for db in userapi_accounts userapi_devices mediaapi syncapi roomserver keyserver federationapi appservice mscs; do
  createdb -U dendrite -O dendrite dendrite_$db
done
|
||||
|
|
|
|||
|
|
@ -281,8 +281,7 @@ func (m *DendriteMonolith) Start() {
|
|||
cfg.Global.ServerName = gomatrixserverlib.ServerName(hex.EncodeToString(pk))
|
||||
cfg.Global.PrivateKey = sk
|
||||
cfg.Global.KeyID = gomatrixserverlib.KeyID(signing.KeyID)
|
||||
cfg.Global.Kafka.UseNaffka = true
|
||||
cfg.Global.Kafka.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/%s-naffka.db", m.StorageDirectory, prefix))
|
||||
cfg.Global.JetStream.StoragePath = config.Path(fmt.Sprintf("file:%s/%s", m.StorageDirectory, prefix))
|
||||
cfg.UserAPI.AccountDatabase.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/%s-account.db", m.StorageDirectory, prefix))
|
||||
cfg.UserAPI.DeviceDatabase.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/%s-device.db", m.StorageDirectory, prefix))
|
||||
cfg.MediaAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/%s-mediaapi.db", m.CacheDirectory, prefix))
|
||||
|
|
|
|||
|
|
@ -86,8 +86,7 @@ func (m *DendriteMonolith) Start() {
|
|||
cfg.Global.ServerName = gomatrixserverlib.ServerName(ygg.DerivedServerName())
|
||||
cfg.Global.PrivateKey = ygg.PrivateKey()
|
||||
cfg.Global.KeyID = gomatrixserverlib.KeyID(signing.KeyID)
|
||||
cfg.Global.Kafka.UseNaffka = true
|
||||
cfg.Global.Kafka.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/dendrite-p2p-naffka.db", m.StorageDirectory))
|
||||
cfg.Global.JetStream.StoragePath = config.Path(fmt.Sprintf("file:%s/", m.StorageDirectory))
|
||||
cfg.UserAPI.AccountDatabase.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/dendrite-p2p-account.db", m.StorageDirectory))
|
||||
cfg.UserAPI.DeviceDatabase.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/dendrite-p2p-device.db", m.StorageDirectory))
|
||||
cfg.MediaAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/dendrite-p2p-mediaapi.db", m.StorageDirectory))
|
||||
|
|
|
|||
|
|
@@ -1,4 +1,4 @@
FROM golang:1.13-stretch as build
FROM golang:1.16-stretch as build
RUN apt-get update && apt-get install -y sqlite3
WORKDIR /build
|
||||
|
||||
|
|
|
|||
|
|
@ -26,7 +26,7 @@ import (
|
|||
keyserverAPI "github.com/matrix-org/dendrite/keyserver/api"
|
||||
roomserverAPI "github.com/matrix-org/dendrite/roomserver/api"
|
||||
"github.com/matrix-org/dendrite/setup/config"
|
||||
"github.com/matrix-org/dendrite/setup/kafka"
|
||||
"github.com/matrix-org/dendrite/setup/jetstream"
|
||||
userapi "github.com/matrix-org/dendrite/userapi/api"
|
||||
"github.com/matrix-org/dendrite/userapi/storage/accounts"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
|
|
@ -49,11 +49,11 @@ func AddPublicRoutes(
|
|||
extRoomsProvider api.ExtraPublicRoomsProvider,
|
||||
mscCfg *config.MSCs,
|
||||
) {
|
||||
_, producer := kafka.SetupConsumerProducer(&cfg.Matrix.Kafka)
|
||||
js, _, _ := jetstream.Prepare(&cfg.Matrix.JetStream)
|
||||
|
||||
syncProducer := &producers.SyncAPIProducer{
|
||||
Producer: producer,
|
||||
Topic: cfg.Matrix.Kafka.TopicFor(config.TopicOutputClientData),
|
||||
JetStream: js,
|
||||
Topic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputClientData),
|
||||
}
|
||||
|
||||
routing.Setup(
|
||||
|
|
|
|||
|
|
@ -17,39 +17,42 @@ package producers
|
|||
import (
|
||||
"encoding/json"
|
||||
|
||||
"github.com/Shopify/sarama"
|
||||
"github.com/matrix-org/dendrite/internal/eventutil"
|
||||
"github.com/matrix-org/dendrite/setup/jetstream"
|
||||
"github.com/nats-io/nats.go"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// SyncAPIProducer produces events for the sync API server to consume
|
||||
type SyncAPIProducer struct {
|
||||
Topic string
|
||||
Producer sarama.SyncProducer
|
||||
Topic string
|
||||
JetStream nats.JetStreamContext
|
||||
}
|
||||
|
||||
// SendData sends account data to the sync API server
|
||||
func (p *SyncAPIProducer) SendData(userID string, roomID string, dataType string) error {
|
||||
var m sarama.ProducerMessage
|
||||
m := &nats.Msg{
|
||||
Subject: p.Topic,
|
||||
Header: nats.Header{},
|
||||
}
|
||||
m.Header.Set(jetstream.UserID, userID)
|
||||
|
||||
data := eventutil.AccountData{
|
||||
RoomID: roomID,
|
||||
Type: dataType,
|
||||
}
|
||||
value, err := json.Marshal(data)
|
||||
var err error
|
||||
m.Data, err = json.Marshal(data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
m.Topic = string(p.Topic)
|
||||
m.Key = sarama.StringEncoder(userID)
|
||||
m.Value = sarama.ByteEncoder(value)
|
||||
log.WithFields(log.Fields{
|
||||
"user_id": userID,
|
||||
"room_id": roomID,
|
||||
"data_type": dataType,
|
||||
}).Infof("Producing to topic '%s'", p.Topic)
|
||||
|
||||
_, _, err = p.Producer.SendMessage(&m)
|
||||
_, err = p.JetStream.PublishMsg(m)
|
||||
return err
|
||||
}
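The rewritten `SendData` above builds a `nats.Msg`, carries the user ID in a NATS header instead of a Kafka partition key, and publishes it through JetStream. Below is a hedged, self-contained sketch of that publish-with-header pattern; the subject, header key and payload fields are placeholders rather than Dendrite's actual constants.

```go
package main

import (
	"encoding/json"
	"log"

	"github.com/nats-io/nats.go"
)

// sendAccountData mirrors the shape of the rewritten SendData above: marshal a payload,
// attach the user ID as a NATS header, and publish through JetStream.
func sendAccountData(js nats.JetStreamContext, userID, roomID, dataType string) error {
	msg := nats.NewMsg("OutputClientData")
	msg.Header.Set("user_id", userID)

	payload, err := json.Marshal(map[string]string{
		"room_id": roomID,
		"type":    dataType,
	})
	if err != nil {
		return err
	}
	msg.Data = payload

	_, err = js.PublishMsg(msg)
	return err
}

func main() {
	nc, err := nats.Connect(nats.DefaultURL)
	if err != nil {
		log.Fatal(err)
	}
	defer nc.Close()
	js, err := nc.JetStream()
	if err != nil {
		log.Fatal(err)
	}
	if err := sendAccountData(js, "@alice:example.com", "!room:example.com", "m.fully_read"); err != nil {
		log.Fatal(err)
	}
}
```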
|
||||
|
|
|
|||
|
|
@ -463,6 +463,7 @@ func createRoom(
|
|||
},
|
||||
ev.Headered(roomVersion),
|
||||
nil,
|
||||
false,
|
||||
); err != nil {
|
||||
util.GetLogger(req.Context()).WithError(err).Error("SendEventWithState failed")
|
||||
return jsonerror.InternalServerError()
|
||||
|
|
|
|||
|
|
@ -110,6 +110,7 @@ func sendMembership(ctx context.Context, accountDB accounts.Database, device *us
|
|||
[]*gomatrixserverlib.HeaderedEvent{event.Event.Headered(roomVer)},
|
||||
cfg.Matrix.ServerName,
|
||||
nil,
|
||||
false,
|
||||
); err != nil {
|
||||
util.GetLogger(ctx).WithError(err).Error("SendEvents failed")
|
||||
return jsonerror.InternalServerError()
|
||||
|
|
|
|||
|
|
@ -169,7 +169,7 @@ func SetAvatarURL(
|
|||
return jsonerror.InternalServerError()
|
||||
}
|
||||
|
||||
if err := api.SendEvents(req.Context(), rsAPI, api.KindNew, events, cfg.Matrix.ServerName, nil); err != nil {
|
||||
if err := api.SendEvents(req.Context(), rsAPI, api.KindNew, events, cfg.Matrix.ServerName, nil, false); err != nil {
|
||||
util.GetLogger(req.Context()).WithError(err).Error("SendEvents failed")
|
||||
return jsonerror.InternalServerError()
|
||||
}
|
||||
|
|
@ -286,7 +286,7 @@ func SetDisplayName(
|
|||
return jsonerror.InternalServerError()
|
||||
}
|
||||
|
||||
if err := api.SendEvents(req.Context(), rsAPI, api.KindNew, events, cfg.Matrix.ServerName, nil); err != nil {
|
||||
if err := api.SendEvents(req.Context(), rsAPI, api.KindNew, events, cfg.Matrix.ServerName, nil, false); err != nil {
|
||||
util.GetLogger(req.Context()).WithError(err).Error("SendEvents failed")
|
||||
return jsonerror.InternalServerError()
|
||||
}
|
||||
|
|
|
|||
|
|
@ -120,7 +120,7 @@ func SendRedaction(
|
|||
JSON: jsonerror.NotFound("Room does not exist"),
|
||||
}
|
||||
}
|
||||
if err = roomserverAPI.SendEvents(context.Background(), rsAPI, roomserverAPI.KindNew, []*gomatrixserverlib.HeaderedEvent{e}, cfg.Matrix.ServerName, nil); err != nil {
|
||||
if err = roomserverAPI.SendEvents(context.Background(), rsAPI, roomserverAPI.KindNew, []*gomatrixserverlib.HeaderedEvent{e}, cfg.Matrix.ServerName, nil, false); err != nil {
|
||||
util.GetLogger(req.Context()).WithError(err).Errorf("failed to SendEvents")
|
||||
return jsonerror.InternalServerError()
|
||||
}
|
||||
|
|
|
|||
|
|
@ -122,6 +122,7 @@ func SendEvent(
|
|||
},
|
||||
cfg.Matrix.ServerName,
|
||||
txnAndSessionID,
|
||||
false,
|
||||
); err != nil {
|
||||
util.GetLogger(req.Context()).WithError(err).Error("SendEvents failed")
|
||||
return jsonerror.InternalServerError()
|
||||
|
|
|
|||
|
|
@ -367,5 +367,6 @@ func emit3PIDInviteEvent(
|
|||
},
|
||||
cfg.Matrix.ServerName,
|
||||
nil,
|
||||
false,
|
||||
)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -123,8 +123,8 @@ func main() {
|
|||
cfg.Global.ServerName = "p2p"
|
||||
cfg.Global.PrivateKey = privKey
|
||||
cfg.Global.KeyID = gomatrixserverlib.KeyID(fmt.Sprintf("ed25519:%s", *instanceName))
|
||||
cfg.Global.Kafka.UseNaffka = true
|
||||
cfg.FederationAPI.FederationMaxRetries = 6
|
||||
cfg.Global.JetStream.StoragePath = config.Path(fmt.Sprintf("%s/", *instanceName))
|
||||
cfg.UserAPI.AccountDatabase.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-account.db", *instanceName))
|
||||
cfg.UserAPI.DeviceDatabase.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-device.db", *instanceName))
|
||||
cfg.MediaAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-mediaapi.db", *instanceName))
|
||||
|
|
@ -132,7 +132,6 @@ func main() {
|
|||
cfg.RoomServer.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-roomserver.db", *instanceName))
|
||||
cfg.FederationAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-federationapi.db", *instanceName))
|
||||
cfg.AppServiceAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-appservice.db", *instanceName))
|
||||
cfg.Global.Kafka.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-naffka.db", *instanceName))
|
||||
cfg.KeyServer.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-e2ekey.db", *instanceName))
|
||||
cfg.MSCs.MSCs = []string{"msc2836"}
|
||||
cfg.MSCs.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-mscs.db", *instanceName))
|
||||
|
|
|
|||
|
|
@ -158,7 +158,7 @@ func main() {
|
|||
cfg.Global.ServerName = gomatrixserverlib.ServerName(hex.EncodeToString(pk))
|
||||
cfg.Global.PrivateKey = sk
|
||||
cfg.Global.KeyID = gomatrixserverlib.KeyID(signing.KeyID)
|
||||
cfg.Global.Kafka.UseNaffka = true
|
||||
cfg.Global.JetStream.StoragePath = config.Path(fmt.Sprintf("%s/", *instanceName))
|
||||
cfg.UserAPI.AccountDatabase.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-account.db", *instanceName))
|
||||
cfg.UserAPI.DeviceDatabase.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-device.db", *instanceName))
|
||||
cfg.MediaAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-mediaapi.db", *instanceName))
|
||||
|
|
@ -167,7 +167,6 @@ func main() {
|
|||
cfg.KeyServer.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-keyserver.db", *instanceName))
|
||||
cfg.FederationAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-federationapi.db", *instanceName))
|
||||
cfg.AppServiceAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-appservice.db", *instanceName))
|
||||
cfg.Global.Kafka.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-naffka.db", *instanceName))
|
||||
cfg.MSCs.MSCs = []string{"msc2836", "msc2946"}
|
||||
if err := cfg.Derive(); err != nil {
|
||||
panic(err)
|
||||
|
|
|
|||
|
|
@ -77,7 +77,7 @@ func main() {
|
|||
cfg.Global.ServerName = gomatrixserverlib.ServerName(ygg.DerivedServerName())
|
||||
cfg.Global.PrivateKey = ygg.PrivateKey()
|
||||
cfg.Global.KeyID = gomatrixserverlib.KeyID(signing.KeyID)
|
||||
cfg.Global.Kafka.UseNaffka = true
|
||||
cfg.Global.JetStream.StoragePath = config.Path(fmt.Sprintf("%s/", *instanceName))
|
||||
cfg.UserAPI.AccountDatabase.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-account.db", *instanceName))
|
||||
cfg.UserAPI.DeviceDatabase.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-device.db", *instanceName))
|
||||
cfg.MediaAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-mediaapi.db", *instanceName))
|
||||
|
|
@ -86,7 +86,6 @@ func main() {
|
|||
cfg.KeyServer.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-keyserver.db", *instanceName))
|
||||
cfg.FederationAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-federationapi.db", *instanceName))
|
||||
cfg.AppServiceAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-appservice.db", *instanceName))
|
||||
cfg.Global.Kafka.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-naffka.db", *instanceName))
|
||||
cfg.MSCs.MSCs = []string{"msc2836"}
|
||||
cfg.MSCs.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-mscs.db", *instanceName))
|
||||
if err = cfg.Derive(); err != nil {
|
||||
|
|
|
|||
|
|
@ -99,10 +99,6 @@ func main() {
|
|||
}
|
||||
keyRing := fsAPI.KeyRing()
|
||||
|
||||
// The underlying roomserver implementation needs to be able to call the fedsender.
|
||||
// This is different to rsAPI which can be the http client which doesn't need this dependency
|
||||
rsImpl.SetFederationAPI(fsAPI, keyRing)
|
||||
|
||||
keyImpl := keyserver.NewInternalAPI(base, &base.Cfg.KeyServer, fsAPI)
|
||||
keyAPI := keyImpl
|
||||
if base.UseHTTPAPIs {
|
||||
|
|
|
|||
|
|
@ -48,7 +48,7 @@ const HEAD = "HEAD"
|
|||
// due to the error:
|
||||
// When using COPY with more than one source file, the destination must be a directory and end with a /
|
||||
// We need to run a postgres anyway, so use the dockerfile associated with Complement instead.
|
||||
const Dockerfile = `FROM golang:1.13-stretch as build
|
||||
const Dockerfile = `FROM golang:1.16-stretch as build
|
||||
RUN apt-get update && apt-get install -y postgresql
|
||||
WORKDIR /build
|
||||
|
||||
|
|
@ -189,7 +189,9 @@ func buildDendrite(httpClient *http.Client, dockerClient *client.Client, tmpDir,
|
|||
if err := decoder.Decode(&dl); err != nil {
|
||||
return "", fmt.Errorf("failed to decode build image output line: %w", err)
|
||||
}
|
||||
log.Printf("%s: %s", branchOrTagName, dl.Stream)
|
||||
if len(strings.TrimSpace(dl.Stream)) > 0 {
|
||||
log.Printf("%s: %s", branchOrTagName, dl.Stream)
|
||||
}
|
||||
if dl.Aux != nil {
|
||||
imgID, ok := dl.Aux["ID"]
|
||||
if ok {
|
||||
|
|
@ -425,8 +427,10 @@ func cleanup(dockerClient *client.Client) {
|
|||
// ignore all errors, we are just cleaning up and don't want to fail just because we fail to cleanup
|
||||
containers, _ := dockerClient.ContainerList(context.Background(), types.ContainerListOptions{
|
||||
Filters: label(dendriteUpgradeTestLabel),
|
||||
All: true,
|
||||
})
|
||||
for _, c := range containers {
|
||||
log.Printf("Removing container: %v %v\n", c.ID, c.Names)
|
||||
s := time.Second
|
||||
_ = dockerClient.ContainerStop(context.Background(), c.ID, &s)
|
||||
_ = dockerClient.ContainerRemove(context.Background(), c.ID, types.ContainerRemoveOptions{
|
||||
|
|
|
|||
|
|
@ -170,8 +170,7 @@ func startup() {
|
|||
cfg.RoomServer.Database.ConnectionString = "file:/idb/dendritejs_roomserver.db"
|
||||
cfg.SyncAPI.Database.ConnectionString = "file:/idb/dendritejs_syncapi.db"
|
||||
cfg.KeyServer.Database.ConnectionString = "file:/idb/dendritejs_e2ekey.db"
|
||||
cfg.Global.Kafka.UseNaffka = true
|
||||
cfg.Global.Kafka.Database.ConnectionString = "file:/idb/dendritejs_naffka.db"
|
||||
cfg.Global.JetStream.StoragePath = "file:/idb/dendritejs/"
|
||||
cfg.Global.TrustedIDServers = []string{}
|
||||
cfg.Global.KeyID = gomatrixserverlib.KeyID(signing.KeyID)
|
||||
cfg.Global.PrivateKey = sk
|
||||
|
|
|
|||
|
|
@ -173,8 +173,7 @@ func main() {
|
|||
cfg.RoomServer.Database.ConnectionString = "file:/idb/dendritejs_roomserver.db"
|
||||
cfg.SyncAPI.Database.ConnectionString = "file:/idb/dendritejs_syncapi.db"
|
||||
cfg.KeyServer.Database.ConnectionString = "file:/idb/dendritejs_e2ekey.db"
|
||||
cfg.Global.Kafka.UseNaffka = true
|
||||
cfg.Global.Kafka.Database.ConnectionString = "file:/idb/dendritejs_naffka.db"
|
||||
cfg.Global.JetStream.StoragePath = "file:/idb/dendritejs/"
|
||||
cfg.Global.TrustedIDServers = []string{
|
||||
"matrix.org", "vector.im",
|
||||
}
|
||||
|
|
|
|||
|
|
@ -16,13 +16,14 @@ func main() {
|
|||
dbURI := flag.String("db", "", "The DB URI to use for all components if not SQLite files")
|
||||
flag.Parse()
|
||||
|
||||
cfg := &config.Dendrite{}
|
||||
cfg := &config.Dendrite{
|
||||
Version: config.Version,
|
||||
}
|
||||
cfg.Defaults(true)
|
||||
if *serverName != "" {
|
||||
cfg.Global.ServerName = gomatrixserverlib.ServerName(*serverName)
|
||||
}
|
||||
if *dbURI != "" {
|
||||
cfg.Global.Kafka.Database.ConnectionString = config.DataSource(*dbURI)
|
||||
cfg.AppServiceAPI.Database.ConnectionString = config.DataSource(*dbURI)
|
||||
cfg.FederationAPI.Database.ConnectionString = config.DataSource(*dbURI)
|
||||
cfg.KeyServer.Database.ConnectionString = config.DataSource(*dbURI)
|
||||
|
|
@ -88,6 +89,7 @@ func main() {
|
|||
cfg.MSCs.MSCs = []string{"msc2836", "msc2946", "msc2444", "msc2753"}
|
||||
cfg.Logging[0].Level = "trace"
|
||||
cfg.UserAPI.BCryptCost = bcrypt.MinCost
|
||||
cfg.Global.JetStream.InMemory = true
|
||||
}
|
||||
|
||||
j, err := yaml.Marshal(cfg)
|
||||
|
|
|
|||
|
|
@ -28,7 +28,7 @@
|
|||
# connection can be idle in seconds - a negative value is unlimited.
|
||||
|
||||
# The version of the configuration file.
|
||||
version: 1
|
||||
version: 2
|
||||
|
||||
# Global Matrix configuration. This configuration applies to all components.
|
||||
global:
|
||||
|
|
@ -68,35 +68,28 @@ global:
|
|||
# to other servers and the federation API will not be exposed.
|
||||
disable_federation: false
|
||||
|
||||
# Configuration for Kafka/Naffka.
|
||||
kafka:
|
||||
# List of Kafka broker addresses to connect to. This is not needed if using
|
||||
# Naffka in monolith mode.
|
||||
# Configuration for NATS JetStream
|
||||
jetstream:
|
||||
# A list of NATS Server addresses to connect to. If none are specified, an
|
||||
# internal NATS server will be started automatically when running Dendrite
|
||||
# in monolith mode. It is required to specify the address of at least one
|
||||
# NATS Server node if running in polylith mode.
|
||||
addresses:
|
||||
- localhost:2181
|
||||
# - localhost:4222
|
||||
|
||||
# The prefix to use for Kafka topic names for this homeserver. Change this only if
|
||||
# you are running more than one Dendrite homeserver on the same Kafka deployment.
|
||||
# Keep all NATS streams in memory, rather than persisting them to the storage
# path below. This option is present primarily for integration testing and
# should not be used on a real-world Dendrite deployment.
|
||||
in_memory: false
|
||||
|
||||
# Persistent directory to store JetStream streams in. This directory
|
||||
# should be preserved across Dendrite restarts.
|
||||
storage_path: ./
|
||||
|
||||
# The prefix to use for stream names for this homeserver - really only
|
||||
# useful if running more than one Dendrite on the same NATS deployment.
|
||||
topic_prefix: Dendrite
|
||||
|
||||
# Whether to use Naffka instead of Kafka. This is only available in monolith
|
||||
# mode, but means that you can run a single-process server without requiring
|
||||
# Kafka.
|
||||
use_naffka: true
|
||||
|
||||
# The max size a Kafka message is allowed to use.
|
||||
# You only need to change this value, if you encounter issues with too large messages.
|
||||
# Must be less than/equal to "max.message.bytes" configured in Kafka.
|
||||
# Defaults to 8388608 bytes.
|
||||
# max_message_bytes: 8388608
|
||||
|
||||
# Naffka database options. Not required when using Kafka.
|
||||
naffka_database:
|
||||
connection_string: file:naffka.db
|
||||
max_open_conns: 10
|
||||
max_idle_conns: 2
|
||||
conn_max_lifetime: -1
|
||||
|
||||
# Configuration for Prometheus metric collection.
|
||||
metrics:
|
||||
# Whether or not Prometheus metrics are enabled.
|
||||
|
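The `topic_prefix` comment above explains that stream names are prefixed per homeserver so that several Dendrite instances can share one NATS deployment without colliding. Purely as an illustration of that idea (this is not Dendrite's stream-provisioning code, and the stream and subject names are invented), a prefixed stream could be declared through nats.go like this:

```go
package main

import (
	"log"

	"github.com/nats-io/nats.go"
)

func main() {
	nc, err := nats.Connect(nats.DefaultURL)
	if err != nil {
		log.Fatal(err)
	}
	defer nc.Close()
	js, err := nc.JetStream()
	if err != nil {
		log.Fatal(err)
	}

	// Illustrative only: declare a stream whose name carries the configured prefix,
	// so two homeservers sharing one NATS deployment do not collide.
	const prefix = "Dendrite"
	if _, err := js.AddStream(&nats.StreamConfig{
		Name:     prefix + "OutputClientData",
		Subjects: []string{prefix + "OutputClientData"},
	}); err != nil {
		log.Fatal(err)
	}
	log.Println("stream declared")
}
```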
|
@ -121,8 +114,8 @@ global:
|
|||
# Configuration for the Appservice API.
|
||||
app_service_api:
|
||||
internal_api:
|
||||
listen: http://localhost:7777
|
||||
connect: http://localhost:7777
|
||||
listen: http://localhost:7777 # Only used in polylith deployments
|
||||
connect: http://localhost:7777 # Only used in polylith deployments
|
||||
database:
|
||||
connection_string: file:appservice.db
|
||||
max_open_conns: 10
|
||||
|
|
@ -140,8 +133,8 @@ app_service_api:
|
|||
# Configuration for the Client API.
|
||||
client_api:
|
||||
internal_api:
|
||||
listen: http://localhost:7771
|
||||
connect: http://localhost:7771
|
||||
listen: http://localhost:7771 # Only used in polylith deployments
|
||||
connect: http://localhost:7771 # Only used in polylith deployments
|
||||
external_api:
|
||||
listen: http://[::]:8071
|
||||
|
||||
|
|
@ -181,14 +174,14 @@ client_api:
|
|||
# Configuration for the EDU server.
|
||||
edu_server:
|
||||
internal_api:
|
||||
listen: http://localhost:7778
|
||||
connect: http://localhost:7778
|
||||
listen: http://localhost:7778 # Only used in polylith deployments
|
||||
connect: http://localhost:7778 # Only used in polylith deployments
|
||||
|
||||
# Configuration for the Federation API.
|
||||
federation_api:
|
||||
internal_api:
|
||||
listen: http://localhost:7772
|
||||
connect: http://localhost:7772
|
||||
listen: http://localhost:7772 # Only used in polylith deployments
|
||||
connect: http://localhost:7772 # Only used in polylith deployments
|
||||
external_api:
|
||||
listen: http://[::]:8072
|
||||
database:
|
||||
|
|
@ -237,8 +230,8 @@ federation_api:
|
|||
# Configuration for the Key Server (for end-to-end encryption).
|
||||
key_server:
|
||||
internal_api:
|
||||
listen: http://localhost:7779
|
||||
connect: http://localhost:7779
|
||||
listen: http://localhost:7779 # Only used in polylith deployments
|
||||
connect: http://localhost:7779 # Only used in polylith deployments
|
||||
database:
|
||||
connection_string: file:keyserver.db
|
||||
max_open_conns: 10
|
||||
|
|
@ -248,8 +241,8 @@ key_server:
|
|||
# Configuration for the Media API.
|
||||
media_api:
|
||||
internal_api:
|
||||
listen: http://localhost:7774
|
||||
connect: http://localhost:7774
|
||||
listen: http://localhost:7774 # Only used in polylith deployments
|
||||
connect: http://localhost:7774 # Only used in polylith deployments
|
||||
external_api:
|
||||
listen: http://[::]:8074
|
||||
database:
|
||||
|
|
@ -300,8 +293,8 @@ mscs:
|
|||
# Configuration for the Room Server.
|
||||
room_server:
|
||||
internal_api:
|
||||
listen: http://localhost:7770
|
||||
connect: http://localhost:7770
|
||||
listen: http://localhost:7770 # Only used in polylith deployments
|
||||
connect: http://localhost:7770 # Only used in polylith deployments
|
||||
database:
|
||||
connection_string: file:roomserver.db
|
||||
max_open_conns: 10
|
||||
|
|
@ -311,8 +304,8 @@ room_server:
|
|||
# Configuration for the Sync API.
|
||||
sync_api:
|
||||
internal_api:
|
||||
listen: http://localhost:7773
|
||||
connect: http://localhost:7773
|
||||
listen: http://localhost:7773 # Only used in polylith deployments
|
||||
connect: http://localhost:7773 # Only used in polylith deployments
|
||||
external_api:
|
||||
listen: http://[::]:8073
|
||||
database:
|
||||
|
|
@ -336,8 +329,8 @@ user_api:
|
|||
# This value can be low if performing tests or on embedded Dendrite instances (e.g WASM builds)
|
||||
# bcrypt_cost: 10
|
||||
internal_api:
|
||||
listen: http://localhost:7781
|
||||
connect: http://localhost:7781
|
||||
listen: http://localhost:7781 # Only used in polylith deployments
|
||||
connect: http://localhost:7781 # Only used in polylith deployments
|
||||
account_database:
|
||||
connection_string: file:userapi_accounts.db
|
||||
max_open_conns: 10
|
||||
|
|
|
|||
|
|
@@ -2,21 +2,23 @@

Dendrite can be run in one of two configurations:

* **Polylith mode**: A cluster of individual components, dealing with different
  aspects of the Matrix protocol (see [WIRING.md](WIRING-Current.md)). Components communicate
  with each other using internal HTTP APIs and [Apache Kafka](https://kafka.apache.org).
  This will almost certainly be the preferred model for large-scale deployments.

* **Monolith mode**: All components run in the same process. In this mode,
  Kafka is completely optional and can instead be replaced with an in-process
  lightweight implementation called [Naffka](https://github.com/matrix-org/naffka). This
  will usually be the preferred model for low-volume, low-user or experimental deployments.
  it is possible to run an in-process [NATS Server](https://github.com/nats-io/nats-server)
  instead of running a standalone deployment. This will usually be the preferred model for
  low-to-mid volume deployments, providing the best balance between performance and resource usage.

For most deployments, it is **recommended to run in monolith mode with PostgreSQL databases**.
* **Polylith mode**: A cluster of individual components running in their own processes, dealing
  with different aspects of the Matrix protocol (see [WIRING.md](WIRING-Current.md)). Components
  communicate with each other using internal HTTP APIs and [NATS Server](https://github.com/nats-io/nats-server).
  This will almost certainly be the preferred model for very large deployments but scalability
  comes with a cost. API calls are expensive and therefore a polylith deployment may end up using
  disproportionately more resources for a smaller number of users compared to a monolith deployment.

In almost all cases, it is **recommended to run in monolith mode with PostgreSQL databases**.

Regardless of whether you are running in polylith or monolith mode, each Dendrite component that
requires storage has its own database. Both Postgres and SQLite are supported and can be
mixed-and-matched across components as needed in the configuration file.
requires storage has its own database connections. Both Postgres and SQLite are supported and can
be mixed-and-matched across components as needed in the configuration file.

Be advised that Dendrite is still in development and it's not recommended for
use in production environments just yet!
|
||||
|
|
@@ -26,13 +28,11 @@ use in production environments just yet!

Dendrite requires:

* Go 1.15 or higher
* Postgres 9.6 or higher (if using Postgres databases, not needed for SQLite)
* PostgreSQL 12 or higher (if using PostgreSQL databases, not needed for SQLite)

If you want to run a polylith deployment, you also need:

* Apache Kafka 0.10.2+

Please note that Kafka is **not required** for a monolith deployment.
* A standalone [NATS Server](https://github.com/nats-io/nats-server) deployment with JetStream enabled
|
||||
|
||||
## Building Dendrite
|
||||
|
||||
|
|
@ -49,40 +49,18 @@ Then build it:
|
|||
./build.sh
|
||||
```
|
||||
|
||||
## Install Kafka (polylith only)
|
||||
## Install NATS Server
|
||||
|
||||
Install and start Kafka (c.f. [scripts/install-local-kafka.sh](scripts/install-local-kafka.sh)):
|
||||
Follow the [NATS Server installation instructions](https://docs.nats.io/running-a-nats-service/introduction/installation) and then [start your NATS deployment](https://docs.nats.io/running-a-nats-service/introduction/running).
|
||||
|
||||
```bash
|
||||
KAFKA_URL=http://archive.apache.org/dist/kafka/2.1.0/kafka_2.11-2.1.0.tgz
|
||||
|
||||
# Only download the kafka if it isn't already downloaded.
|
||||
test -f kafka.tgz || wget $KAFKA_URL -O kafka.tgz
|
||||
# Unpack the kafka over the top of any existing installation
|
||||
mkdir -p kafka && tar xzf kafka.tgz -C kafka --strip-components 1
|
||||
|
||||
# Start the zookeeper running in the background.
|
||||
# By default the zookeeper listens on localhost:2181
|
||||
kafka/bin/zookeeper-server-start.sh -daemon kafka/config/zookeeper.properties
|
||||
|
||||
# Start the kafka server running in the background.
|
||||
# By default the kafka listens on localhost:9092
|
||||
kafka/bin/kafka-server-start.sh -daemon kafka/config/server.properties
|
||||
```
|
||||
|
||||
On macOS, you can use [Homebrew](https://brew.sh/) for easier setup of Kafka:
|
||||
|
||||
```bash
|
||||
brew install kafka
|
||||
brew services start zookeeper
|
||||
brew services start kafka
|
||||
```
|
||||
JetStream must be enabled, either by passing the `-js` flag to `nats-server`,
or by specifying the `store_dir` option in the `jetstream` configuration.
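Before pointing Dendrite at a standalone deployment, it can be worth confirming that JetStream really is enabled on it. A small throwaway program using nats.go can do this by querying the JetStream account information; the URL below is a placeholder for your own server.

```go
package main

import (
	"log"

	"github.com/nats-io/nats.go"
)

func main() {
	// Placeholder URL for your own NATS deployment.
	nc, err := nats.Connect("nats://localhost:4222")
	if err != nil {
		log.Fatal(err)
	}
	defer nc.Close()

	js, err := nc.JetStream()
	if err != nil {
		log.Fatal(err)
	}
	// AccountInfo fails if the server does not have JetStream enabled.
	info, err := js.AccountInfo()
	if err != nil {
		log.Fatalf("JetStream does not appear to be enabled: %v", err)
	}
	log.Printf("JetStream is enabled: %d streams, %d consumers", info.Streams, info.Consumers)
}
```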
|
||||
|
||||
## Configuration
|
||||
|
||||
### PostgreSQL database setup
|
||||
|
||||
Assuming that PostgreSQL 9.6 (or later) is installed:
|
||||
Assuming that PostgreSQL 12 (or later) is installed:
|
||||
|
||||
* Create role, choosing a new password when prompted:
|
||||
|
||||
|
|
@ -109,7 +87,7 @@ On macOS, omit `sudo -u postgres` from the below commands.
|
|||
* If you want to run each Dendrite component with its own database:
|
||||
|
||||
```bash
|
||||
for i in mediaapi syncapi roomserver signingkeyserver federationsender appservice keyserver userapi_accounts userapi_devices naffka; do
|
||||
for i in mediaapi syncapi roomserver federationapi appservice keyserver userapi_accounts userapi_devices; do
|
||||
sudo -u postgres createdb -O dendrite dendrite_$i
|
||||
done
|
||||
```
|
||||
|
|
@ -163,7 +141,11 @@ Create config file, based on `dendrite-config.yaml`. Call it `dendrite.yaml`. Th
|
|||
* `postgres://dendrite:password@localhost/dendrite_userapi_account?sslmode=disable` to connect to PostgreSQL without SSL/TLS
|
||||
* For SQLite on disk: `file:component.db` or `file:///path/to/component.db`, e.g. `file:userapi_account.db`
|
||||
* Postgres and SQLite can be mixed and matched on different components as desired.
|
||||
* The `use_naffka` option if using Naffka in a monolith deployment
|
||||
* Either one of the following in the `jetstream` configuration section (see the sketch after this list):
  * The `addresses` option — a list of one or more addresses of an external standalone
    NATS Server deployment
  * The `storage_path` — where on the filesystem the built-in NATS server should
    store durable queues, if using the built-in NATS server
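A rough sketch of the first option follows: connecting to a list of external NATS Server addresses with nats.go. The addresses are hypothetical stand-ins for whatever you configure under `addresses`, and this is not Dendrite's actual connection code.

```go
package main

import (
	"log"
	"strings"

	"github.com/nats-io/nats.go"
)

func main() {
	// Hypothetical stand-ins for the entries you would put under `addresses`.
	addresses := []string{"nats://nats1:4222", "nats://nats2:4222"}

	// nats.Connect accepts a comma-separated list of server URLs.
	nc, err := nats.Connect(strings.Join(addresses, ","))
	if err != nil {
		log.Fatalf("failed to connect to NATS: %v", err)
	}
	defer nc.Close()

	log.Printf("connected to %s", nc.ConnectedUrl())
}
```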
|
||||
|
||||
There are other options which may be useful so review them all. In particular,
|
||||
if you are trying to federate from your Dendrite instance into public rooms
|
||||
|
|
@ -177,11 +159,6 @@ using SQLite, all components **MUST** use their own database file.
|
|||
|
||||
## Starting a monolith server
|
||||
|
||||
It is possible to use Naffka as an in-process replacement to Kafka when using
|
||||
the monolith server. To do this, set `use_naffka: true` in your `dendrite.yaml`
|
||||
configuration and uncomment the relevant Naffka line in the `database` section.
|
||||
Be sure to update the database username and password if needed.
|
||||
|
||||
The monolith server can be started as shown below. By default it listens for
|
||||
HTTP connections on port 8008, so you can configure your Matrix client to use
|
||||
`http://servername:8008` as the server:
|
||||
|
|
@ -197,6 +174,10 @@ for HTTPS connections on port 8448:
|
|||
./bin/dendrite-monolith-server --tls-cert=server.crt --tls-key=server.key
|
||||
```
|
||||
|
||||
If the `jetstream` section of the configuration contains no `addresses` but does
|
||||
contain a `store_dir`, Dendrite will start up a built-in NATS JetStream node
|
||||
automatically, eliminating the need to run a separate NATS server.
|
||||
|
||||
## Starting a polylith deployment
|
||||
|
||||
The following contains scripts which will run all the required processes in order to point a Matrix client at Dendrite.
|
||||
|
|
@ -263,15 +244,6 @@ This is what implements the room DAG. Clients do not talk to this.
|
|||
./bin/dendrite-polylith-multi --config=dendrite.yaml roomserver
|
||||
```
|
||||
|
||||
#### Federation sender
|
||||
|
||||
This sends events from our users to other servers. This is only required if
|
||||
you want to support federation.
|
||||
|
||||
```bash
|
||||
./bin/dendrite-polylith-multi --config=dendrite.yaml federationsender
|
||||
```
|
||||
|
||||
#### Appservice server
|
||||
|
||||
This sends events from the network to [application
|
||||
|
|
@ -291,14 +263,6 @@ This manages end-to-end encryption keys for users.
|
|||
./bin/dendrite-polylith-multi --config=dendrite.yaml keyserver
|
||||
```
|
||||
|
||||
#### Signing key server
|
||||
|
||||
This manages signing keys for servers.
|
||||
|
||||
```bash
|
||||
./bin/dendrite-polylith-multi --config=dendrite.yaml signingkeyserver
|
||||
```
|
||||
|
||||
#### EDU server
|
||||
|
||||
This manages processing EDUs such as typing, send-to-device events and presence. Clients do not talk to
|
||||
|
|
|
|||
|
|
@ -23,8 +23,7 @@ import (
|
|||
"github.com/matrix-org/dendrite/eduserver/input"
|
||||
"github.com/matrix-org/dendrite/eduserver/inthttp"
|
||||
"github.com/matrix-org/dendrite/setup/base"
|
||||
"github.com/matrix-org/dendrite/setup/config"
|
||||
"github.com/matrix-org/dendrite/setup/kafka"
|
||||
"github.com/matrix-org/dendrite/setup/jetstream"
|
||||
userapi "github.com/matrix-org/dendrite/userapi/api"
|
||||
)
|
||||
|
||||
|
|
@ -43,16 +42,16 @@ func NewInternalAPI(
|
|||
) api.EDUServerInputAPI {
|
||||
cfg := &base.Cfg.EDUServer
|
||||
|
||||
_, producer := kafka.SetupConsumerProducer(&cfg.Matrix.Kafka)
|
||||
js, _, _ := jetstream.Prepare(&cfg.Matrix.JetStream)
|
||||
|
||||
return &input.EDUServerInputAPI{
|
||||
Cache: eduCache,
|
||||
UserAPI: userAPI,
|
||||
Producer: producer,
|
||||
OutputTypingEventTopic: cfg.Matrix.Kafka.TopicFor(config.TopicOutputTypingEvent),
|
||||
OutputSendToDeviceEventTopic: cfg.Matrix.Kafka.TopicFor(config.TopicOutputSendToDeviceEvent),
|
||||
OutputReceiptEventTopic: cfg.Matrix.Kafka.TopicFor(config.TopicOutputReceiptEvent),
|
||||
OutputKeyChangeEventTopic: cfg.Matrix.Kafka.TopicFor(config.TopicOutputKeyChangeEvent),
|
||||
JetStream: js,
|
||||
OutputTypingEventTopic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputTypingEvent),
|
||||
OutputSendToDeviceEventTopic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputSendToDeviceEvent),
|
||||
OutputReceiptEventTopic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputReceiptEvent),
|
||||
OutputKeyChangeEventTopic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputKeyChangeEvent),
|
||||
ServerName: cfg.Matrix.ServerName,
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -21,12 +21,12 @@ import (
|
|||
"encoding/json"
|
||||
"time"
|
||||
|
||||
"github.com/Shopify/sarama"
|
||||
"github.com/matrix-org/dendrite/eduserver/api"
|
||||
"github.com/matrix-org/dendrite/eduserver/cache"
|
||||
keyapi "github.com/matrix-org/dendrite/keyserver/api"
|
||||
userapi "github.com/matrix-org/dendrite/userapi/api"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
"github.com/nats-io/nats.go"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
|
|
@ -43,7 +43,7 @@ type EDUServerInputAPI struct {
|
|||
// The kafka topic to output new key change events to
|
||||
OutputKeyChangeEventTopic string
|
||||
// kafka producer
|
||||
Producer sarama.SyncProducer
|
||||
JetStream nats.JetStreamContext
|
||||
// Internal user query API
|
||||
UserAPI userapi.UserInternalAPI
|
||||
// our server name
|
||||
|
|
@ -100,13 +100,11 @@ func (t *EDUServerInputAPI) InputCrossSigningKeyUpdate(
|
|||
"user_id": request.UserID,
|
||||
}).Infof("Producing to topic '%s'", t.OutputKeyChangeEventTopic)
|
||||
|
||||
m := &sarama.ProducerMessage{
|
||||
Topic: string(t.OutputKeyChangeEventTopic),
|
||||
Key: sarama.StringEncoder(request.UserID),
|
||||
Value: sarama.ByteEncoder(eventJSON),
|
||||
}
|
||||
|
||||
_, _, err = t.Producer.SendMessage(m)
|
||||
_, err = t.JetStream.PublishMsg(&nats.Msg{
|
||||
Subject: t.OutputKeyChangeEventTopic,
|
||||
Header: nats.Header{},
|
||||
Data: eventJSON,
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
|
|
@ -138,13 +136,11 @@ func (t *EDUServerInputAPI) sendTypingEvent(ite *api.InputTypingEvent) error {
|
|||
"typing": ite.Typing,
|
||||
}).Infof("Producing to topic '%s'", t.OutputTypingEventTopic)
|
||||
|
||||
m := &sarama.ProducerMessage{
|
||||
Topic: string(t.OutputTypingEventTopic),
|
||||
Key: sarama.StringEncoder(ite.RoomID),
|
||||
Value: sarama.ByteEncoder(eventJSON),
|
||||
}
|
||||
|
||||
_, _, err = t.Producer.SendMessage(m)
|
||||
_, err = t.JetStream.PublishMsg(&nats.Msg{
|
||||
Subject: t.OutputTypingEventTopic,
|
||||
Header: nats.Header{},
|
||||
Data: eventJSON,
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
|
|
@ -193,14 +189,10 @@ func (t *EDUServerInputAPI) sendToDeviceEvent(ise *api.InputSendToDeviceEvent) e
|
|||
return err
|
||||
}
|
||||
|
||||
m := &sarama.ProducerMessage{
|
||||
Topic: string(t.OutputSendToDeviceEventTopic),
|
||||
Key: sarama.StringEncoder(ote.UserID),
|
||||
Value: sarama.ByteEncoder(eventJSON),
|
||||
}
|
||||
|
||||
_, _, err = t.Producer.SendMessage(m)
|
||||
if err != nil {
|
||||
if _, err = t.JetStream.PublishMsg(&nats.Msg{
|
||||
Subject: t.OutputSendToDeviceEventTopic,
|
||||
Data: eventJSON,
|
||||
}); err != nil {
|
||||
logrus.WithError(err).Error("sendToDevice failed t.Producer.SendMessage")
|
||||
return err
|
||||
}
|
||||
|
|
@ -228,11 +220,10 @@ func (t *EDUServerInputAPI) InputReceiptEvent(
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m := &sarama.ProducerMessage{
|
||||
Topic: t.OutputReceiptEventTopic,
|
||||
Key: sarama.StringEncoder(request.InputReceiptEvent.RoomID + ":" + request.InputReceiptEvent.UserID),
|
||||
Value: sarama.ByteEncoder(js),
|
||||
}
|
||||
_, _, err = t.Producer.SendMessage(m)
|
||||
|
||||
_, err = t.JetStream.PublishMsg(&nats.Msg{
|
||||
Subject: t.OutputReceiptEventTopic,
|
||||
Data: js,
|
||||
})
|
||||
return err
|
||||
}
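The EDU input API above now publishes typing, send-to-device and receipt events straight to JetStream subjects instead of Kafka topics. As a generic illustration only (the subject and payload are invented for the example), a synchronous JetStream publish returns an acknowledgement naming the stream and sequence that stored the message:

```go
package main

import (
	"encoding/json"
	"log"

	"github.com/nats-io/nats.go"
)

func main() {
	nc, err := nats.Connect(nats.DefaultURL)
	if err != nil {
		log.Fatal(err)
	}
	defer nc.Close()
	js, err := nc.JetStream()
	if err != nil {
		log.Fatal(err)
	}

	// Illustrative payload and subject only; not Dendrite's real event format.
	payload, err := json.Marshal(map[string]string{
		"type":    "m.typing",
		"room_id": "!room:example.com",
	})
	if err != nil {
		log.Fatal(err)
	}

	ack, err := js.Publish("OutputTypingEvent", payload)
	if err != nil {
		log.Fatal(err)
	}
	// The ack reports which stream stored the message and at what sequence,
	// giving the same delivery confirmation the synchronous Kafka producer provided.
	log.Printf("stored in stream %q at sequence %d", ack.Stream, ack.Sequence)
}
```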
|
||||
|
|
|
|||
|
|
@ -17,233 +17,238 @@ package consumers
import (
"context"
"encoding/json"
"fmt"

"github.com/Shopify/sarama"
"github.com/matrix-org/dendrite/eduserver/api"
"github.com/matrix-org/dendrite/federationapi/queue"
"github.com/matrix-org/dendrite/federationapi/storage"
"github.com/matrix-org/dendrite/internal"
"github.com/matrix-org/dendrite/setup/config"
"github.com/matrix-org/dendrite/setup/jetstream"
"github.com/matrix-org/dendrite/setup/process"
"github.com/matrix-org/gomatrixserverlib"
"github.com/matrix-org/util"
"github.com/nats-io/nats.go"
log "github.com/sirupsen/logrus"
)

// OutputEDUConsumer consumes events that originate in EDU server.
type OutputEDUConsumer struct {
typingConsumer *internal.ContinualConsumer
sendToDeviceConsumer *internal.ContinualConsumer
receiptConsumer *internal.ContinualConsumer
db storage.Database
queues *queue.OutgoingQueues
ServerName gomatrixserverlib.ServerName
TypingTopic string
SendToDeviceTopic string
ctx context.Context
jetstream nats.JetStreamContext
durable nats.SubOpt
db storage.Database
queues *queue.OutgoingQueues
ServerName gomatrixserverlib.ServerName
typingTopic string
sendToDeviceTopic string
receiptTopic string
}

// NewOutputEDUConsumer creates a new OutputEDUConsumer. Call Start() to begin consuming from EDU servers.
func NewOutputEDUConsumer(
process *process.ProcessContext,
cfg *config.FederationAPI,
kafkaConsumer sarama.Consumer,
js nats.JetStreamContext,
queues *queue.OutgoingQueues,
store storage.Database,
) *OutputEDUConsumer {
c := &OutputEDUConsumer{
typingConsumer: &internal.ContinualConsumer{
Process: process,
ComponentName: "eduserver/typing",
Topic: cfg.Matrix.Kafka.TopicFor(config.TopicOutputTypingEvent),
Consumer: kafkaConsumer,
PartitionStore: store,
},
sendToDeviceConsumer: &internal.ContinualConsumer{
Process: process,
ComponentName: "eduserver/sendtodevice",
Topic: cfg.Matrix.Kafka.TopicFor(config.TopicOutputSendToDeviceEvent),
Consumer: kafkaConsumer,
PartitionStore: store,
},
receiptConsumer: &internal.ContinualConsumer{
Process: process,
ComponentName: "eduserver/receipt",
Topic: cfg.Matrix.Kafka.TopicFor(config.TopicOutputReceiptEvent),
Consumer: kafkaConsumer,
PartitionStore: store,
},
return &OutputEDUConsumer{
ctx: process.Context(),
jetstream: js,
queues: queues,
db: store,
ServerName: cfg.Matrix.ServerName,
TypingTopic: cfg.Matrix.Kafka.TopicFor(config.TopicOutputTypingEvent),
SendToDeviceTopic: cfg.Matrix.Kafka.TopicFor(config.TopicOutputSendToDeviceEvent),
durable: cfg.Matrix.JetStream.Durable("FederationAPIEDUServerConsumer"),
typingTopic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputTypingEvent),
sendToDeviceTopic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputSendToDeviceEvent),
receiptTopic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputReceiptEvent),
}
c.typingConsumer.ProcessMessage = c.onTypingEvent
c.sendToDeviceConsumer.ProcessMessage = c.onSendToDeviceEvent
c.receiptConsumer.ProcessMessage = c.onReceiptEvent

return c
}

// Start consuming from EDU servers
func (t *OutputEDUConsumer) Start() error {
if err := t.typingConsumer.Start(); err != nil {
return fmt.Errorf("t.typingConsumer.Start: %w", err)
if _, err := t.jetstream.Subscribe(t.typingTopic, t.onTypingEvent, t.durable); err != nil {
return err
}
if err := t.sendToDeviceConsumer.Start(); err != nil {
return fmt.Errorf("t.sendToDeviceConsumer.Start: %w", err)
if _, err := t.jetstream.Subscribe(t.sendToDeviceTopic, t.onSendToDeviceEvent, t.durable); err != nil {
return err
}
if err := t.receiptConsumer.Start(); err != nil {
return fmt.Errorf("t.receiptConsumer.Start: %w", err)
if _, err := t.jetstream.Subscribe(t.receiptTopic, t.onReceiptEvent, t.durable); err != nil {
return err
}
return nil
}

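The handlers below wrap their work in jetstream.WithJetStreamMessage. That helper is not part of this diff; a plausible minimal shape, assuming the callback's boolean return controls acknowledgement (true acks the message, false leaves it for JetStream to redeliver), would be:

package jetstream

import "github.com/nats-io/nats.go"

// WithJetStreamMessage: assumed sketch only, the real helper in setup/jetstream may differ.
func WithJetStreamMessage(msg *nats.Msg, f func(msg *nats.Msg) bool) {
	if f(msg) {
		// Only acknowledge once the callback reports the message as handled,
		// so returning false lets JetStream redeliver it later.
		_ = msg.Ack()
	}
}
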
// onSendToDeviceEvent is called in response to a message received on the
|
||||
// send-to-device events topic from the EDU server.
|
||||
func (t *OutputEDUConsumer) onSendToDeviceEvent(msg *sarama.ConsumerMessage) error {
|
||||
func (t *OutputEDUConsumer) onSendToDeviceEvent(msg *nats.Msg) {
|
||||
// Extract the send-to-device event from msg.
|
||||
var ote api.OutputSendToDeviceEvent
|
||||
if err := json.Unmarshal(msg.Value, &ote); err != nil {
|
||||
log.WithError(err).Errorf("eduserver output log: message parse failed (expected send-to-device)")
|
||||
return nil
|
||||
}
|
||||
jetstream.WithJetStreamMessage(msg, func(msg *nats.Msg) bool {
|
||||
var ote api.OutputSendToDeviceEvent
|
||||
if err := json.Unmarshal(msg.Data, &ote); err != nil {
|
||||
log.WithError(err).Errorf("eduserver output log: message parse failed (expected send-to-device)")
|
||||
return true
|
||||
}
|
||||
|
||||
// only send send-to-device events which originated from us
|
||||
_, originServerName, err := gomatrixserverlib.SplitID('@', ote.Sender)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("user_id", ote.Sender).Error("Failed to extract domain from send-to-device sender")
|
||||
return nil
|
||||
}
|
||||
if originServerName != t.ServerName {
|
||||
log.WithField("other_server", originServerName).Info("Suppressing send-to-device: originated elsewhere")
|
||||
return nil
|
||||
}
|
||||
// only send send-to-device events which originated from us
|
||||
_, originServerName, err := gomatrixserverlib.SplitID('@', ote.Sender)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("user_id", ote.Sender).Error("Failed to extract domain from send-to-device sender")
|
||||
return true
|
||||
}
|
||||
if originServerName != t.ServerName {
|
||||
log.WithField("other_server", originServerName).Info("Suppressing send-to-device: originated elsewhere")
|
||||
return true
|
||||
}
|
||||
|
||||
_, destServerName, err := gomatrixserverlib.SplitID('@', ote.UserID)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("user_id", ote.UserID).Error("Failed to extract domain from send-to-device destination")
|
||||
return nil
|
||||
}
|
||||
_, destServerName, err := gomatrixserverlib.SplitID('@', ote.UserID)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("user_id", ote.UserID).Error("Failed to extract domain from send-to-device destination")
|
||||
return true
|
||||
}
|
||||
|
||||
// Pack the EDU and marshal it
|
||||
edu := &gomatrixserverlib.EDU{
|
||||
Type: gomatrixserverlib.MDirectToDevice,
|
||||
Origin: string(t.ServerName),
|
||||
}
|
||||
tdm := gomatrixserverlib.ToDeviceMessage{
|
||||
Sender: ote.Sender,
|
||||
Type: ote.Type,
|
||||
MessageID: util.RandomString(32),
|
||||
Messages: map[string]map[string]json.RawMessage{
|
||||
ote.UserID: {
|
||||
ote.DeviceID: ote.Content,
|
||||
// Pack the EDU and marshal it
|
||||
edu := &gomatrixserverlib.EDU{
|
||||
Type: gomatrixserverlib.MDirectToDevice,
|
||||
Origin: string(t.ServerName),
|
||||
}
|
||||
tdm := gomatrixserverlib.ToDeviceMessage{
|
||||
Sender: ote.Sender,
|
||||
Type: ote.Type,
|
||||
MessageID: util.RandomString(32),
|
||||
Messages: map[string]map[string]json.RawMessage{
|
||||
ote.UserID: {
|
||||
ote.DeviceID: ote.Content,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
if edu.Content, err = json.Marshal(tdm); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if edu.Content, err = json.Marshal(tdm); err != nil {
|
||||
log.WithError(err).Error("failed to marshal EDU JSON")
|
||||
return true
|
||||
}
|
||||
|
||||
log.Infof("Sending send-to-device message into %q destination queue", destServerName)
|
||||
return t.queues.SendEDU(edu, t.ServerName, []gomatrixserverlib.ServerName{destServerName})
|
||||
log.Infof("Sending send-to-device message into %q destination queue", destServerName)
|
||||
if err := t.queues.SendEDU(edu, t.ServerName, []gomatrixserverlib.ServerName{destServerName}); err != nil {
|
||||
log.WithError(err).Error("failed to send EDU")
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
// onTypingEvent is called in response to a message received on the typing
|
||||
// events topic from the EDU server.
|
||||
func (t *OutputEDUConsumer) onTypingEvent(msg *sarama.ConsumerMessage) error {
|
||||
// Extract the typing event from msg.
|
||||
var ote api.OutputTypingEvent
|
||||
if err := json.Unmarshal(msg.Value, &ote); err != nil {
|
||||
// Skip this msg but continue processing messages.
|
||||
log.WithError(err).Errorf("eduserver output log: message parse failed (expected typing)")
|
||||
return nil
|
||||
}
|
||||
func (t *OutputEDUConsumer) onTypingEvent(msg *nats.Msg) {
|
||||
jetstream.WithJetStreamMessage(msg, func(msg *nats.Msg) bool {
|
||||
// Extract the typing event from msg.
|
||||
var ote api.OutputTypingEvent
|
||||
if err := json.Unmarshal(msg.Data, &ote); err != nil {
|
||||
// Skip this msg but continue processing messages.
|
||||
log.WithError(err).Errorf("eduserver output log: message parse failed (expected typing)")
|
||||
_ = msg.Ack()
|
||||
return true
|
||||
}
|
||||
|
||||
// only send typing events which originated from us
|
||||
_, typingServerName, err := gomatrixserverlib.SplitID('@', ote.Event.UserID)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("user_id", ote.Event.UserID).Error("Failed to extract domain from typing sender")
|
||||
return nil
|
||||
}
|
||||
if typingServerName != t.ServerName {
|
||||
log.WithField("other_server", typingServerName).Info("Suppressing typing notif: originated elsewhere")
|
||||
return nil
|
||||
}
|
||||
// only send typing events which originated from us
|
||||
_, typingServerName, err := gomatrixserverlib.SplitID('@', ote.Event.UserID)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("user_id", ote.Event.UserID).Error("Failed to extract domain from typing sender")
|
||||
_ = msg.Ack()
|
||||
return true
|
||||
}
|
||||
if typingServerName != t.ServerName {
|
||||
return true
|
||||
}
|
||||
|
||||
joined, err := t.db.GetJoinedHosts(context.TODO(), ote.Event.RoomID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
joined, err := t.db.GetJoinedHosts(t.ctx, ote.Event.RoomID)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("room_id", ote.Event.RoomID).Error("failed to get joined hosts for room")
|
||||
return false
|
||||
}
|
||||
|
||||
names := make([]gomatrixserverlib.ServerName, len(joined))
|
||||
for i := range joined {
|
||||
names[i] = joined[i].ServerName
|
||||
}
|
||||
names := make([]gomatrixserverlib.ServerName, len(joined))
|
||||
for i := range joined {
|
||||
names[i] = joined[i].ServerName
|
||||
}
|
||||
|
||||
edu := &gomatrixserverlib.EDU{Type: ote.Event.Type}
|
||||
if edu.Content, err = json.Marshal(map[string]interface{}{
|
||||
"room_id": ote.Event.RoomID,
|
||||
"user_id": ote.Event.UserID,
|
||||
"typing": ote.Event.Typing,
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
edu := &gomatrixserverlib.EDU{Type: ote.Event.Type}
|
||||
if edu.Content, err = json.Marshal(map[string]interface{}{
|
||||
"room_id": ote.Event.RoomID,
|
||||
"user_id": ote.Event.UserID,
|
||||
"typing": ote.Event.Typing,
|
||||
}); err != nil {
|
||||
log.WithError(err).Error("failed to marshal EDU JSON")
|
||||
return true
|
||||
}
|
||||
|
||||
return t.queues.SendEDU(edu, t.ServerName, names)
|
||||
if err := t.queues.SendEDU(edu, t.ServerName, names); err != nil {
|
||||
log.WithError(err).Error("failed to send EDU")
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
// onReceiptEvent is called in response to a message received on the receipt
|
||||
// events topic from the EDU server.
|
||||
func (t *OutputEDUConsumer) onReceiptEvent(msg *sarama.ConsumerMessage) error {
|
||||
// Extract the typing event from msg.
|
||||
var receipt api.OutputReceiptEvent
|
||||
if err := json.Unmarshal(msg.Value, &receipt); err != nil {
|
||||
// Skip this msg but continue processing messages.
|
||||
log.WithError(err).Errorf("eduserver output log: message parse failed (expected receipt)")
|
||||
return nil
|
||||
}
|
||||
func (t *OutputEDUConsumer) onReceiptEvent(msg *nats.Msg) {
|
||||
jetstream.WithJetStreamMessage(msg, func(msg *nats.Msg) bool {
|
||||
// Extract the typing event from msg.
|
||||
var receipt api.OutputReceiptEvent
|
||||
if err := json.Unmarshal(msg.Data, &receipt); err != nil {
|
||||
// Skip this msg but continue processing messages.
|
||||
log.WithError(err).Errorf("eduserver output log: message parse failed (expected receipt)")
|
||||
return true
|
||||
}
|
||||
|
||||
// only send receipt events which originated from us
|
||||
_, receiptServerName, err := gomatrixserverlib.SplitID('@', receipt.UserID)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("user_id", receipt.UserID).Error("Failed to extract domain from receipt sender")
|
||||
return nil
|
||||
}
|
||||
if receiptServerName != t.ServerName {
|
||||
return nil // don't log, very spammy as it logs for each remote receipt
|
||||
}
|
||||
// only send receipt events which originated from us
|
||||
_, receiptServerName, err := gomatrixserverlib.SplitID('@', receipt.UserID)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("user_id", receipt.UserID).Error("failed to extract domain from receipt sender")
|
||||
return true
|
||||
}
|
||||
if receiptServerName != t.ServerName {
|
||||
return true
|
||||
}
|
||||
|
||||
joined, err := t.db.GetJoinedHosts(context.TODO(), receipt.RoomID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
joined, err := t.db.GetJoinedHosts(t.ctx, receipt.RoomID)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("room_id", receipt.RoomID).Error("failed to get joined hosts for room")
|
||||
return false
|
||||
}
|
||||
|
||||
names := make([]gomatrixserverlib.ServerName, len(joined))
|
||||
for i := range joined {
|
||||
names[i] = joined[i].ServerName
|
||||
}
|
||||
names := make([]gomatrixserverlib.ServerName, len(joined))
|
||||
for i := range joined {
|
||||
names[i] = joined[i].ServerName
|
||||
}
|
||||
|
||||
content := map[string]api.FederationReceiptMRead{}
|
||||
content[receipt.RoomID] = api.FederationReceiptMRead{
|
||||
User: map[string]api.FederationReceiptData{
|
||||
receipt.UserID: {
|
||||
Data: api.ReceiptTS{
|
||||
TS: receipt.Timestamp,
|
||||
content := map[string]api.FederationReceiptMRead{}
|
||||
content[receipt.RoomID] = api.FederationReceiptMRead{
|
||||
User: map[string]api.FederationReceiptData{
|
||||
receipt.UserID: {
|
||||
Data: api.ReceiptTS{
|
||||
TS: receipt.Timestamp,
|
||||
},
|
||||
EventIDs: []string{receipt.EventID},
|
||||
},
|
||||
EventIDs: []string{receipt.EventID},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
edu := &gomatrixserverlib.EDU{
|
||||
Type: gomatrixserverlib.MReceipt,
|
||||
Origin: string(t.ServerName),
|
||||
}
|
||||
if edu.Content, err = json.Marshal(content); err != nil {
|
||||
return err
|
||||
}
|
||||
edu := &gomatrixserverlib.EDU{
|
||||
Type: gomatrixserverlib.MReceipt,
|
||||
Origin: string(t.ServerName),
|
||||
}
|
||||
if edu.Content, err = json.Marshal(content); err != nil {
|
||||
log.WithError(err).Error("failed to marshal EDU JSON")
|
||||
return true
|
||||
}
|
||||
|
||||
return t.queues.SendEDU(edu, t.ServerName, names)
|
||||
if err := t.queues.SendEDU(edu, t.ServerName, names); err != nil {
|
||||
log.WithError(err).Error("failed to send EDU")
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
|
|
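The Start() method above attaches each handler with a durable consumer name and relies on manual acknowledgement. A standalone sketch of that subscription pattern with plain nats.go, assuming a push consumer with manual acks (the subject and durable name are copied from the hunk above purely for illustration):

package consumers

import (
	"encoding/json"
	"log"

	"github.com/nats-io/nats.go"
)

// subscribeDurable is a sketch of the durable, manually-acked subscription
// pattern used by Start() above; it is not the Dendrite implementation.
func subscribeDurable(js nats.JetStreamContext) (*nats.Subscription, error) {
	return js.Subscribe("OutputSendToDeviceEvent", func(m *nats.Msg) {
		var payload map[string]interface{}
		if err := json.Unmarshal(m.Data, &payload); err != nil {
			_ = m.Ack() // drop malformed messages instead of redelivering them forever
			return
		}
		log.Printf("received send-to-device event: %v", payload)
		_ = m.Ack() // acknowledge only after the event has been handled
	}, nats.Durable("FederationAPIEDUServerConsumer"), nats.ManualAck())
}
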
|||
|
|
@ -27,6 +27,7 @@ import (
|
|||
"github.com/matrix-org/dendrite/keyserver/api"
|
||||
roomserverAPI "github.com/matrix-org/dendrite/roomserver/api"
|
||||
"github.com/matrix-org/dendrite/setup/config"
|
||||
"github.com/matrix-org/dendrite/setup/jetstream"
|
||||
"github.com/matrix-org/dendrite/setup/process"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
|
@ -34,6 +35,7 @@ import (
|
|||
|
||||
// KeyChangeConsumer consumes events that originate in key server.
|
||||
type KeyChangeConsumer struct {
|
||||
ctx context.Context
|
||||
consumer *internal.ContinualConsumer
|
||||
db storage.Database
|
||||
queues *queue.OutgoingQueues
|
||||
|
|
@ -51,10 +53,11 @@ func NewKeyChangeConsumer(
|
|||
rsAPI roomserverAPI.RoomserverInternalAPI,
|
||||
) *KeyChangeConsumer {
|
||||
c := &KeyChangeConsumer{
|
||||
ctx: process.Context(),
|
||||
consumer: &internal.ContinualConsumer{
|
||||
Process: process,
|
||||
ComponentName: "federationapi/keychange",
|
||||
Topic: string(cfg.Matrix.Kafka.TopicFor(config.TopicOutputKeyChangeEvent)),
|
||||
Topic: string(cfg.Matrix.JetStream.TopicFor(jetstream.OutputKeyChangeEvent)),
|
||||
Consumer: kafkaConsumer,
|
||||
PartitionStore: store,
|
||||
},
|
||||
|
|
@ -100,6 +103,9 @@ func (t *KeyChangeConsumer) onMessage(msg *sarama.ConsumerMessage) error {
|
|||
}
|
||||
|
||||
func (t *KeyChangeConsumer) onDeviceKeyMessage(m api.DeviceMessage) error {
|
||||
if m.DeviceKeys == nil {
|
||||
return nil
|
||||
}
|
||||
logger := logrus.WithField("user_id", m.UserID)
|
||||
|
||||
// only send key change events which originated from us
|
||||
|
|
@ -113,7 +119,7 @@ func (t *KeyChangeConsumer) onDeviceKeyMessage(m api.DeviceMessage) error {
|
|||
}
|
||||
|
||||
var queryRes roomserverAPI.QueryRoomsForUserResponse
|
||||
err = t.rsAPI.QueryRoomsForUser(context.Background(), &roomserverAPI.QueryRoomsForUserRequest{
|
||||
err = t.rsAPI.QueryRoomsForUser(t.ctx, &roomserverAPI.QueryRoomsForUserRequest{
|
||||
UserID: m.UserID,
|
||||
WantMembership: "join",
|
||||
}, &queryRes)
|
||||
|
|
@ -122,7 +128,7 @@ func (t *KeyChangeConsumer) onDeviceKeyMessage(m api.DeviceMessage) error {
|
|||
return nil
|
||||
}
|
||||
// send this key change to all servers who share rooms with this user.
|
||||
destinations, err := t.db.GetJoinedHostsForRooms(context.Background(), queryRes.RoomIDs)
|
||||
destinations, err := t.db.GetJoinedHostsForRooms(t.ctx, queryRes.RoomIDs)
|
||||
if err != nil {
|
||||
logger.WithError(err).Error("failed to calculate joined hosts for rooms user is in")
|
||||
return nil
|
||||
|
|
@ -165,7 +171,7 @@ func (t *KeyChangeConsumer) onCrossSigningMessage(m api.DeviceMessage) error {
|
|||
logger := logrus.WithField("user_id", output.UserID)
|
||||
|
||||
var queryRes roomserverAPI.QueryRoomsForUserResponse
|
||||
err = t.rsAPI.QueryRoomsForUser(context.Background(), &roomserverAPI.QueryRoomsForUserRequest{
|
||||
err = t.rsAPI.QueryRoomsForUser(t.ctx, &roomserverAPI.QueryRoomsForUserRequest{
|
||||
UserID: output.UserID,
|
||||
WantMembership: "join",
|
||||
}, &queryRes)
|
||||
|
|
@ -174,7 +180,7 @@ func (t *KeyChangeConsumer) onCrossSigningMessage(m api.DeviceMessage) error {
|
|||
return nil
|
||||
}
|
||||
// send this key change to all servers who share rooms with this user.
|
||||
destinations, err := t.db.GetJoinedHostsForRooms(context.Background(), queryRes.RoomIDs)
|
||||
destinations, err := t.db.GetJoinedHostsForRooms(t.ctx, queryRes.RoomIDs)
|
||||
if err != nil {
|
||||
logger.WithError(err).Error("fedsender key change consumer: failed to calculate joined hosts for rooms user is in")
|
||||
return nil
|
||||
|
|
|
|||
|
|
@ -19,117 +19,117 @@ import (
|
|||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/Shopify/sarama"
|
||||
"github.com/matrix-org/dendrite/federationapi/queue"
|
||||
"github.com/matrix-org/dendrite/federationapi/storage"
|
||||
"github.com/matrix-org/dendrite/federationapi/types"
|
||||
"github.com/matrix-org/dendrite/internal"
|
||||
"github.com/matrix-org/dendrite/roomserver/api"
|
||||
"github.com/matrix-org/dendrite/setup/config"
|
||||
"github.com/matrix-org/dendrite/setup/jetstream"
|
||||
"github.com/matrix-org/dendrite/setup/process"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
"github.com/nats-io/nats.go"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// OutputRoomEventConsumer consumes events that originated in the room server.
|
||||
type OutputRoomEventConsumer struct {
|
||||
cfg *config.FederationAPI
|
||||
rsAPI api.RoomserverInternalAPI
|
||||
rsConsumer *internal.ContinualConsumer
|
||||
db storage.Database
|
||||
queues *queue.OutgoingQueues
|
||||
ctx context.Context
|
||||
cfg *config.FederationAPI
|
||||
rsAPI api.RoomserverInternalAPI
|
||||
jetstream nats.JetStreamContext
|
||||
durable nats.SubOpt
|
||||
db storage.Database
|
||||
queues *queue.OutgoingQueues
|
||||
topic string
|
||||
}
|
||||
|
||||
// NewOutputRoomEventConsumer creates a new OutputRoomEventConsumer. Call Start() to begin consuming from room servers.
|
||||
func NewOutputRoomEventConsumer(
|
||||
process *process.ProcessContext,
|
||||
cfg *config.FederationAPI,
|
||||
kafkaConsumer sarama.Consumer,
|
||||
js nats.JetStreamContext,
|
||||
queues *queue.OutgoingQueues,
|
||||
store storage.Database,
|
||||
rsAPI api.RoomserverInternalAPI,
|
||||
) *OutputRoomEventConsumer {
|
||||
consumer := internal.ContinualConsumer{
|
||||
Process: process,
|
||||
ComponentName: "federationapi/roomserver",
|
||||
Topic: string(cfg.Matrix.Kafka.TopicFor(config.TopicOutputRoomEvent)),
|
||||
Consumer: kafkaConsumer,
|
||||
PartitionStore: store,
|
||||
return &OutputRoomEventConsumer{
|
||||
ctx: process.Context(),
|
||||
cfg: cfg,
|
||||
jetstream: js,
|
||||
db: store,
|
||||
queues: queues,
|
||||
rsAPI: rsAPI,
|
||||
durable: cfg.Matrix.JetStream.Durable("FederationAPIRoomServerConsumer"),
|
||||
topic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputRoomEvent),
|
||||
}
|
||||
s := &OutputRoomEventConsumer{
|
||||
cfg: cfg,
|
||||
rsConsumer: &consumer,
|
||||
db: store,
|
||||
queues: queues,
|
||||
rsAPI: rsAPI,
|
||||
}
|
||||
consumer.ProcessMessage = s.onMessage
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
// Start consuming from room servers
|
||||
func (s *OutputRoomEventConsumer) Start() error {
|
||||
return s.rsConsumer.Start()
|
||||
_, err := s.jetstream.Subscribe(s.topic, s.onMessage, s.durable)
|
||||
return err
|
||||
}
|
||||
|
||||
// onMessage is called when the federation server receives a new event from the room server output log.
// It is unsafe to call this with messages for the same room in multiple goroutines
// because updates will likely fail with a types.EventIDMismatchError when it
// realises that it cannot update the room state using the deltas.
|
||||
func (s *OutputRoomEventConsumer) onMessage(msg *sarama.ConsumerMessage) error {
|
||||
// Parse out the event JSON
|
||||
var output api.OutputEvent
|
||||
if err := json.Unmarshal(msg.Value, &output); err != nil {
|
||||
// If the message was invalid, log it and move on to the next message in the stream
|
||||
log.WithError(err).Errorf("roomserver output log: message parse failure")
|
||||
return nil
|
||||
}
|
||||
|
||||
switch output.Type {
|
||||
case api.OutputTypeNewRoomEvent:
|
||||
ev := output.NewRoomEvent.Event
|
||||
|
||||
if output.NewRoomEvent.RewritesState {
|
||||
if err := s.db.PurgeRoomState(context.TODO(), ev.RoomID()); err != nil {
|
||||
return fmt.Errorf("s.db.PurgeRoom: %w", err)
|
||||
}
|
||||
func (s *OutputRoomEventConsumer) onMessage(msg *nats.Msg) {
|
||||
jetstream.WithJetStreamMessage(msg, func(msg *nats.Msg) bool {
|
||||
// Parse out the event JSON
|
||||
var output api.OutputEvent
|
||||
if err := json.Unmarshal(msg.Data, &output); err != nil {
|
||||
// If the message was invalid, log it and move on to the next message in the stream
|
||||
log.WithError(err).Errorf("roomserver output log: message parse failure")
|
||||
return true
|
||||
}
|
||||
|
||||
if err := s.processMessage(*output.NewRoomEvent); err != nil {
|
||||
switch err.(type) {
|
||||
case *queue.ErrorFederationDisabled:
|
||||
log.WithField("error", output.Type).Info(
|
||||
err.Error(),
|
||||
)
|
||||
default:
|
||||
// panic rather than continue with an inconsistent database
|
||||
switch output.Type {
|
||||
case api.OutputTypeNewRoomEvent:
|
||||
ev := output.NewRoomEvent.Event
|
||||
|
||||
if output.NewRoomEvent.RewritesState {
|
||||
if err := s.db.PurgeRoomState(s.ctx, ev.RoomID()); err != nil {
|
||||
log.WithError(err).Errorf("roomserver output log: purge room state failure")
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if err := s.processMessage(*output.NewRoomEvent); err != nil {
|
||||
switch err.(type) {
|
||||
case *queue.ErrorFederationDisabled:
|
||||
log.WithField("error", output.Type).Info(
|
||||
err.Error(),
|
||||
)
|
||||
default:
|
||||
// panic rather than continue with an inconsistent database
|
||||
log.WithFields(log.Fields{
|
||||
"event_id": ev.EventID(),
|
||||
"event": string(ev.JSON()),
|
||||
"add": output.NewRoomEvent.AddsStateEventIDs,
|
||||
"del": output.NewRoomEvent.RemovesStateEventIDs,
|
||||
log.ErrorKey: err,
|
||||
}).Panicf("roomserver output log: write room event failure")
|
||||
}
|
||||
}
|
||||
|
||||
case api.OutputTypeNewInboundPeek:
|
||||
if err := s.processInboundPeek(*output.NewInboundPeek); err != nil {
|
||||
log.WithFields(log.Fields{
|
||||
"event_id": ev.EventID(),
|
||||
"event": string(ev.JSON()),
|
||||
"add": output.NewRoomEvent.AddsStateEventIDs,
|
||||
"del": output.NewRoomEvent.RemovesStateEventIDs,
|
||||
"event": output.NewInboundPeek,
|
||||
log.ErrorKey: err,
|
||||
}).Panicf("roomserver output log: write room event failure")
|
||||
}).Panicf("roomserver output log: remote peek event failure")
|
||||
return false
|
||||
}
|
||||
return nil
|
||||
}
|
||||
case api.OutputTypeNewInboundPeek:
|
||||
if err := s.processInboundPeek(*output.NewInboundPeek); err != nil {
|
||||
log.WithFields(log.Fields{
|
||||
"event": output.NewInboundPeek,
|
||||
log.ErrorKey: err,
|
||||
}).Panicf("roomserver output log: remote peek event failure")
|
||||
return nil
|
||||
}
|
||||
default:
|
||||
log.WithField("type", output.Type).Debug(
|
||||
"roomserver output log: ignoring unknown output type",
|
||||
)
|
||||
return nil
|
||||
}
|
||||
|
||||
return nil
|
||||
default:
|
||||
log.WithField("type", output.Type).Debug(
|
||||
"roomserver output log: ignoring unknown output type",
|
||||
)
|
||||
}
|
||||
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
// processInboundPeek starts tracking a new federated inbound peek (replacing the existing one if any)
|
||||
|
|
@ -146,7 +146,7 @@ func (s *OutputRoomEventConsumer) processInboundPeek(orp api.OutputNewInboundPee
|
|||
//
|
||||
// This is making the tests flakey.
|
||||
|
||||
return s.db.AddInboundPeek(context.TODO(), orp.ServerName, orp.RoomID, orp.PeekID, orp.RenewalInterval)
|
||||
return s.db.AddInboundPeek(s.ctx, orp.ServerName, orp.RoomID, orp.PeekID, orp.RenewalInterval)
|
||||
}
|
||||
|
||||
// processMessage updates the list of currently joined hosts in the room
|
||||
|
|
@ -162,7 +162,7 @@ func (s *OutputRoomEventConsumer) processMessage(ore api.OutputNewRoomEvent) err
|
|||
// TODO(#290): handle EventIDMismatchError and recover the current state by
|
||||
// talking to the roomserver
|
||||
oldJoinedHosts, err := s.db.UpdateRoom(
|
||||
context.TODO(),
|
||||
s.ctx,
|
||||
ore.Event.RoomID(),
|
||||
ore.LastSentEventID,
|
||||
ore.Event.EventID(),
|
||||
|
|
@ -255,7 +255,7 @@ func (s *OutputRoomEventConsumer) joinedHostsAtEvent(
|
|||
}
|
||||
|
||||
// handle peeking hosts
|
||||
inboundPeeks, err := s.db.GetInboundPeeks(context.TODO(), ore.Event.Event.RoomID())
|
||||
inboundPeeks, err := s.db.GetInboundPeeks(s.ctx, ore.Event.Event.RoomID())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -373,7 +373,7 @@ func (s *OutputRoomEventConsumer) lookupStateEvents(
|
|||
// from the roomserver using the query API.
|
||||
eventReq := api.QueryEventsByIDRequest{EventIDs: missing}
|
||||
var eventResp api.QueryEventsByIDResponse
|
||||
if err := s.rsAPI.QueryEventsByID(context.TODO(), &eventReq, &eventResp); err != nil {
|
||||
if err := s.rsAPI.QueryEventsByID(s.ctx, &eventReq, &eventResp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -30,7 +30,7 @@ import (
roomserverAPI "github.com/matrix-org/dendrite/roomserver/api"
"github.com/matrix-org/dendrite/setup/base"
"github.com/matrix-org/dendrite/setup/config"
"github.com/matrix-org/dendrite/setup/kafka"
"github.com/matrix-org/dendrite/setup/jetstream"
userapi "github.com/matrix-org/dendrite/userapi/api"
"github.com/sirupsen/logrus"

@ -92,7 +92,7 @@ func NewInternalAPI(
FailuresUntilBlacklist: cfg.FederationMaxRetries,
}

consumer, _ := kafka.SetupConsumerProducer(&cfg.Matrix.Kafka)
js, consumer, _ := jetstream.Prepare(&cfg.Matrix.JetStream)

queues := queue.NewOutgoingQueues(
federationDB, base.ProcessContext,

@ -106,7 +106,7 @@ func NewInternalAPI(
)

rsConsumer := consumers.NewOutputRoomEventConsumer(
base.ProcessContext, cfg, consumer, queues,
base.ProcessContext, cfg, js, queues,
federationDB, rsAPI,
)
if err = rsConsumer.Start(); err != nil {

@ -114,7 +114,7 @@ func NewInternalAPI(
}

tsConsumer := consumers.NewOutputEDUConsumer(
base.ProcessContext, cfg, consumer, queues, federationDB,
base.ProcessContext, cfg, js, queues, federationDB,
)
if err := tsConsumer.Start(); err != nil {
logrus.WithError(err).Panic("failed to start typing server consumer")

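The wiring above swaps kafka.SetupConsumerProducer for jetstream.Prepare, which, judging from its call sites, hands back a nats.JetStreamContext plus a sarama-compatible consumer for components that have not been migrated yet. A simplified stand-in for the JetStream half, assuming a plain connection to an already running server:

package setup

import "github.com/nats-io/nats.go"

// prepareJetStream is a simplified assumption: the real setup/jetstream.Prepare
// can also run an embedded in-process NATS server and additionally returns a
// sarama-compatible consumer, which is omitted here.
func prepareJetStream(url string) (nats.JetStreamContext, error) {
	nc, err := nats.Connect(url)
	if err != nil {
		return nil, err
	}
	return nc.JetStream()
}
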
@ -68,13 +68,22 @@ func TestMain(m *testing.M) {
panic("can't create cache: " + err.Error())
}

// Create a temporary directory for JetStream.
d, err := ioutil.TempDir("./", "jetstream*")
if err != nil {
panic(err)
}
defer os.RemoveAll(d)

// Draw up just enough Dendrite config for the server key
// API to work.
cfg := &config.Dendrite{}
cfg.Defaults(true)
cfg.Global.ServerName = gomatrixserverlib.ServerName(s.name)
cfg.Global.PrivateKey = testPriv
cfg.Global.Kafka.UseNaffka = true
cfg.Global.JetStream.InMemory = true
cfg.Global.JetStream.TopicPrefix = string(s.name[:1])
cfg.Global.JetStream.StoragePath = config.Path(d)
cfg.Global.KeyID = serverKeyID
cfg.Global.KeyValidityPeriod = s.validity
cfg.FederationAPI.Database.ConnectionString = config.DataSource("file::memory:")

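The test setup above points JetStream at an in-memory store under a temporary directory. For comparison, starting an embedded NATS server with JetStream directly from a test looks roughly like this; it is a sketch using the nats-server library, and the options shown are assumptions rather than Dendrite's test harness:

package jetstreamtest

import (
	"testing"
	"time"

	natsserver "github.com/nats-io/nats-server/v2/server"
	"github.com/nats-io/nats.go"
)

func startTestJetStream(t *testing.T, storeDir string) nats.JetStreamContext {
	srv, err := natsserver.NewServer(&natsserver.Options{
		JetStream: true,     // enable the JetStream subsystem
		StoreDir:  storeDir, // keep stream data under the test's temp dir
		Port:      -1,       // pick a random free port
	})
	if err != nil {
		t.Fatal(err)
	}
	go srv.Start()
	if !srv.ReadyForConnections(10 * time.Second) {
		t.Fatal("embedded NATS server did not start")
	}
	t.Cleanup(srv.Shutdown)

	nc, err := nats.Connect(srv.ClientURL())
	if err != nil {
		t.Fatal(err)
	}
	js, err := nc.JetStream()
	if err != nil {
		t.Fatal(err)
	}
	return js
}
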
@ -23,10 +23,9 @@ func TestRoomsV3URLEscapeDoNot404(t *testing.T) {
cfg.Global.KeyID = gomatrixserverlib.KeyID("ed25519:auto")
cfg.Global.ServerName = gomatrixserverlib.ServerName("localhost")
cfg.Global.PrivateKey = privKey
cfg.Global.Kafka.UseNaffka = true
cfg.Global.Kafka.Database.ConnectionString = config.DataSource("file::memory:")
cfg.Global.JetStream.InMemory = true
cfg.FederationAPI.Database.ConnectionString = config.DataSource("file::memory:")
base := base.NewBaseDendrite(cfg, "Monolith", base.NoCacheMetrics)
base := base.NewBaseDendrite(cfg, "Monolith")
keyRing := &test.NopJSONVerifier{}
fsAPI := base.FederationAPIHTTPClient()
// TODO: This is pretty fragile, as if anything calls anything on these nils this test will break.

|
|
|||
|
|
@ -249,7 +249,7 @@ func (r *FederationInternalAPI) performJoinUsingServer(
roomserverAPI.KindNew,
respState,
event.Headered(respMakeJoin.RoomVersion),
nil,
nil, false,
); err != nil {
logrus.WithFields(logrus.Fields{
"room_id": roomID,

@ -430,7 +430,7 @@ func (r *FederationInternalAPI) performOutboundPeekUsingServer(
roomserverAPI.KindNew,
&respState,
respPeek.LatestEvent.Headered(respPeek.RoomVersion),
nil,
nil, false,
); err != nil {
return fmt.Errorf("r.producer.SendEventWithState: %w", err)
}

|
|||
|
|
@ -16,6 +16,7 @@ import (
|
|||
"context"
|
||||
"net/http"
|
||||
|
||||
"github.com/matrix-org/dendrite/clientapi/jsonerror"
|
||||
"github.com/matrix-org/dendrite/roomserver/api"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
"github.com/matrix-org/util"
|
||||
|
|
@ -29,15 +30,42 @@ func GetEventAuth(
|
|||
roomID string,
|
||||
eventID string,
|
||||
) util.JSONResponse {
|
||||
// TODO: Optimisation: we shouldn't be querying all the room state
|
||||
// that is in state.StateEvents - we just ignore it.
|
||||
state, err := getState(ctx, request, rsAPI, roomID, eventID)
|
||||
event, resErr := fetchEvent(ctx, rsAPI, eventID)
|
||||
if resErr != nil {
|
||||
return *resErr
|
||||
}
|
||||
|
||||
if event.RoomID() != roomID {
|
||||
return util.JSONResponse{Code: http.StatusNotFound, JSON: jsonerror.NotFound("event does not belong to this room")}
|
||||
}
|
||||
resErr = allowedToSeeEvent(ctx, request.Origin(), rsAPI, eventID)
|
||||
if resErr != nil {
|
||||
return *resErr
|
||||
}
|
||||
|
||||
var response api.QueryStateAndAuthChainResponse
|
||||
err := rsAPI.QueryStateAndAuthChain(
|
||||
ctx,
|
||||
&api.QueryStateAndAuthChainRequest{
|
||||
RoomID: roomID,
|
||||
PrevEventIDs: []string{eventID},
|
||||
AuthEventIDs: event.AuthEventIDs(),
|
||||
OnlyFetchAuthChain: true,
|
||||
},
|
||||
&response,
|
||||
)
|
||||
if err != nil {
|
||||
return *err
|
||||
return util.ErrorResponse(err)
|
||||
}
|
||||
|
||||
if !response.RoomExists {
|
||||
return util.JSONResponse{Code: http.StatusNotFound, JSON: nil}
|
||||
}
|
||||
|
||||
return util.JSONResponse{
|
||||
Code: http.StatusOK,
|
||||
JSON: gomatrixserverlib.RespEventAuth{AuthEvents: state.AuthEvents},
|
||||
JSON: gomatrixserverlib.RespEventAuth{
|
||||
AuthEvents: gomatrixserverlib.UnwrapEventHeaders(response.AuthChainEvents),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -194,6 +194,12 @@ func SendJoin(
|
|||
JSON: jsonerror.BadJSON("No state key was provided in the join event."),
|
||||
}
|
||||
}
|
||||
if !event.StateKeyEquals(event.Sender()) {
|
||||
return util.JSONResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
JSON: jsonerror.BadJSON("Event state key must match the event sender."),
|
||||
}
|
||||
}
|
||||
|
||||
// Check that the room ID is correct.
|
||||
if event.RoomID() != roomID {
|
||||
|
|
|
|||
|
|
@ -175,10 +175,16 @@ func SendLeave(
|
|||
}
|
||||
}
|
||||
|
||||
if event.StateKey() == nil {
|
||||
if event.StateKey() == nil || event.StateKeyEquals("") {
|
||||
return util.JSONResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
JSON: jsonerror.InvalidArgumentValue("missing state_key"),
|
||||
JSON: jsonerror.BadJSON("No state key was provided in the leave event."),
|
||||
}
|
||||
}
|
||||
if !event.StateKeyEquals(event.Sender()) {
|
||||
return util.JSONResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
JSON: jsonerror.BadJSON("Event state key must match the event sender."),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -371,7 +371,7 @@ func (t *txnReq) processTransaction(ctx context.Context) (*gomatrixserverlib.Res
|
|||
for _, task := range tasks {
|
||||
if task.err != nil {
|
||||
results[task.event.EventID()] = gomatrixserverlib.PDUResult{
|
||||
Error: task.err.Error(),
|
||||
// Error: task.err.Error(), TODO: this upsets tests if uncommented
|
||||
}
|
||||
} else {
|
||||
results[task.event.EventID()] = gomatrixserverlib.PDUResult{}
|
||||
|
|
@ -692,6 +692,7 @@ func (t *txnReq) processEvent(ctx context.Context, e *gomatrixserverlib.Event) e
|
|||
},
|
||||
api.DoNotSendToOtherServers,
|
||||
nil,
|
||||
false,
|
||||
)
|
||||
}
|
||||
|
||||
|
|
@ -734,6 +735,7 @@ withNextEvent:
|
|||
SendAsServer: api.DoNotSendToOtherServers,
|
||||
},
|
||||
},
|
||||
false,
|
||||
); err != nil {
|
||||
return fmt.Errorf("api.SendEvents: %w", err)
|
||||
}
|
||||
|
|
@ -882,6 +884,9 @@ func (t *txnReq) processEventWithMissingState(
|
|||
resolvedState,
|
||||
backwardsExtremity.Headered(roomVersion),
|
||||
hadEvents,
|
||||
// Send the events to the roomserver asynchronously, so they will be
|
||||
// processed when the roomserver is able, without blocking here.
|
||||
true,
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("api.SendEventWithState: %w", err)
|
||||
|
|
@ -902,6 +907,9 @@ func (t *txnReq) processEventWithMissingState(
|
|||
append(headeredNewEvents, e.Headered(roomVersion)),
|
||||
api.DoNotSendToOtherServers,
|
||||
nil,
|
||||
// Send the events to the roomserver asynchronously, so they will be
|
||||
// processed when the roomserver is able, without blocking here.
|
||||
true,
|
||||
); err != nil {
|
||||
return fmt.Errorf("api.SendEvents: %w", err)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -89,7 +89,7 @@ func CreateInvitesFrom3PIDInvites(
|
|||
}
|
||||
|
||||
// Send all the events
|
||||
if err := api.SendEvents(req.Context(), rsAPI, api.KindNew, evs, cfg.Matrix.ServerName, nil); err != nil {
|
||||
if err := api.SendEvents(req.Context(), rsAPI, api.KindNew, evs, cfg.Matrix.ServerName, nil, false); err != nil {
|
||||
util.GetLogger(req.Context()).WithError(err).Error("SendEvents failed")
|
||||
return jsonerror.InternalServerError()
|
||||
}
|
||||
|
|
@ -180,6 +180,7 @@ func ExchangeThirdPartyInvite(
|
|||
},
|
||||
cfg.Matrix.ServerName,
|
||||
nil,
|
||||
false,
|
||||
); err != nil {
|
||||
util.GetLogger(httpReq.Context()).WithError(err).Error("SendEvents failed")
|
||||
return jsonerror.InternalServerError()
|
||||
|
|
|
|||
go.mod (17 changes)
|
|
@ -1,23 +1,31 @@
|
|||
module github.com/matrix-org/dendrite
|
||||
|
||||
replace github.com/nats-io/nats-server/v2 => github.com/neilalexander/nats-server/v2 v2.3.3-0.20220104162330-c76d5fd70423
|
||||
|
||||
replace github.com/nats-io/nats.go => github.com/neilalexander/nats.go v1.11.1-0.20220104162523-f4ddebe1061c
|
||||
|
||||
require (
|
||||
github.com/Arceliar/ironwood v0.0.0-20210619124114-6ad55cae5031
|
||||
github.com/Arceliar/phony v0.0.0-20210209235338-dde1a8dca979
|
||||
github.com/DATA-DOG/go-sqlmock v1.5.0
|
||||
github.com/HdrHistogram/hdrhistogram-go v1.0.1 // indirect
|
||||
github.com/MFAshby/stdemuxerhook v1.0.0
|
||||
github.com/Masterminds/semver/v3 v3.1.1
|
||||
github.com/Shopify/sarama v1.29.1
|
||||
github.com/S7evinK/saramajetstream v0.0.0-20210709110708-de6efc8c4a32
|
||||
github.com/Shopify/sarama v1.29.0
|
||||
github.com/codeclysm/extract v2.2.0+incompatible
|
||||
github.com/containerd/containerd v1.5.7 // indirect
|
||||
github.com/docker/docker v20.10.7+incompatible
|
||||
github.com/docker/go-connections v0.4.0
|
||||
github.com/getsentry/sentry-go v0.11.0
|
||||
github.com/golang/snappy v0.0.4 // indirect
|
||||
github.com/gologme/log v1.2.0
|
||||
github.com/gorilla/mux v1.8.0
|
||||
github.com/gorilla/websocket v1.4.2
|
||||
github.com/h2non/filetype v1.1.1 // indirect
|
||||
github.com/hashicorp/golang-lru v0.5.4
|
||||
github.com/juju/testing v0.0.0-20210324180055-18c50b0c2098 // indirect
|
||||
github.com/klauspost/compress v1.13.6 // indirect
|
||||
github.com/lib/pq v1.10.1
|
||||
github.com/libp2p/go-libp2p v0.13.0
|
||||
github.com/libp2p/go-libp2p-circuit v0.4.0
|
||||
|
|
@ -32,17 +40,19 @@ require (
|
|||
github.com/matrix-org/go-http-js-libp2p v0.0.0-20200518170932-783164aeeda4
|
||||
github.com/matrix-org/go-sqlite3-js v0.0.0-20210709140738-b0d1ba599a6d
|
||||
github.com/matrix-org/gomatrix v0.0.0-20210324163249-be2af5ef2e16
|
||||
github.com/matrix-org/gomatrixserverlib v0.0.0-20220121103016-f282e8fdb351
|
||||
github.com/matrix-org/naffka v0.0.0-20210623111924-14ff508b58e0
|
||||
github.com/matrix-org/gomatrixserverlib v0.0.0-20211115192839-15a64d244aa2
|
||||
github.com/matrix-org/pinecone v0.0.0-20211213132835-aa2808d77947
|
||||
github.com/matrix-org/util v0.0.0-20200807132607-55161520e1d4
|
||||
github.com/mattn/go-sqlite3 v1.14.8
|
||||
github.com/morikuni/aec v1.0.0 // indirect
|
||||
github.com/nats-io/nats-server/v2 v2.3.2
|
||||
github.com/nats-io/nats.go v1.13.1-0.20211122170419-d7c1d78a50fc
|
||||
github.com/neilalexander/utp v0.1.1-0.20210727203401-54ae7b1cd5f9
|
||||
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646
|
||||
github.com/ngrok/sqlmw v0.0.0-20200129213757-d5c93a81bec6
|
||||
github.com/opentracing/opentracing-go v1.2.0
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible
|
||||
github.com/pierrec/lz4 v2.6.1+incompatible // indirect
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/pressly/goose v2.7.0+incompatible
|
||||
github.com/prometheus/client_golang v1.11.0
|
||||
|
|
@ -59,6 +69,7 @@ require (
|
|||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2
|
||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect
|
||||
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b
|
||||
golang.org/x/text v0.3.7 // indirect
|
||||
gopkg.in/h2non/bimg.v1 v1.1.5
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
nhooyr.io/websocket v1.8.7
|
||||
|
|
|
|||
go.sum (71 changes)
|
|
@ -84,17 +84,17 @@ github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5
|
|||
github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
|
||||
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/PuerkitoBio/goquery v1.5.1/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc=
|
||||
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||
github.com/RoaringBitmap/roaring v0.4.7/go.mod h1:8khRDP4HmeXns4xIj9oGrKSz7XTQiJx2zgh7AcNke4w=
|
||||
github.com/RyanCarrier/dijkstra v1.0.0/go.mod h1:5agGUBNEtUAGIANmbw09fuO3a2htPEkc1jNH01qxCWA=
|
||||
github.com/RyanCarrier/dijkstra-1 v0.0.0-20170512020943-0e5801a26345/go.mod h1:OK4EvWJ441LQqGzed5NGB6vKBAE34n3z7iayPcEwr30=
|
||||
github.com/S7evinK/saramajetstream v0.0.0-20210709110708-de6efc8c4a32 h1:i3fOph9Hjleo6LbuqN9ODFxnwt7mOtYMpCGeC8qJN50=
|
||||
github.com/S7evinK/saramajetstream v0.0.0-20210709110708-de6efc8c4a32/go.mod h1:ne+jkLlzafIzaE4Q0Ze81T27dNgXe1wxovVEoAtSHTc=
|
||||
github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0=
|
||||
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
|
||||
github.com/Shopify/sarama v1.26.1/go.mod h1:NbSGBSSndYaIhRcBtY9V0U7AyH+x71bG668AuWys/yU=
|
||||
github.com/Shopify/sarama v1.29.1 h1:wBAacXbYVLmWieEA/0X/JagDdCZ8NVFOfS6l6+2u5S0=
|
||||
github.com/Shopify/sarama v1.29.1/go.mod h1:mdtqvCSg8JOxk8PmpTNGyo6wzd4BMm4QXSfDnTXmgkE=
|
||||
github.com/Shopify/sarama v1.29.0 h1:ARid8o8oieau9XrHI55f/L3EoRAhm9px6sonbD7yuUE=
|
||||
github.com/Shopify/sarama v1.29.0/go.mod h1:2QpgD79wpdAESqNQMxNc0KYMkycd4slxGdV3TWSVqrU=
|
||||
github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc=
|
||||
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
|
||||
github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
|
||||
|
|
@ -119,7 +119,6 @@ github.com/anacrolix/missinggo v1.2.1 h1:0IE3TqX5y5D0IxeMwTyIgqdDew4QrzcXaaEnJQy
|
|||
github.com/anacrolix/missinggo v1.2.1/go.mod h1:J5cMhif8jPmFoC3+Uvob3OXXNIhOUikzMt+uUjeM21Y=
|
||||
github.com/anacrolix/missinggo/perf v1.0.0/go.mod h1:ljAFWkBuzkO12MQclXzZrosP5urunoLS0Cbvb4V0uMQ=
|
||||
github.com/anacrolix/tagflag v0.0.0-20180109131632-2146c8d41bf0/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw=
|
||||
github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
|
||||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
||||
|
|
@ -476,9 +475,9 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS
|
|||
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
|
||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA=
|
||||
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
|
||||
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/gologme/log v1.2.0 h1:Ya5Ip/KD6FX7uH0S31QO87nCCSucKtF44TLbTtO7V4c=
|
||||
github.com/gologme/log v1.2.0/go.mod h1:gq31gQ8wEHkR+WekdWsqDuf8pXTUZA9BnnzTuPz1Y9U=
|
||||
github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
|
||||
|
|
@ -488,7 +487,6 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ
|
|||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
|
|
@ -718,16 +716,16 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
|
|||
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
|
||||
github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
|
||||
github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
|
||||
github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
|
||||
github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
|
||||
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
|
||||
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
|
||||
github.com/klauspost/compress v1.12.2 h1:2KCfW3I9M7nSc5wOqXAlW2v2U6v+w6cbjvbfp+OykW8=
|
||||
github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
|
||||
github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
|
||||
github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
|
||||
github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc=
|
||||
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
|
||||
github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d h1:68u9r4wEvL3gYg2jvAOgROwZ3H+Y3hIDk4tbbmIjcYQ=
|
||||
|
|
@ -767,7 +765,6 @@ github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZ
|
|||
github.com/libp2p/go-flow-metrics v0.0.2/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs=
|
||||
github.com/libp2p/go-flow-metrics v0.0.3 h1:8tAs/hSdNvUiLgtlSy3mxwxWP4I9y/jlkPFT7epKdeM=
|
||||
github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs=
|
||||
github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs=
|
||||
github.com/libp2p/go-libp2p v0.6.1/go.mod h1:CTFnWXogryAHjXAKEbOf1OWY+VeAP3lDMZkfEI5sT54=
|
||||
github.com/libp2p/go-libp2p v0.7.0/go.mod h1:hZJf8txWeCduQRDC/WSqBGMxaTHCOYHt2xSU1ivxn0k=
|
||||
github.com/libp2p/go-libp2p v0.7.4/go.mod h1:oXsBlTLF1q7pxr+9w6lqzS1ILpyHsaBPniVO7zIHGMw=
|
||||
|
|
@ -923,7 +920,6 @@ github.com/libp2p/go-netroute v0.1.3/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdm
|
|||
github.com/libp2p/go-openssl v0.0.2/go.mod h1:v8Zw2ijCSWBQi8Pq5GAixw6DbFfa9u6VIYDXnvOXkc0=
|
||||
github.com/libp2p/go-openssl v0.0.3/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc=
|
||||
github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc=
|
||||
github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc=
|
||||
github.com/libp2p/go-openssl v0.0.5/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc=
|
||||
github.com/libp2p/go-openssl v0.0.7 h1:eCAzdLejcNVBzP/iZM9vqHnQm+XyCEbSSIheIPRGNsw=
|
||||
github.com/libp2p/go-openssl v0.0.7/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc=
|
||||
|
|
@ -968,7 +964,6 @@ github.com/lxn/walk v0.0.0-20210112085537-c389da54e794/go.mod h1:E23UucZGqpuUANJ
|
|||
github.com/lxn/win v0.0.0-20210218163916-a377121e959e/go.mod h1:KxxjdtRkfNoYDCUP5ryK7XJJNTnpC8atvtmTheChOtk=
|
||||
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
|
|
@ -995,10 +990,8 @@ github.com/matrix-org/go-sqlite3-js v0.0.0-20210709140738-b0d1ba599a6d/go.mod h1
|
|||
github.com/matrix-org/gomatrix v0.0.0-20190528120928-7df988a63f26/go.mod h1:3fxX6gUjWyI/2Bt7J1OLhpCzOfO/bB3AiX0cJtEKud0=
|
||||
github.com/matrix-org/gomatrix v0.0.0-20210324163249-be2af5ef2e16 h1:ZtO5uywdd5dLDCud4r0r55eP4j9FuUNpl60Gmntcop4=
|
||||
github.com/matrix-org/gomatrix v0.0.0-20210324163249-be2af5ef2e16/go.mod h1:/gBX06Kw0exX1HrwmoBibFA98yBk/jxKpGVeyQbff+s=
|
||||
github.com/matrix-org/gomatrixserverlib v0.0.0-20220121103016-f282e8fdb351 h1:AbSwKMBFt5fbqwBFgrq1ie4vatlopWbSunczgRk6VXU=
|
||||
github.com/matrix-org/gomatrixserverlib v0.0.0-20220121103016-f282e8fdb351/go.mod h1:qFvhfbQ5orQxlH9vCiFnP4dW27xxnWHdNUBKyj/fbiY=
|
||||
github.com/matrix-org/naffka v0.0.0-20210623111924-14ff508b58e0 h1:HZCzy4oVzz55e+cOMiX/JtSF2UOY1evBl2raaE7ACcU=
|
||||
github.com/matrix-org/naffka v0.0.0-20210623111924-14ff508b58e0/go.mod h1:sjyPyRxKM5uw1nD2cJ6O2OxI6GOqyVBfNXqKjBZTBZE=
|
||||
github.com/matrix-org/gomatrixserverlib v0.0.0-20211115192839-15a64d244aa2 h1:RFsBN3509Ql6NJ7TDVkcKoN3bb/tmqUqzur5c0AwIHQ=
|
||||
github.com/matrix-org/gomatrixserverlib v0.0.0-20211115192839-15a64d244aa2/go.mod h1:rB8tBUUUo1rzUqpzklRDSooxZ6YMhoaEPx4SO5fGeUc=
|
||||
github.com/matrix-org/pinecone v0.0.0-20211213132835-aa2808d77947 h1:TxO9TMFAuF+Vz3vZV53z5mjycWtF1+naY9ffs6QfZxc=
|
||||
github.com/matrix-org/pinecone v0.0.0-20211213132835-aa2808d77947/go.mod h1:r6dsL+ylE0yXe/7zh8y/Bdh6aBYI1r+u4yZni9A4iyk=
|
||||
github.com/matrix-org/util v0.0.0-20190711121626-527ce5ddefc7/go.mod h1:vVQlW/emklohkZnOPwD3LrZUBqdfsbiyO3p1lNV8F6U=
|
||||
|
|
@ -1022,7 +1015,6 @@ github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzp
|
|||
github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk=
|
||||
github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
|
||||
github.com/mattn/go-sqlite3 v1.14.2/go.mod h1:JIl7NbARA7phWnGvh0LKTyg7S9BA+6gx71ShQilpsus=
|
||||
github.com/mattn/go-sqlite3 v1.14.8 h1:gDp86IdQsN/xWjIEmr9MF6o9mpksUgh0fu+9ByFxzIU=
|
||||
github.com/mattn/go-sqlite3 v1.14.8/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
|
||||
github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
|
||||
|
|
@ -1042,6 +1034,8 @@ github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7
|
|||
github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
|
||||
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g=
|
||||
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ=
|
||||
github.com/minio/highwayhash v1.0.1 h1:dZ6IIu8Z14VlC0VpfKofAhCy74wu/Qb5gcn52yWoz/0=
|
||||
github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
|
||||
github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U=
|
||||
github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U=
|
||||
github.com/minio/sha256-simd v0.1.0/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U=
|
||||
|
|
@ -1132,15 +1126,21 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m
|
|||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
|
||||
github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
|
||||
github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
|
||||
github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
|
||||
github.com/nats-io/jwt/v2 v2.2.0 h1:Yg/4WFK6vsqMudRg91eBb7Dh6XeVcDMPHycDE8CfltE=
|
||||
github.com/nats-io/jwt/v2 v2.2.0/go.mod h1:0tqz9Hlu6bCBFLWAASKhE5vUA4c24L9KPUUgvwumE/k=
|
||||
github.com/nats-io/nkeys v0.3.0 h1:cgM5tL53EvYRU+2YLXIK0G2mJtK12Ft9oeooSZMA2G8=
|
||||
github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4=
|
||||
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
|
||||
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
|
||||
github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32 h1:W6apQkHrMkS0Muv8G/TipAy/FJl/rCYT0+EuS8+Z0z4=
|
||||
github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uYLpLIr5fm8diHn0JbqRycJi6w0Ms=
|
||||
github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
|
||||
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
|
||||
github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
|
||||
github.com/neilalexander/nats-server/v2 v2.3.3-0.20220104162330-c76d5fd70423 h1:BLQVdjMH5XD4BYb0fa+c2Oh2Nr1vrO7GKvRnIJDxChc=
|
||||
github.com/neilalexander/nats-server/v2 v2.3.3-0.20220104162330-c76d5fd70423/go.mod h1:9sdEkBhyZMQG1M9TevnlYUwMusRACn2vlgOeqoHKwVo=
|
||||
github.com/neilalexander/nats.go v1.11.1-0.20220104162523-f4ddebe1061c h1:G2qsv7D0rY94HAu8pXmElMluuMHQ85waxIDQBhIzV2Q=
|
||||
github.com/neilalexander/nats.go v1.11.1-0.20220104162523-f4ddebe1061c/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w=
|
||||
github.com/neilalexander/utp v0.1.1-0.20210622132614-ee9a34a30488/go.mod h1:NPHGhPc0/wudcaCqL/H5AOddkRf8GPRhzOujuUKGQu8=
|
||||
github.com/neilalexander/utp v0.1.1-0.20210727203401-54ae7b1cd5f9 h1:lrVQzBtkeQEGGYUHwSX1XPe1E5GL6U3KYCNe2G4bncQ=
|
||||
github.com/neilalexander/utp v0.1.1-0.20210727203401-54ae7b1cd5f9/go.mod h1:NPHGhPc0/wudcaCqL/H5AOddkRf8GPRhzOujuUKGQu8=
|
||||
|
|
@ -1216,9 +1216,9 @@ github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/9
|
|||
github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
|
||||
github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
|
||||
github.com/pierrec/lz4 v2.4.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pierrec/lz4 v2.6.0+incompatible h1:Ix9yFKn1nSPBLFl/yZknTp8TU5G4Ps0JDmguYK6iH1A=
|
||||
github.com/pierrec/lz4 v2.6.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM=
|
||||
github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
|
||||
github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
|
|
@ -1269,7 +1269,6 @@ github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
|
|||
github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
|
||||
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
|
|
@ -1368,6 +1367,7 @@ github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG
|
|||
github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
|
||||
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
|
||||
github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
|
||||
github.com/tidwall/gjson v1.9.3/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
|
||||
github.com/tidwall/gjson v1.12.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
|
||||
github.com/tidwall/gjson v1.13.0 h1:3TFY9yxOQShrvmjdM76K+jc66zJeT6D3/VFFYCGQf7M=
|
||||
github.com/tidwall/gjson v1.13.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
|
||||
|
|
@ -1424,9 +1424,7 @@ github.com/willf/bitset v1.1.9/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPyS
|
|||
github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
|
||||
github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
|
||||
github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE=
|
||||
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
|
||||
github.com/xdg/scram v1.0.3/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
|
||||
github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
|
||||
github.com/xdg/stringprep v1.0.3/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
|
||||
|
|
@ -1501,7 +1499,6 @@ golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8U
|
|||
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
|
|
@ -1510,6 +1507,7 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh
|
|||
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
|
||||
|
|
@ -1557,7 +1555,6 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
|||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180406214816-61147c48b25b/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
|
|
@ -1603,10 +1600,11 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY
|
|||
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||
golang.org/x/net v0.0.0-20210427231257-85d9c07bbe3a/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
|
||||
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
|
||||
golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210927181540-4e4d966f7476/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
|
|
@ -1637,6 +1635,7 @@ golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5h
|
|||
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
|
|
@ -1730,12 +1729,14 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
|||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7-0.20210503195748-5c7c50ebbd4f h1:yQJrRE0hDxDFmZLlRaw+3vusO4fwNHgHIjUOMO7bHYI=
|
||||
golang.org/x/text v0.3.7-0.20210503195748-5c7c50ebbd4f/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s=
|
||||
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
|
|
@ -1887,7 +1888,6 @@ gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8
|
|||
gopkg.in/check.v1 v1.0.0-20160105164936-4f90aeace3a2/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
|
|
@ -1906,11 +1906,6 @@ gopkg.in/h2non/gock.v1 v1.0.14/go.mod h1:sX4zAkdYX1TRGJ2JY156cFspQn4yRWn6p9EMdOD
|
|||
gopkg.in/httprequest.v1 v1.1.1/go.mod h1:/CkavNL+g3qLOrpFHVrEx4NKepeqR4XTZWNj4sGGjz0=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/ini.v1 v1.51.1/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo=
|
||||
gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q=
|
||||
gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4=
|
||||
gopkg.in/jcmturner/gokrb5.v7 v7.5.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM=
|
||||
gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8=
|
||||
gopkg.in/macaroon.v2 v2.1.0 h1:HZcsjBCzq9t0eBPMKqTN/uSN6JOm78ZJ2INbqcBQOUI=
|
||||
gopkg.in/macaroon.v2 v2.1.0/go.mod h1:OUb+TQP/OP0WOerC2Jp/3CwhIKyIa9kQjuc7H24e6/o=
|
||||
gopkg.in/mgo.v2 v2.0.0-20160818015218-f2b6f6c918c4/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
@ -81,7 +81,7 @@ func MakeConfig(configDir, kafkaURI, database, host string, startPort int) (*con

	cfg.MediaAPI.BasePath = config.Path(mediaBasePath)

	cfg.Global.Kafka.Addresses = []string{kafkaURI}
	cfg.Global.JetStream.Addresses = []string{kafkaURI}

	// TODO: Use different databases for the different schemas.
	// Using the same database for every schema currently works because
@ -69,7 +69,8 @@ type DeviceMessage struct {
	*DeviceKeys                         `json:"DeviceKeys,omitempty"`
	*eduapi.OutputCrossSigningKeyUpdate `json:"CrossSigningKeyUpdate,omitempty"`
	// A monotonically increasing number which represents device changes for this user.
	StreamID int
	StreamID int
	DeviceChangeID int64
}

// DeviceKeys represents a set of device keys for a single device
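For illustration only (not part of this commit): a minimal sketch of how a downstream consumer might use the new DeviceChangeID field as its resume position in the device-change stream. It assumes the keyserver api package from this diff plus "encoding/json"; the function and variable names are hypothetical.

// latestDeviceChange is a hypothetical helper: given raw key-change messages,
// it returns the highest DeviceChangeID seen, i.e. the position to resume from.
func latestDeviceChange(msgs [][]byte) (int64, error) {
	var latest int64
	for _, raw := range msgs {
		var m api.DeviceMessage
		if err := json.Unmarshal(raw, &m); err != nil {
			return 0, err
		}
		// DeviceChangeID increases monotonically per change, so the maximum
		// value observed is the newest stream position.
		if m.DeviceChangeID > latest {
			latest = m.DeviceChangeID
		}
	}
	return latest, nil
}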
@ -224,8 +225,6 @@ type QueryKeysResponse struct {
}

type QueryKeyChangesRequest struct {
	// The partition which had key events sent to
	Partition int32
	// The offset of the last received key event, or sarama.OffsetOldest if this is from the beginning
	Offset int64
	// The inclusive offset where to track key changes up to. Messages with this offset are included in the response.
@ -236,8 +235,6 @@ type QueryKeyChangesRequest struct {
type QueryKeyChangesResponse struct {
	// The set of users who have had their keys change.
	UserIDs []string
	// The partition being served - useful if the partition is unknown at request time
	Partition int32
	// The latest offset represented in this response.
	Offset int64
	// Set if there was a problem handling the request.
@ -22,6 +22,7 @@ import (
	"github.com/matrix-org/dendrite/keyserver/api"
	"github.com/matrix-org/dendrite/keyserver/storage"
	"github.com/matrix-org/dendrite/setup/config"
	"github.com/matrix-org/dendrite/setup/jetstream"
	"github.com/matrix-org/dendrite/setup/process"
	"github.com/matrix-org/gomatrixserverlib"
	"github.com/sirupsen/logrus"

@ -50,7 +51,7 @@ func NewOutputCrossSigningKeyUpdateConsumer(
	consumer := internal.ContinualConsumer{
		Process:        process,
		ComponentName:  "keyserver/keyserver",
		Topic:          cfg.Global.Kafka.TopicFor(config.TopicOutputKeyChangeEvent),
		Topic:          cfg.Global.JetStream.TopicFor(jetstream.OutputKeyChangeEvent),
		Consumer:       kafkaConsumer,
		PartitionStore: keyDB,
	}
@ -59,17 +59,13 @@ func (a *KeyInternalAPI) InputDeviceListUpdate(
}

func (a *KeyInternalAPI) QueryKeyChanges(ctx context.Context, req *api.QueryKeyChangesRequest, res *api.QueryKeyChangesResponse) {
	if req.Partition < 0 {
		req.Partition = a.Producer.DefaultPartition()
	}
	userIDs, latest, err := a.DB.KeyChanges(ctx, req.Partition, req.Offset, req.ToOffset)
	userIDs, latest, err := a.DB.KeyChanges(ctx, req.Offset, req.ToOffset)
	if err != nil {
		res.Error = &api.KeyError{
			Err: err.Error(),
		}
	}
	res.Offset = latest
	res.Partition = req.Partition
	res.UserIDs = userIDs
}
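As a rough usage sketch (not part of the commit): with the partition gone, a caller such as the sync API only needs the two change IDs that bound the window it cares about. The helper below assumes the request/response types shown above and an api.KeyInternalAPI value; everything else is illustrative.

// usersWithKeyChanges is a hypothetical caller: fetch everyone whose keys
// changed after `from` (exclusive) up to `to` (inclusive).
func usersWithKeyChanges(ctx context.Context, keyAPI api.KeyInternalAPI, from, to int64) ([]string, int64, error) {
	var res api.QueryKeyChangesResponse
	keyAPI.QueryKeyChanges(ctx, &api.QueryKeyChangesRequest{
		Offset:   from, // exclusive lower bound
		ToOffset: to,   // inclusive upper bound; sarama.OffsetNewest means "no upper limit"
	}, &res)
	if res.Error != nil {
		return nil, 0, fmt.Errorf("QueryKeyChanges: %s", res.Error.Err)
	}
	// res.Offset is the newest change ID covered by this response and can be
	// stored as the next "from" position.
	return res.UserIDs, res.Offset, nil
}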
@ -25,7 +25,7 @@ import (
	"github.com/matrix-org/dendrite/keyserver/storage"
	"github.com/matrix-org/dendrite/setup/base"
	"github.com/matrix-org/dendrite/setup/config"
	"github.com/matrix-org/dendrite/setup/kafka"
	"github.com/matrix-org/dendrite/setup/jetstream"
	"github.com/sirupsen/logrus"
)
@ -40,16 +40,16 @@ func AddInternalRoutes(router *mux.Router, intAPI api.KeyInternalAPI) {
|
|||
func NewInternalAPI(
|
||||
base *base.BaseDendrite, cfg *config.KeyServer, fedClient fedsenderapi.FederationClient,
|
||||
) api.KeyInternalAPI {
|
||||
consumer, producer := kafka.SetupConsumerProducer(&cfg.Matrix.Kafka)
|
||||
js, consumer, _ := jetstream.Prepare(&cfg.Matrix.JetStream)
|
||||
|
||||
db, err := storage.NewDatabase(&cfg.Database)
|
||||
if err != nil {
|
||||
logrus.WithError(err).Panicf("failed to connect to key server database")
|
||||
}
|
||||
keyChangeProducer := &producers.KeyChange{
|
||||
Topic: string(cfg.Matrix.Kafka.TopicFor(config.TopicOutputKeyChangeEvent)),
|
||||
Producer: producer,
|
||||
DB: db,
|
||||
Topic: string(cfg.Matrix.JetStream.TopicFor(jetstream.OutputKeyChangeEvent)),
|
||||
JetStream: js,
|
||||
DB: db,
|
||||
}
|
||||
ap := &internal.KeyInternalAPI{
|
||||
DB: db,
|
||||
|
|
|
|||
|
|
@ -18,52 +18,47 @@ import (
|
|||
"context"
|
||||
"encoding/json"
|
||||
|
||||
"github.com/Shopify/sarama"
|
||||
eduapi "github.com/matrix-org/dendrite/eduserver/api"
|
||||
"github.com/matrix-org/dendrite/keyserver/api"
|
||||
"github.com/matrix-org/dendrite/keyserver/storage"
|
||||
"github.com/matrix-org/dendrite/setup/jetstream"
|
||||
"github.com/nats-io/nats.go"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// KeyChange produces key change events for the sync API and federation sender to consume
|
||||
type KeyChange struct {
|
||||
Topic string
|
||||
Producer sarama.SyncProducer
|
||||
DB storage.Database
|
||||
}
|
||||
|
||||
// DefaultPartition returns the default partition this process is sending key changes to.
|
||||
// NB: A keyserver MUST send key changes to only 1 partition or else query operations will
|
||||
// become inconsistent. Partitions can be sharded (e.g by hash of user ID of key change) but
|
||||
// then all keyservers must be queried to calculate the entire set of key changes between
|
||||
// two sync tokens.
|
||||
func (p *KeyChange) DefaultPartition() int32 {
|
||||
return 0
|
||||
Topic string
|
||||
JetStream nats.JetStreamContext
|
||||
DB storage.Database
|
||||
}
|
||||
|
||||
// ProduceKeyChanges creates new change events for each key
|
||||
func (p *KeyChange) ProduceKeyChanges(keys []api.DeviceMessage) error {
|
||||
userToDeviceCount := make(map[string]int)
|
||||
for _, key := range keys {
|
||||
var m sarama.ProducerMessage
|
||||
|
||||
id, err := p.DB.StoreKeyChange(context.Background(), key.UserID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
key.DeviceChangeID = id
|
||||
value, err := json.Marshal(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
m.Topic = string(p.Topic)
|
||||
m.Key = sarama.StringEncoder(key.UserID)
|
||||
m.Value = sarama.ByteEncoder(value)
|
||||
m := &nats.Msg{
|
||||
Subject: p.Topic,
|
||||
Header: nats.Header{},
|
||||
}
|
||||
m.Header.Set(jetstream.UserID, key.UserID)
|
||||
m.Data = value
|
||||
|
||||
partition, offset, err := p.Producer.SendMessage(&m)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = p.DB.StoreKeyChange(context.Background(), partition, offset, key.UserID)
|
||||
_, err = p.JetStream.PublishMsg(m)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
userToDeviceCount[key.UserID]++
|
||||
}
|
||||
for userID, count := range userToDeviceCount {
|
||||
|
|
@ -76,7 +71,6 @@ func (p *KeyChange) ProduceKeyChanges(keys []api.DeviceMessage) error {
|
|||
}
|
||||
|
||||
func (p *KeyChange) ProduceSigningKeyUpdate(key eduapi.CrossSigningKeyUpdate) error {
|
||||
var m sarama.ProducerMessage
|
||||
output := &api.DeviceMessage{
|
||||
Type: api.TypeCrossSigningUpdate,
|
||||
OutputCrossSigningKeyUpdate: &eduapi.OutputCrossSigningKeyUpdate{
|
||||
|
|
@ -84,20 +78,25 @@ func (p *KeyChange) ProduceSigningKeyUpdate(key eduapi.CrossSigningKeyUpdate) er
|
|||
},
|
||||
}
|
||||
|
||||
id, err := p.DB.StoreKeyChange(context.Background(), key.UserID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
output.DeviceChangeID = id
|
||||
|
||||
value, err := json.Marshal(output)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
m.Topic = string(p.Topic)
|
||||
m.Key = sarama.StringEncoder(key.UserID)
|
||||
m.Value = sarama.ByteEncoder(value)
|
||||
|
||||
partition, offset, err := p.Producer.SendMessage(&m)
|
||||
if err != nil {
|
||||
return err
|
||||
m := &nats.Msg{
|
||||
Subject: p.Topic,
|
||||
Header: nats.Header{},
|
||||
}
|
||||
err = p.DB.StoreKeyChange(context.Background(), partition, offset, key.UserID)
|
||||
m.Header.Set(jetstream.UserID, key.UserID)
|
||||
m.Data = value
|
||||
|
||||
_, err = p.JetStream.PublishMsg(m)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
|||
|
|
@ -66,14 +66,14 @@ type Database interface {
	// cannot be claimed or if none exist for this (user, device, algorithm), instead it is omitted from the returned slice.
	ClaimKeys(ctx context.Context, userToDeviceToAlgorithm map[string]map[string]string) ([]api.OneTimeKeys, error)

	// StoreKeyChange stores key change metadata after the change has been sent to Kafka. `userID` is the user who has changed
	// their keys in some way.
	StoreKeyChange(ctx context.Context, partition int32, offset int64, userID string) error
	// StoreKeyChange stores key change metadata and returns the device change ID which represents the position in the /sync stream for this device change.
	// `userID` is the user who has changed their keys in some way.
	StoreKeyChange(ctx context.Context, userID string) (int64, error)

	// KeyChanges returns a list of user IDs who have modified their keys from the offset given (exclusive) to the offset given (inclusive).
	// A to offset of sarama.OffsetNewest means no upper limit.
	// Returns the offset of the latest key change.
	KeyChanges(ctx context.Context, partition int32, fromOffset, toOffset int64) (userIDs []string, latestOffset int64, err error)
	KeyChanges(ctx context.Context, fromOffset, toOffset int64) (userIDs []string, latestOffset int64, err error)

	// StaleDeviceLists returns a list of user IDs ending with the domains provided who have stale device lists.
	// If no domains are given, all user IDs with stale device lists are returned.
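A minimal sketch (not from this commit) of how the reshaped storage interface is meant to be used: the change ID returned by StoreKeyChange is exactly the position later handed back to KeyChanges. It assumes the keyserver storage.Database interface above plus the standard "log" package; names and the example user ID are illustrative.

// demoKeyChangeRoundTrip is a hypothetical round trip against the interface above.
func demoKeyChangeRoundTrip(ctx context.Context, db storage.Database) error {
	// Record that alice's keys changed; the returned ID is her position in the
	// device-change stream.
	pos, err := db.StoreKeyChange(ctx, "@alice:example.com")
	if err != nil {
		return err
	}
	// Ask for every user who changed keys after the position just before alice's
	// change (exclusive) up to her change (inclusive).
	userIDs, latest, err := db.KeyChanges(ctx, pos-1, pos)
	if err != nil {
		return err
	}
	log.Printf("changed users=%v latest=%d", userIDs, latest)
	return nil
}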
@ -0,0 +1,79 @@
|
|||
// Copyright 2022 The Matrix.org Foundation C.I.C.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package deltas
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
|
||||
"github.com/matrix-org/dendrite/internal/sqlutil"
|
||||
"github.com/pressly/goose"
|
||||
)
|
||||
|
||||
func LoadFromGoose() {
|
||||
goose.AddMigration(UpRefactorKeyChanges, DownRefactorKeyChanges)
|
||||
}
|
||||
|
||||
func LoadRefactorKeyChanges(m *sqlutil.Migrations) {
|
||||
m.AddMigration(UpRefactorKeyChanges, DownRefactorKeyChanges)
|
||||
}
|
||||
|
||||
func UpRefactorKeyChanges(tx *sql.Tx) error {
|
||||
// start counting from the last max offset, else 0. We need to do a count(*) first to see if there
|
||||
// even are entries in this table to know if we can query for log_offset. Without the count then
|
||||
// the query to SELECT the max log offset fails on new Dendrite instances as log_offset doesn't
|
||||
// exist on that table. Even though we discard the error, the txn is tainted and gets aborted :/
|
||||
var count int
|
||||
_ = tx.QueryRow(`SELECT count(*) FROM keyserver_key_changes`).Scan(&count)
|
||||
if count > 0 {
|
||||
var maxOffset int64
|
||||
_ = tx.QueryRow(`SELECT coalesce(MAX(log_offset), 0) AS offset FROM keyserver_key_changes`).Scan(&maxOffset)
|
||||
if _, err := tx.Exec(fmt.Sprintf(`CREATE SEQUENCE IF NOT EXISTS keyserver_key_changes_seq START %d`, maxOffset)); err != nil {
|
||||
return fmt.Errorf("failed to CREATE SEQUENCE for key changes, starting at %d: %s", maxOffset, err)
|
||||
}
|
||||
}
|
||||
|
||||
_, err := tx.Exec(`
|
||||
-- make the new table
|
||||
DROP TABLE IF EXISTS keyserver_key_changes;
|
||||
CREATE TABLE IF NOT EXISTS keyserver_key_changes (
|
||||
change_id BIGINT PRIMARY KEY DEFAULT nextval('keyserver_key_changes_seq'),
|
||||
user_id TEXT NOT NULL,
|
||||
CONSTRAINT keyserver_key_changes_unique_per_user UNIQUE (user_id)
|
||||
);
|
||||
`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to execute upgrade: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func DownRefactorKeyChanges(tx *sql.Tx) error {
|
||||
_, err := tx.Exec(`
|
||||
-- Drop all data and revert back, we can't keep the data as Kafka offsets determine the numbers
|
||||
DROP SEQUENCE IF EXISTS keyserver_key_changes_seq;
|
||||
DROP TABLE IF EXISTS keyserver_key_changes;
|
||||
CREATE TABLE IF NOT EXISTS keyserver_key_changes (
|
||||
partition BIGINT NOT NULL,
|
||||
log_offset BIGINT NOT NULL,
|
||||
user_id TEXT NOT NULL,
|
||||
CONSTRAINT keyserver_key_changes_unique UNIQUE (partition, log_offset)
|
||||
);
|
||||
`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to execute downgrade: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
@ -26,27 +26,25 @@ import (
|
|||
|
||||
var keyChangesSchema = `
|
||||
-- Stores key change information about users. Used to determine when to send updated device lists to clients.
|
||||
CREATE SEQUENCE IF NOT EXISTS keyserver_key_changes_seq;
|
||||
CREATE TABLE IF NOT EXISTS keyserver_key_changes (
|
||||
partition BIGINT NOT NULL,
|
||||
log_offset BIGINT NOT NULL,
|
||||
change_id BIGINT PRIMARY KEY DEFAULT nextval('keyserver_key_changes_seq'),
|
||||
user_id TEXT NOT NULL,
|
||||
CONSTRAINT keyserver_key_changes_unique UNIQUE (partition, log_offset)
|
||||
CONSTRAINT keyserver_key_changes_unique_per_user UNIQUE (user_id)
|
||||
);
|
||||
`
|
||||
|
||||
// Replace based on partition|offset - we should never insert duplicates unless the kafka logs are wiped.
|
||||
// Rather than falling over, just overwrite (though this will mean clients with an existing sync token will
|
||||
// miss out on updates). TODO: Ideally we would detect when kafka logs are purged then purge this table too.
|
||||
// Replace based on user ID. We don't care how many times the user's keys have changed, only that they
|
||||
// have changed, hence we can just keep bumping the change ID for this user.
|
||||
const upsertKeyChangeSQL = "" +
|
||||
"INSERT INTO keyserver_key_changes (partition, log_offset, user_id)" +
|
||||
" VALUES ($1, $2, $3)" +
|
||||
" ON CONFLICT ON CONSTRAINT keyserver_key_changes_unique" +
|
||||
" DO UPDATE SET user_id = $3"
|
||||
"INSERT INTO keyserver_key_changes (user_id)" +
|
||||
" VALUES ($1)" +
|
||||
" ON CONFLICT ON CONSTRAINT keyserver_key_changes_unique_per_user" +
|
||||
" DO UPDATE SET change_id = nextval('keyserver_key_changes_seq')" +
|
||||
" RETURNING change_id"
|
||||
|
||||
// select the highest offset for each user in the range. The grouping by user gives distinct entries and then we just
|
||||
// take the max offset value as the latest offset.
|
||||
const selectKeyChangesSQL = "" +
|
||||
"SELECT user_id, MAX(log_offset) FROM keyserver_key_changes WHERE partition = $1 AND log_offset > $2 AND log_offset <= $3 GROUP BY user_id"
|
||||
"SELECT user_id, change_id FROM keyserver_key_changes WHERE change_id > $1 AND change_id <= $2"
|
||||
|
||||
type keyChangesStatements struct {
|
||||
db *sql.DB
|
||||
|
|
@ -59,31 +57,32 @@ func NewPostgresKeyChangesTable(db *sql.DB) (tables.KeyChanges, error) {
|
|||
db: db,
|
||||
}
|
||||
_, err := db.Exec(keyChangesSchema)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if s.upsertKeyChangeStmt, err = db.Prepare(upsertKeyChangeSQL); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if s.selectKeyChangesStmt, err = db.Prepare(selectKeyChangesSQL); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return s, nil
|
||||
return s, err
|
||||
}
|
||||
|
||||
func (s *keyChangesStatements) InsertKeyChange(ctx context.Context, partition int32, offset int64, userID string) error {
|
||||
_, err := s.upsertKeyChangeStmt.ExecContext(ctx, partition, offset, userID)
|
||||
return err
|
||||
func (s *keyChangesStatements) Prepare() (err error) {
|
||||
if s.upsertKeyChangeStmt, err = s.db.Prepare(upsertKeyChangeSQL); err != nil {
|
||||
return err
|
||||
}
|
||||
if s.selectKeyChangesStmt, err = s.db.Prepare(selectKeyChangesSQL); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *keyChangesStatements) InsertKeyChange(ctx context.Context, userID string) (changeID int64, err error) {
|
||||
err = s.upsertKeyChangeStmt.QueryRowContext(ctx, userID).Scan(&changeID)
|
||||
return
|
||||
}
|
||||
|
||||
func (s *keyChangesStatements) SelectKeyChanges(
|
||||
ctx context.Context, partition int32, fromOffset, toOffset int64,
|
||||
ctx context.Context, fromOffset, toOffset int64,
|
||||
) (userIDs []string, latestOffset int64, err error) {
|
||||
if toOffset == sarama.OffsetNewest {
|
||||
toOffset = math.MaxInt64
|
||||
}
|
||||
latestOffset = fromOffset
|
||||
rows, err := s.selectKeyChangesStmt.QueryContext(ctx, partition, fromOffset, toOffset)
|
||||
rows, err := s.selectKeyChangesStmt.QueryContext(ctx, fromOffset, toOffset)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
|
|
|||
|
|
@ -16,6 +16,7 @@ package postgres
|
|||
|
||||
import (
|
||||
"github.com/matrix-org/dendrite/internal/sqlutil"
|
||||
"github.com/matrix-org/dendrite/keyserver/storage/postgres/deltas"
|
||||
"github.com/matrix-org/dendrite/keyserver/storage/shared"
|
||||
"github.com/matrix-org/dendrite/setup/config"
|
||||
)
|
||||
|
|
@ -51,6 +52,14 @@ func NewDatabase(dbProperties *config.DatabaseOptions) (*shared.Database, error)
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m := sqlutil.NewMigrations()
|
||||
deltas.LoadRefactorKeyChanges(m)
|
||||
if err = m.RunDeltas(db, dbProperties); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = kc.Prepare(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
d := &shared.Database{
|
||||
DB: db,
|
||||
Writer: sqlutil.NewDummyWriter(),
|
||||
|
|
|
|||
|
|
@ -135,14 +135,16 @@ func (d *Database) ClaimKeys(ctx context.Context, userToDeviceToAlgorithm map[st
|
|||
return result, err
|
||||
}
|
||||
|
||||
func (d *Database) StoreKeyChange(ctx context.Context, partition int32, offset int64, userID string) error {
|
||||
return d.Writer.Do(nil, nil, func(_ *sql.Tx) error {
|
||||
return d.KeyChangesTable.InsertKeyChange(ctx, partition, offset, userID)
|
||||
func (d *Database) StoreKeyChange(ctx context.Context, userID string) (id int64, err error) {
|
||||
err = d.Writer.Do(nil, nil, func(_ *sql.Tx) error {
|
||||
id, err = d.KeyChangesTable.InsertKeyChange(ctx, userID)
|
||||
return err
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (d *Database) KeyChanges(ctx context.Context, partition int32, fromOffset, toOffset int64) (userIDs []string, latestOffset int64, err error) {
|
||||
return d.KeyChangesTable.SelectKeyChanges(ctx, partition, fromOffset, toOffset)
|
||||
func (d *Database) KeyChanges(ctx context.Context, fromOffset, toOffset int64) (userIDs []string, latestOffset int64, err error) {
|
||||
return d.KeyChangesTable.SelectKeyChanges(ctx, fromOffset, toOffset)
|
||||
}
|
||||
|
||||
// StaleDeviceLists returns a list of user IDs ending with the domains provided who have stale device lists.
|
||||
|
|
|
|||
|
|
@ -0,0 +1,76 @@
|
|||
// Copyright 2022 The Matrix.org Foundation C.I.C.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package deltas
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
|
||||
"github.com/matrix-org/dendrite/internal/sqlutil"
|
||||
"github.com/pressly/goose"
|
||||
)
|
||||
|
||||
func LoadFromGoose() {
|
||||
goose.AddMigration(UpRefactorKeyChanges, DownRefactorKeyChanges)
|
||||
}
|
||||
|
||||
func LoadRefactorKeyChanges(m *sqlutil.Migrations) {
|
||||
m.AddMigration(UpRefactorKeyChanges, DownRefactorKeyChanges)
|
||||
}
|
||||
|
||||
func UpRefactorKeyChanges(tx *sql.Tx) error {
|
||||
// start counting from the last max offset, else 0.
|
||||
var maxOffset int64
|
||||
var userID string
|
||||
_ = tx.QueryRow(`SELECT user_id, MAX(log_offset) FROM keyserver_key_changes GROUP BY user_id`).Scan(&userID, &maxOffset)
|
||||
|
||||
_, err := tx.Exec(`
|
||||
-- make the new table
|
||||
DROP TABLE IF EXISTS keyserver_key_changes;
|
||||
CREATE TABLE IF NOT EXISTS keyserver_key_changes (
|
||||
change_id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
-- The key owner
|
||||
user_id TEXT NOT NULL,
|
||||
UNIQUE (user_id)
|
||||
);
|
||||
`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to execute upgrade: %w", err)
|
||||
}
|
||||
// to start counting from maxOffset, insert a row with that value
|
||||
if userID != "" {
|
||||
_, err = tx.Exec(`INSERT INTO keyserver_key_changes(change_id, user_id) VALUES($1, $2)`, maxOffset, userID)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func DownRefactorKeyChanges(tx *sql.Tx) error {
|
||||
_, err := tx.Exec(`
|
||||
-- Drop all data and revert back, we can't keep the data as Kafka offsets determine the numbers
|
||||
DROP TABLE IF EXISTS keyserver_key_changes;
|
||||
CREATE TABLE IF NOT EXISTS keyserver_key_changes (
|
||||
partition BIGINT NOT NULL,
|
||||
offset BIGINT NOT NULL,
|
||||
-- The key owner
|
||||
user_id TEXT NOT NULL,
|
||||
UNIQUE (partition, offset)
|
||||
);
|
||||
`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to execute downgrade: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
@ -27,27 +27,22 @@ import (
|
|||
var keyChangesSchema = `
|
||||
-- Stores key change information about users. Used to determine when to send updated device lists to clients.
|
||||
CREATE TABLE IF NOT EXISTS keyserver_key_changes (
|
||||
partition BIGINT NOT NULL,
|
||||
offset BIGINT NOT NULL,
|
||||
change_id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
-- The key owner
|
||||
user_id TEXT NOT NULL,
|
||||
UNIQUE (partition, offset)
|
||||
UNIQUE (user_id)
|
||||
);
|
||||
`
|
||||
|
||||
// Replace based on partition|offset - we should never insert duplicates unless the kafka logs are wiped.
|
||||
// Rather than falling over, just overwrite (though this will mean clients with an existing sync token will
|
||||
// miss out on updates). TODO: Ideally we would detect when kafka logs are purged then purge this table too.
|
||||
// Replace based on user ID. We don't care how many times the user's keys have changed, only that they
|
||||
// have changed, hence we can just keep bumping the change ID for this user.
|
||||
const upsertKeyChangeSQL = "" +
|
||||
"INSERT INTO keyserver_key_changes (partition, offset, user_id)" +
|
||||
" VALUES ($1, $2, $3)" +
|
||||
" ON CONFLICT (partition, offset)" +
|
||||
" DO UPDATE SET user_id = $3"
|
||||
"INSERT OR REPLACE INTO keyserver_key_changes (user_id)" +
|
||||
" VALUES ($1)" +
|
||||
" RETURNING change_id"
|
||||
|
||||
// select the highest offset for each user in the range. The grouping by user gives distinct entries and then we just
|
||||
// take the max offset value as the latest offset.
|
||||
const selectKeyChangesSQL = "" +
|
||||
"SELECT user_id, MAX(offset) FROM keyserver_key_changes WHERE partition = $1 AND offset > $2 AND offset <= $3 GROUP BY user_id"
|
||||
"SELECT user_id, change_id FROM keyserver_key_changes WHERE change_id > $1 AND change_id <= $2"
|
||||
|
||||
type keyChangesStatements struct {
|
||||
db *sql.DB
|
||||
|
|
@ -60,31 +55,32 @@ func NewSqliteKeyChangesTable(db *sql.DB) (tables.KeyChanges, error) {
|
|||
db: db,
|
||||
}
|
||||
_, err := db.Exec(keyChangesSchema)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if s.upsertKeyChangeStmt, err = db.Prepare(upsertKeyChangeSQL); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if s.selectKeyChangesStmt, err = db.Prepare(selectKeyChangesSQL); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return s, nil
|
||||
return s, err
|
||||
}
|
||||
|
||||
func (s *keyChangesStatements) InsertKeyChange(ctx context.Context, partition int32, offset int64, userID string) error {
|
||||
_, err := s.upsertKeyChangeStmt.ExecContext(ctx, partition, offset, userID)
|
||||
return err
|
||||
func (s *keyChangesStatements) Prepare() (err error) {
|
||||
if s.upsertKeyChangeStmt, err = s.db.Prepare(upsertKeyChangeSQL); err != nil {
|
||||
return err
|
||||
}
|
||||
if s.selectKeyChangesStmt, err = s.db.Prepare(selectKeyChangesSQL); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *keyChangesStatements) InsertKeyChange(ctx context.Context, userID string) (changeID int64, err error) {
|
||||
err = s.upsertKeyChangeStmt.QueryRowContext(ctx, userID).Scan(&changeID)
|
||||
return
|
||||
}
|
||||
|
||||
func (s *keyChangesStatements) SelectKeyChanges(
|
||||
ctx context.Context, partition int32, fromOffset, toOffset int64,
|
||||
ctx context.Context, fromOffset, toOffset int64,
|
||||
) (userIDs []string, latestOffset int64, err error) {
|
||||
if toOffset == sarama.OffsetNewest {
|
||||
toOffset = math.MaxInt64
|
||||
}
|
||||
latestOffset = fromOffset
|
||||
rows, err := s.selectKeyChangesStmt.QueryContext(ctx, partition, fromOffset, toOffset)
|
||||
rows, err := s.selectKeyChangesStmt.QueryContext(ctx, fromOffset, toOffset)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
|
|
|||
|
|
@ -17,6 +17,7 @@ package sqlite3
|
|||
import (
|
||||
"github.com/matrix-org/dendrite/internal/sqlutil"
|
||||
"github.com/matrix-org/dendrite/keyserver/storage/shared"
|
||||
"github.com/matrix-org/dendrite/keyserver/storage/sqlite3/deltas"
|
||||
"github.com/matrix-org/dendrite/setup/config"
|
||||
)
|
||||
|
||||
|
|
@ -49,6 +50,15 @@ func NewDatabase(dbProperties *config.DatabaseOptions) (*shared.Database, error)
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
m := sqlutil.NewMigrations()
|
||||
deltas.LoadRefactorKeyChanges(m)
|
||||
if err = m.RunDeltas(db, dbProperties); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = kc.Prepare(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
d := &shared.Database{
|
||||
DB: db,
|
||||
Writer: sqlutil.NewExclusiveWriter(),
|
||||
|
|
|
|||
|
|
@ -44,15 +44,18 @@ func MustNotError(t *testing.T, err error) {
|
|||
func TestKeyChanges(t *testing.T) {
|
||||
db, clean := MustCreateDatabase(t)
|
||||
defer clean()
|
||||
MustNotError(t, db.StoreKeyChange(ctx, 0, 0, "@alice:localhost"))
|
||||
MustNotError(t, db.StoreKeyChange(ctx, 0, 1, "@bob:localhost"))
|
||||
MustNotError(t, db.StoreKeyChange(ctx, 0, 2, "@charlie:localhost"))
|
||||
userIDs, latest, err := db.KeyChanges(ctx, 0, 1, sarama.OffsetNewest)
|
||||
_, err := db.StoreKeyChange(ctx, "@alice:localhost")
|
||||
MustNotError(t, err)
|
||||
deviceChangeIDB, err := db.StoreKeyChange(ctx, "@bob:localhost")
|
||||
MustNotError(t, err)
|
||||
deviceChangeIDC, err := db.StoreKeyChange(ctx, "@charlie:localhost")
|
||||
MustNotError(t, err)
|
||||
userIDs, latest, err := db.KeyChanges(ctx, deviceChangeIDB, sarama.OffsetNewest)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to KeyChanges: %s", err)
|
||||
}
|
||||
if latest != 2 {
|
||||
t.Fatalf("KeyChanges: got latest=%d want 2", latest)
|
||||
if latest != deviceChangeIDC {
|
||||
t.Fatalf("KeyChanges: got latest=%d want %d", latest, deviceChangeIDC)
|
||||
}
|
||||
if !reflect.DeepEqual(userIDs, []string{"@charlie:localhost"}) {
|
||||
t.Fatalf("KeyChanges: wrong user_ids: %v", userIDs)
|
||||
|
|
@ -62,15 +65,21 @@ func TestKeyChanges(t *testing.T) {
|
|||
func TestKeyChangesNoDupes(t *testing.T) {
|
||||
db, clean := MustCreateDatabase(t)
|
||||
defer clean()
|
||||
MustNotError(t, db.StoreKeyChange(ctx, 0, 0, "@alice:localhost"))
|
||||
MustNotError(t, db.StoreKeyChange(ctx, 0, 1, "@alice:localhost"))
|
||||
MustNotError(t, db.StoreKeyChange(ctx, 0, 2, "@alice:localhost"))
|
||||
userIDs, latest, err := db.KeyChanges(ctx, 0, 0, sarama.OffsetNewest)
|
||||
deviceChangeIDA, err := db.StoreKeyChange(ctx, "@alice:localhost")
|
||||
MustNotError(t, err)
|
||||
deviceChangeIDB, err := db.StoreKeyChange(ctx, "@alice:localhost")
|
||||
MustNotError(t, err)
|
||||
if deviceChangeIDA == deviceChangeIDB {
|
||||
t.Fatalf("Expected change ID to be different even when inserting key change for the same user, got %d for both changes", deviceChangeIDA)
|
||||
}
|
||||
deviceChangeID, err := db.StoreKeyChange(ctx, "@alice:localhost")
|
||||
MustNotError(t, err)
|
||||
userIDs, latest, err := db.KeyChanges(ctx, 0, sarama.OffsetNewest)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to KeyChanges: %s", err)
|
||||
}
|
||||
if latest != 2 {
|
||||
t.Fatalf("KeyChanges: got latest=%d want 2", latest)
|
||||
if latest != deviceChangeID {
|
||||
t.Fatalf("KeyChanges: got latest=%d want %d", latest, deviceChangeID)
|
||||
}
|
||||
if !reflect.DeepEqual(userIDs, []string{"@alice:localhost"}) {
|
||||
t.Fatalf("KeyChanges: wrong user_ids: %v", userIDs)
|
||||
|
|
@ -80,15 +89,18 @@ func TestKeyChangesNoDupes(t *testing.T) {
|
|||
func TestKeyChangesUpperLimit(t *testing.T) {
|
||||
db, clean := MustCreateDatabase(t)
|
||||
defer clean()
|
||||
MustNotError(t, db.StoreKeyChange(ctx, 0, 0, "@alice:localhost"))
|
||||
MustNotError(t, db.StoreKeyChange(ctx, 0, 1, "@bob:localhost"))
|
||||
MustNotError(t, db.StoreKeyChange(ctx, 0, 2, "@charlie:localhost"))
|
||||
userIDs, latest, err := db.KeyChanges(ctx, 0, 0, 1)
|
||||
deviceChangeIDA, err := db.StoreKeyChange(ctx, "@alice:localhost")
|
||||
MustNotError(t, err)
|
||||
deviceChangeIDB, err := db.StoreKeyChange(ctx, "@bob:localhost")
|
||||
MustNotError(t, err)
|
||||
_, err = db.StoreKeyChange(ctx, "@charlie:localhost")
|
||||
MustNotError(t, err)
|
||||
userIDs, latest, err := db.KeyChanges(ctx, deviceChangeIDA, deviceChangeIDB)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to KeyChanges: %s", err)
|
||||
}
|
||||
if latest != 1 {
|
||||
t.Fatalf("KeyChanges: got latest=%d want 1", latest)
|
||||
if latest != deviceChangeIDB {
|
||||
t.Fatalf("KeyChanges: got latest=%d want %d", latest, deviceChangeIDB)
|
||||
}
|
||||
if !reflect.DeepEqual(userIDs, []string{"@bob:localhost"}) {
|
||||
t.Fatalf("KeyChanges: wrong user_ids: %v", userIDs)
|
||||
|
|
|
|||
|
|
@ -44,10 +44,12 @@ type DeviceKeys interface {
}

type KeyChanges interface {
	InsertKeyChange(ctx context.Context, partition int32, offset int64, userID string) error
	InsertKeyChange(ctx context.Context, userID string) (int64, error)
	// SelectKeyChanges returns the set (de-duplicated) of users who have changed their keys between the two offsets.
	// Results are exclusive of fromOffset and inclusive of toOffset. A toOffset of sarama.OffsetNewest means no upper offset.
	SelectKeyChanges(ctx context.Context, partition int32, fromOffset, toOffset int64) (userIDs []string, latestOffset int64, err error)
	SelectKeyChanges(ctx context.Context, fromOffset, toOffset int64) (userIDs []string, latestOffset int64, err error)

	Prepare() error
}

type StaleDeviceLists interface {
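The new Prepare method exists because the key-changes statements reference the change_id column, which only exists after the goose/deltas migration earlier in this diff has rewritten the table; NewDatabase therefore runs the deltas first and prepares afterwards. A hedged sketch of that ordering, using only calls shown elsewhere in this commit (the wrapper function name is an assumption):

// newKeyChangesTable is a hypothetical constructor showing the required order:
// create base schema, run deltas, then prepare statements against the new schema.
func newKeyChangesTable(db *sql.DB, dbProperties *config.DatabaseOptions) (tables.KeyChanges, error) {
	kc, err := postgres.NewPostgresKeyChangesTable(db) // creates the schema only
	if err != nil {
		return nil, err
	}
	m := sqlutil.NewMigrations()
	deltas.LoadRefactorKeyChanges(m) // registers Up/DownRefactorKeyChanges
	if err := m.RunDeltas(db, dbProperties); err != nil {
		return nil, err
	}
	// Statements reference change_id, which only exists once the delta has run.
	return kc, kc.Prepare()
}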
@ -86,6 +86,7 @@ type TransactionID struct {
// InputRoomEventsRequest is a request to InputRoomEvents
type InputRoomEventsRequest struct {
	InputRoomEvents []InputRoomEvent `json:"input_room_events"`
	Asynchronous    bool             `json:"async"`
}

// InputRoomEventsResponse is a response to InputRoomEvents
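For illustration only (not part of the commit): how a caller might flag a batch of events as asynchronous, so the roomserver publishes them to its JetStream input stream and returns without waiting for processing. The request, response and InputRoomEvents call are as defined in this diff; the helper name is hypothetical.

// queueEventsAsync is a hypothetical caller that fire-and-forgets input events.
func queueEventsAsync(ctx context.Context, rsAPI api.RoomserverInternalAPI, ires []api.InputRoomEvent) error {
	request := api.InputRoomEventsRequest{
		InputRoomEvents: ires,
		Asynchronous:    true, // publish to the input stream and return immediately
	}
	var response api.InputRoomEventsResponse
	rsAPI.InputRoomEvents(ctx, &request, &response)
	// With Asynchronous set, any error reported here is a publish error rather
	// than an event-processing error.
	return response.Err()
}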
@ -226,6 +226,10 @@ type QueryStateAndAuthChainRequest struct {
|
|||
PrevEventIDs []string `json:"prev_event_ids"`
|
||||
// The list of auth events for the event. Used to calculate the auth chain
|
||||
AuthEventIDs []string `json:"auth_event_ids"`
|
||||
// If true, the auth chain events for the auth event IDs given will be fetched only. Prev event IDs are ignored.
|
||||
// If false, state and auth chain events for the prev event IDs and entire current state will be included.
|
||||
// TODO: not a great API shape. It serves 2 main uses: false=>response for send_join, true=>response for /event_auth
|
||||
OnlyFetchAuthChain bool `json:"only_fetch_auth_chain"`
|
||||
// Should state resolution be ran on the result events?
|
||||
// TODO: check call sites and remove if we always want to do state res
|
||||
ResolveState bool `json:"resolve_state"`
|
||||
|
|
|
|||
|
|
@ -27,6 +27,7 @@ func SendEvents(
|
|||
ctx context.Context, rsAPI RoomserverInternalAPI,
|
||||
kind Kind, events []*gomatrixserverlib.HeaderedEvent,
|
||||
sendAsServer gomatrixserverlib.ServerName, txnID *TransactionID,
|
||||
async bool,
|
||||
) error {
|
||||
ires := make([]InputRoomEvent, len(events))
|
||||
for i, event := range events {
|
||||
|
|
@ -38,7 +39,7 @@ func SendEvents(
|
|||
TransactionID: txnID,
|
||||
}
|
||||
}
|
||||
return SendInputRoomEvents(ctx, rsAPI, ires)
|
||||
return SendInputRoomEvents(ctx, rsAPI, ires, async)
|
||||
}
|
||||
|
||||
// SendEventWithState writes an event with the specified kind to the roomserver
|
||||
|
|
@ -47,7 +48,7 @@ func SendEvents(
|
|||
func SendEventWithState(
|
||||
ctx context.Context, rsAPI RoomserverInternalAPI, kind Kind,
|
||||
state *gomatrixserverlib.RespState, event *gomatrixserverlib.HeaderedEvent,
|
||||
haveEventIDs map[string]bool,
|
||||
haveEventIDs map[string]bool, async bool,
|
||||
) error {
|
||||
outliers, err := state.Events()
|
||||
if err != nil {
|
||||
|
|
@ -79,14 +80,18 @@ func SendEventWithState(
|
|||
StateEventIDs: stateEventIDs,
|
||||
})
|
||||
|
||||
return SendInputRoomEvents(ctx, rsAPI, ires)
|
||||
return SendInputRoomEvents(ctx, rsAPI, ires, async)
|
||||
}
|
||||
|
||||
// SendInputRoomEvents to the roomserver.
|
||||
func SendInputRoomEvents(
|
||||
ctx context.Context, rsAPI RoomserverInternalAPI, ires []InputRoomEvent,
|
||||
ctx context.Context, rsAPI RoomserverInternalAPI,
|
||||
ires []InputRoomEvent, async bool,
|
||||
) error {
|
||||
request := InputRoomEventsRequest{InputRoomEvents: ires}
|
||||
request := InputRoomEventsRequest{
|
||||
InputRoomEvents: ires,
|
||||
Asynchronous: async,
|
||||
}
|
||||
var response InputRoomEventsResponse
|
||||
rsAPI.InputRoomEvents(ctx, &request, &response)
|
||||
return response.Err()
|
||||
|
|
|
|||
|
|
@ -3,7 +3,6 @@ package internal
|
|||
import (
|
||||
"context"
|
||||
|
||||
"github.com/Shopify/sarama"
|
||||
"github.com/getsentry/sentry-go"
|
||||
asAPI "github.com/matrix-org/dendrite/appservice/api"
|
||||
fsAPI "github.com/matrix-org/dendrite/federationapi/api"
|
||||
|
|
@ -16,6 +15,8 @@ import (
|
|||
"github.com/matrix-org/dendrite/roomserver/storage"
|
||||
"github.com/matrix-org/dendrite/setup/config"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
"github.com/nats-io/nats.go"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// RoomserverInternalAPI is an implementation of api.RoomserverInternalAPI
|
||||
|
|
@ -33,19 +34,19 @@ type RoomserverInternalAPI struct {
|
|||
*perform.Forgetter
|
||||
DB storage.Database
|
||||
Cfg *config.RoomServer
|
||||
Producer sarama.SyncProducer
|
||||
Cache caching.RoomServerCaches
|
||||
ServerName gomatrixserverlib.ServerName
|
||||
KeyRing gomatrixserverlib.JSONVerifier
|
||||
fsAPI fsAPI.FederationInternalAPI
|
||||
asAPI asAPI.AppServiceQueryAPI
|
||||
OutputRoomEventTopic string // Kafka topic for new output room events
|
||||
InputRoomEventTopic string // JetStream topic for new input room events
|
||||
OutputRoomEventTopic string // JetStream topic for new output room events
|
||||
PerspectiveServerNames []gomatrixserverlib.ServerName
|
||||
}
|
||||
|
||||
func NewRoomserverAPI(
|
||||
cfg *config.RoomServer, roomserverDB storage.Database, producer sarama.SyncProducer,
|
||||
outputRoomEventTopic string, caches caching.RoomServerCaches,
|
||||
cfg *config.RoomServer, roomserverDB storage.Database, consumer nats.JetStreamContext,
|
||||
inputRoomEventTopic, outputRoomEventTopic string, caches caching.RoomServerCaches,
|
||||
perspectiveServerNames []gomatrixserverlib.ServerName,
|
||||
) *RoomserverInternalAPI {
|
||||
serverACLs := acls.NewServerACLs(roomserverDB)
|
||||
|
|
@ -63,8 +64,10 @@ func NewRoomserverAPI(
|
|||
},
|
||||
Inputer: &input.Inputer{
|
||||
DB: roomserverDB,
|
||||
InputRoomEventTopic: inputRoomEventTopic,
|
||||
OutputRoomEventTopic: outputRoomEventTopic,
|
||||
Producer: producer,
|
||||
JetStream: consumer,
|
||||
Durable: cfg.Matrix.JetStream.Durable("RoomserverInputConsumer"),
|
||||
ServerName: cfg.Matrix.ServerName,
|
||||
ACLs: serverACLs,
|
||||
},
|
||||
|
|
@ -135,6 +138,10 @@ func (r *RoomserverInternalAPI) SetFederationAPI(fsAPI fsAPI.FederationInternalA
|
|||
r.Forgetter = &perform.Forgetter{
|
||||
DB: r.DB,
|
||||
}
|
||||
|
||||
if err := r.Inputer.Start(); err != nil {
|
||||
logrus.WithError(err).Panic("failed to start roomserver input API")
|
||||
}
|
||||
}
|
||||
|
||||
func (r *RoomserverInternalAPI) SetAppserviceAPI(asAPI asAPI.AppServiceQueryAPI) {
|
||||
|
|
|
|||
|
|
@ -19,19 +19,19 @@ import (
|
|||
"context"
|
||||
"encoding/json"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/Shopify/sarama"
|
||||
"github.com/Arceliar/phony"
|
||||
"github.com/getsentry/sentry-go"
|
||||
"github.com/matrix-org/dendrite/internal/hooks"
|
||||
"github.com/matrix-org/dendrite/roomserver/acls"
|
||||
"github.com/matrix-org/dendrite/roomserver/api"
|
||||
"github.com/matrix-org/dendrite/roomserver/storage"
|
||||
"github.com/matrix-org/dendrite/setup/jetstream"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
"github.com/nats-io/nats.go"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/tidwall/gjson"
|
||||
"go.uber.org/atomic"
|
||||
)
|
||||
|
||||
var keyContentFields = map[string]string{
|
||||
|
|
@ -42,105 +42,167 @@ var keyContentFields = map[string]string{
|
|||
|
||||
type Inputer struct {
|
||||
DB storage.Database
|
||||
Producer sarama.SyncProducer
|
||||
JetStream nats.JetStreamContext
|
||||
Durable nats.SubOpt
|
||||
ServerName gomatrixserverlib.ServerName
|
||||
ACLs *acls.ServerACLs
|
||||
InputRoomEventTopic string
|
||||
OutputRoomEventTopic string
|
||||
workers sync.Map // room ID -> *inputWorker
|
||||
workers sync.Map // room ID -> *phony.Inbox
|
||||
}
|
||||
|
||||
type inputTask struct {
|
||||
ctx context.Context
|
||||
event *api.InputRoomEvent
|
||||
wg *sync.WaitGroup
|
||||
err error // written back by worker, only safe to read when all tasks are done
|
||||
}
|
||||
|
||||
type inputWorker struct {
|
||||
r *Inputer
|
||||
running atomic.Bool
|
||||
input *fifoQueue
|
||||
}
|
||||
|
||||
// Guarded by a CAS on w.running
|
||||
func (w *inputWorker) start() {
|
||||
defer w.running.Store(false)
|
||||
for {
|
||||
select {
|
||||
case <-w.input.wait():
|
||||
task, ok := w.input.pop()
|
||||
if !ok {
|
||||
continue
|
||||
// onMessage is called when a new event arrives in the roomserver input stream.
|
||||
func (r *Inputer) Start() error {
|
||||
_, err := r.JetStream.Subscribe(
|
||||
r.InputRoomEventTopic,
|
||||
// We specifically don't use jetstream.WithJetStreamMessage here because we
|
||||
// queue the task off to a room-specific queue and the ACK needs to be sent
|
||||
// later, possibly with an error response to the inputter if synchronous.
|
||||
func(msg *nats.Msg) {
|
||||
roomID := msg.Header.Get("room_id")
|
||||
var inputRoomEvent api.InputRoomEvent
|
||||
if err := json.Unmarshal(msg.Data, &inputRoomEvent); err != nil {
|
||||
_ = msg.Term()
|
||||
return
|
||||
}
|
||||
roomserverInputBackpressure.With(prometheus.Labels{
|
||||
"room_id": task.event.Event.RoomID(),
|
||||
}).Dec()
|
||||
hooks.Run(hooks.KindNewEventReceived, task.event.Event)
|
||||
_, task.err = w.r.processRoomEvent(task.ctx, task.event)
|
||||
if task.err == nil {
|
||||
hooks.Run(hooks.KindNewEventPersisted, task.event.Event)
|
||||
} else {
|
||||
sentry.CaptureException(task.err)
|
||||
inbox, _ := r.workers.LoadOrStore(roomID, &phony.Inbox{})
|
||||
roomserverInputBackpressure.With(prometheus.Labels{"room_id": roomID}).Inc()
|
||||
inbox.(*phony.Inbox).Act(nil, func() {
|
||||
defer roomserverInputBackpressure.With(prometheus.Labels{"room_id": roomID}).Dec()
|
||||
if err := r.processRoomEvent(context.TODO(), &inputRoomEvent); err != nil {
|
||||
sentry.CaptureException(err)
|
||||
} else {
|
||||
hooks.Run(hooks.KindNewEventPersisted, inputRoomEvent.Event)
|
||||
}
|
||||
_ = msg.Ack()
|
||||
})
|
||||
},
|
||||
// NATS wants to acknowledge automatically by default when the message is
|
||||
// read from the stream, but we want to override that behaviour by making
|
||||
// sure that we only acknowledge when we're happy we've done everything we
|
||||
// can. This ensures we retry things when it makes sense to do so.
|
||||
nats.ManualAck(),
|
||||
// NATS will try to redeliver things to us automatically if we don't ack
|
||||
// or nak them within a certain amount of time. This stops that from
|
||||
// happening, so we don't end up doing a lot of unnecessary duplicate work.
|
||||
nats.MaxDeliver(0),
|
||||
// Use a durable named consumer.
|
||||
r.Durable,
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
// InputRoomEvents implements api.RoomserverInternalAPI
|
||||
func (r *Inputer) InputRoomEvents(
|
||||
ctx context.Context,
|
||||
request *api.InputRoomEventsRequest,
|
||||
response *api.InputRoomEventsResponse,
|
||||
) {
|
||||
if request.Asynchronous {
|
||||
var err error
|
||||
for _, e := range request.InputRoomEvents {
|
||||
msg := &nats.Msg{
|
||||
Subject: r.InputRoomEventTopic,
|
||||
Header: nats.Header{},
|
||||
}
|
||||
roomID := e.Event.RoomID()
|
||||
msg.Header.Set("room_id", roomID)
|
||||
msg.Data, err = json.Marshal(e)
|
||||
if err != nil {
|
||||
response.ErrMsg = err.Error()
|
||||
return
|
||||
}
|
||||
if _, err = r.JetStream.PublishMsg(msg); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
} else {
|
||||
responses := make(chan error, len(request.InputRoomEvents))
|
||||
defer close(responses)
|
||||
for _, e := range request.InputRoomEvents {
|
||||
inputRoomEvent := e
|
||||
roomID := inputRoomEvent.Event.RoomID()
|
||||
inbox, _ := r.workers.LoadOrStore(roomID, &phony.Inbox{})
|
||||
roomserverInputBackpressure.With(prometheus.Labels{"room_id": roomID}).Inc()
|
||||
inbox.(*phony.Inbox).Act(nil, func() {
|
||||
defer roomserverInputBackpressure.With(prometheus.Labels{"room_id": roomID}).Dec()
|
||||
err := r.processRoomEvent(context.TODO(), &inputRoomEvent)
|
||||
if err != nil {
|
||||
sentry.CaptureException(err)
|
||||
} else {
|
||||
hooks.Run(hooks.KindNewEventPersisted, inputRoomEvent.Event)
|
||||
}
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
default:
|
||||
responses <- err
|
||||
}
|
||||
})
|
||||
}
|
||||
for i := 0; i < len(request.InputRoomEvents); i++ {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case err := <-responses:
|
||||
if err != nil {
|
||||
response.ErrMsg = err.Error()
|
||||
return
|
||||
}
|
||||
}
|
||||
task.wg.Done()
|
||||
case <-time.After(time.Second * 5):
|
||||
return
|
||||
}
|
||||
}
|
||||
}
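// Illustrative sketch, not part of this diff: the synchronous branch above
// serialises work per room by sending each event through a phony.Inbox actor
// held in a sync.Map, so events for one room run strictly in order while
// different rooms proceed in parallel. The import path for phony and the work
// done inside Act are assumptions here.
package main

import (
	"fmt"
	"sync"

	"github.com/Arceliar/phony"
)

type roomRouter struct {
	workers sync.Map // room ID -> *phony.Inbox
}

func (r *roomRouter) submit(roomID, eventID string, done chan<- string) {
	inbox, _ := r.workers.LoadOrStore(roomID, &phony.Inbox{})
	inbox.(*phony.Inbox).Act(nil, func() {
		// Runs one at a time per room, in submission order.
		done <- fmt.Sprintf("%s processed in %s", eventID, roomID)
	})
}

func main() {
	r := &roomRouter{}
	done := make(chan string, 2)
	r.submit("!room:example.org", "$event1", done)
	r.submit("!room:example.org", "$event2", done)
	fmt.Println(<-done) // $event1 always completes before $event2
	fmt.Println(<-done)
}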
|
||||
|
||||
// WriteOutputEvents implements OutputRoomEventWriter
|
||||
func (r *Inputer) WriteOutputEvents(roomID string, updates []api.OutputEvent) error {
|
||||
messages := make([]*sarama.ProducerMessage, len(updates))
|
||||
for i := range updates {
|
||||
value, err := json.Marshal(updates[i])
|
||||
var err error
|
||||
for _, update := range updates {
|
||||
msg := &nats.Msg{
|
||||
Subject: r.OutputRoomEventTopic,
|
||||
Header: nats.Header{},
|
||||
}
|
||||
msg.Header.Set(jetstream.RoomID, roomID)
|
||||
msg.Data, err = json.Marshal(update)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
logger := log.WithFields(log.Fields{
|
||||
"room_id": roomID,
|
||||
"type": updates[i].Type,
|
||||
"type": update.Type,
|
||||
})
|
||||
if updates[i].NewRoomEvent != nil {
|
||||
eventType := updates[i].NewRoomEvent.Event.Type()
|
||||
if update.NewRoomEvent != nil {
|
||||
eventType := update.NewRoomEvent.Event.Type()
|
||||
logger = logger.WithFields(log.Fields{
|
||||
"event_type": eventType,
|
||||
"event_id": updates[i].NewRoomEvent.Event.EventID(),
|
||||
"adds_state": len(updates[i].NewRoomEvent.AddsStateEventIDs),
|
||||
"removes_state": len(updates[i].NewRoomEvent.RemovesStateEventIDs),
|
||||
"send_as_server": updates[i].NewRoomEvent.SendAsServer,
|
||||
"sender": updates[i].NewRoomEvent.Event.Sender(),
|
||||
"event_id": update.NewRoomEvent.Event.EventID(),
|
||||
"adds_state": len(update.NewRoomEvent.AddsStateEventIDs),
|
||||
"removes_state": len(update.NewRoomEvent.RemovesStateEventIDs),
|
||||
"send_as_server": update.NewRoomEvent.SendAsServer,
|
||||
"sender": update.NewRoomEvent.Event.Sender(),
|
||||
})
|
||||
if updates[i].NewRoomEvent.Event.StateKey() != nil {
|
||||
logger = logger.WithField("state_key", *updates[i].NewRoomEvent.Event.StateKey())
|
||||
if update.NewRoomEvent.Event.StateKey() != nil {
|
||||
logger = logger.WithField("state_key", *update.NewRoomEvent.Event.StateKey())
|
||||
}
|
||||
contentKey := keyContentFields[eventType]
|
||||
if contentKey != "" {
|
||||
value := gjson.GetBytes(updates[i].NewRoomEvent.Event.Content(), contentKey)
|
||||
value := gjson.GetBytes(update.NewRoomEvent.Event.Content(), contentKey)
|
||||
if value.Exists() {
|
||||
logger = logger.WithField("content_value", value.String())
|
||||
}
|
||||
}
|
||||
|
||||
if eventType == "m.room.server_acl" && updates[i].NewRoomEvent.Event.StateKeyEquals("") {
|
||||
ev := updates[i].NewRoomEvent.Event.Unwrap()
|
||||
if eventType == "m.room.server_acl" && update.NewRoomEvent.Event.StateKeyEquals("") {
|
||||
ev := update.NewRoomEvent.Event.Unwrap()
|
||||
defer r.ACLs.OnServerACLUpdate(ev)
|
||||
}
|
||||
}
|
||||
logger.Infof("Producing to topic '%s'", r.OutputRoomEventTopic)
|
||||
messages[i] = &sarama.ProducerMessage{
|
||||
Topic: r.OutputRoomEventTopic,
|
||||
Key: sarama.StringEncoder(roomID),
|
||||
Value: sarama.ByteEncoder(value),
|
||||
logger.Tracef("Producing to topic '%s'", r.OutputRoomEventTopic)
|
||||
if _, err := r.JetStream.PublishMsg(msg); err != nil {
|
||||
logger.WithError(err).Errorf("Failed to produce to topic '%s': %s", r.OutputRoomEventTopic, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
errs := r.Producer.SendMessages(messages)
|
||||
if errs != nil {
|
||||
for _, err := range errs.(sarama.ProducerErrors) {
|
||||
log.WithError(err).WithField("message_bytes", err.Msg.Value.Length()).Error("Write to kafka failed")
|
||||
}
|
||||
}
|
||||
return errs
|
||||
return nil
|
||||
}
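// Illustrative sketch, not part of this diff: the publish shape used by
// WriteOutputEvents above, reduced to its essentials - marshal the update, tag
// the message with the room ID header, publish through the JetStream context.
// The helper name and the interface{} payload are assumptions for the example.
package example

import (
	"encoding/json"

	"github.com/nats-io/nats.go"
)

func publishOutputEvent(js nats.JetStreamContext, topic, roomID string, update interface{}) error {
	data, err := json.Marshal(update)
	if err != nil {
		return err
	}
	msg := &nats.Msg{
		Subject: topic,
		Header:  nats.Header{},
		Data:    data,
	}
	msg.Header.Set("room_id", roomID) // same key as the jetstream.RoomID constant
	_, err = js.PublishMsg(msg)
	return err
}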
|
||||
|
||||
func init() {
|
||||
|
|
@ -156,67 +218,3 @@ var roomserverInputBackpressure = prometheus.NewGaugeVec(
|
|||
},
|
||||
[]string{"room_id"},
|
||||
)
|
||||
|
||||
// InputRoomEvents implements api.RoomserverInternalAPI
|
||||
func (r *Inputer) InputRoomEvents(
|
||||
_ context.Context,
|
||||
request *api.InputRoomEventsRequest,
|
||||
response *api.InputRoomEventsResponse,
|
||||
) {
|
||||
// Create a wait group. Each task that we dispatch will call Done on
|
||||
// this wait group so that we know when all of our events have been
|
||||
// processed.
|
||||
wg := &sync.WaitGroup{}
|
||||
wg.Add(len(request.InputRoomEvents))
|
||||
tasks := make([]*inputTask, len(request.InputRoomEvents))
|
||||
|
||||
for i, e := range request.InputRoomEvents {
|
||||
// Work out if we are running per-room workers or if we're just doing
|
||||
// it on a global basis (e.g. SQLite).
|
||||
roomID := "global"
|
||||
if r.DB.SupportsConcurrentRoomInputs() {
|
||||
roomID = e.Event.RoomID()
|
||||
}
|
||||
|
||||
// Look up the worker, or create it if it doesn't exist. This channel
|
||||
// is buffered to reduce the chance that we'll be blocked by another
|
||||
// room - the channel will be quite small as it's just pointer types.
|
||||
w, _ := r.workers.LoadOrStore(roomID, &inputWorker{
|
||||
r: r,
|
||||
input: newFIFOQueue(),
|
||||
})
|
||||
worker := w.(*inputWorker)
|
||||
|
||||
// Create a task. This contains the input event and a reference to
|
||||
// the wait group, so that the worker can notify us when this specific
|
||||
// task has been finished.
|
||||
tasks[i] = &inputTask{
|
||||
ctx: context.Background(),
|
||||
event: &request.InputRoomEvents[i],
|
||||
wg: wg,
|
||||
}
|
||||
|
||||
// Send the task to the worker.
|
||||
if worker.running.CAS(false, true) {
|
||||
go worker.start()
|
||||
}
|
||||
worker.input.push(tasks[i])
|
||||
roomserverInputBackpressure.With(prometheus.Labels{
|
||||
"room_id": roomID,
|
||||
}).Inc()
|
||||
}
|
||||
|
||||
// Wait for all of the workers to return results about our tasks.
|
||||
wg.Wait()
|
||||
|
||||
// If any of the tasks returned an error, we should probably report
|
||||
// that back to the caller.
|
||||
for _, task := range tasks {
|
||||
if task.err != nil {
|
||||
response.ErrMsg = task.err.Error()
|
||||
_, rejected := task.err.(*gomatrixserverlib.NotAllowed)
|
||||
response.NotAllowed = rejected
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -62,7 +62,7 @@ var processRoomEventDuration = prometheus.NewHistogramVec(
|
|||
func (r *Inputer) processRoomEvent(
|
||||
ctx context.Context,
|
||||
input *api.InputRoomEvent,
|
||||
) (eventID string, err error) {
|
||||
) (err error) {
|
||||
// Measure how long it takes to process this event.
|
||||
started := time.Now()
|
||||
defer func() {
|
||||
|
|
@ -88,11 +88,11 @@ func (r *Inputer) processRoomEvent(
|
|||
case gomatrixserverlib.EventIDFormatV1:
|
||||
if bytes.Equal(event.EventReference().EventSHA256, evs[0].EventReference().EventSHA256) {
|
||||
util.GetLogger(ctx).WithField("event_id", event.EventID()).Infof("Already processed event; ignoring")
|
||||
return event.EventID(), nil
|
||||
return nil
|
||||
}
|
||||
default:
|
||||
util.GetLogger(ctx).WithField("event_id", event.EventID()).Infof("Already processed event; ignoring")
|
||||
return event.EventID(), nil
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -124,14 +124,14 @@ func (r *Inputer) processRoomEvent(
|
|||
// Store the event.
|
||||
_, _, stateAtEvent, redactionEvent, redactedEventID, err := r.DB.StoreEvent(ctx, event, authEventNIDs, isRejected)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("r.DB.StoreEvent: %w", err)
|
||||
return fmt.Errorf("r.DB.StoreEvent: %w", err)
|
||||
}
|
||||
|
||||
// if storing this event results in it being redacted then do so.
|
||||
if !isRejected && redactedEventID == event.EventID() {
|
||||
r, rerr := eventutil.RedactEvent(redactionEvent, event)
|
||||
if rerr != nil {
|
||||
return "", fmt.Errorf("eventutil.RedactEvent: %w", rerr)
|
||||
return fmt.Errorf("eventutil.RedactEvent: %w", rerr)
|
||||
}
|
||||
event = r
|
||||
}
|
||||
|
|
@ -146,15 +146,15 @@ func (r *Inputer) processRoomEvent(
|
|||
"room": event.RoomID(),
|
||||
"sender": event.Sender(),
|
||||
}).Debug("Stored outlier")
|
||||
return event.EventID(), nil
|
||||
return nil
|
||||
}
|
||||
|
||||
roomInfo, err := r.DB.RoomInfo(ctx, event.RoomID())
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("r.DB.RoomInfo: %w", err)
|
||||
return fmt.Errorf("r.DB.RoomInfo: %w", err)
|
||||
}
|
||||
if roomInfo == nil {
|
||||
return "", fmt.Errorf("r.DB.RoomInfo missing for room %s", event.RoomID())
|
||||
return fmt.Errorf("r.DB.RoomInfo missing for room %s", event.RoomID())
|
||||
}
|
||||
|
||||
if stateAtEvent.BeforeStateSnapshotNID == 0 {
|
||||
|
|
@ -162,7 +162,7 @@ func (r *Inputer) processRoomEvent(
|
|||
// Lets calculate one.
|
||||
err = r.calculateAndSetState(ctx, input, *roomInfo, &stateAtEvent, event, isRejected)
|
||||
if err != nil && input.Kind != api.KindOld {
|
||||
return "", fmt.Errorf("r.calculateAndSetState: %w", err)
|
||||
return fmt.Errorf("r.calculateAndSetState: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -175,7 +175,7 @@ func (r *Inputer) processRoomEvent(
|
|||
"soft_fail": softfail,
|
||||
"sender": event.Sender(),
|
||||
}).Debug("Stored rejected event")
|
||||
return event.EventID(), rejectionErr
|
||||
return rejectionErr
|
||||
}
|
||||
|
||||
switch input.Kind {
|
||||
|
|
@ -189,7 +189,7 @@ func (r *Inputer) processRoomEvent(
|
|||
input.TransactionID, // transaction ID
|
||||
input.HasState, // rewrites state?
|
||||
); err != nil {
|
||||
return "", fmt.Errorf("r.updateLatestEvents: %w", err)
|
||||
return fmt.Errorf("r.updateLatestEvents: %w", err)
|
||||
}
|
||||
case api.KindOld:
|
||||
err = r.WriteOutputEvents(event.RoomID(), []api.OutputEvent{
|
||||
|
|
@ -201,7 +201,7 @@ func (r *Inputer) processRoomEvent(
|
|||
},
|
||||
})
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("r.WriteOutputEvents (old): %w", err)
|
||||
return fmt.Errorf("r.WriteOutputEvents (old): %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -220,12 +220,12 @@ func (r *Inputer) processRoomEvent(
|
|||
},
|
||||
})
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("r.WriteOutputEvents (redactions): %w", err)
|
||||
return fmt.Errorf("r.WriteOutputEvents (redactions): %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Update the extremities of the event graph for the room
|
||||
return event.EventID(), nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *Inputer) calculateAndSetState(
|
||||
|
|
|
|||
|
|
@ -1,64 +0,0 @@
|
|||
package input
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
type fifoQueue struct {
|
||||
tasks []*inputTask
|
||||
count int
|
||||
mutex sync.Mutex
|
||||
notifs chan struct{}
|
||||
}
|
||||
|
||||
func newFIFOQueue() *fifoQueue {
|
||||
q := &fifoQueue{
|
||||
notifs: make(chan struct{}, 1),
|
||||
}
|
||||
return q
|
||||
}
|
||||
|
||||
func (q *fifoQueue) push(frame *inputTask) {
|
||||
q.mutex.Lock()
|
||||
defer q.mutex.Unlock()
|
||||
q.tasks = append(q.tasks, frame)
|
||||
q.count++
|
||||
select {
|
||||
case q.notifs <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
// pop returns the first item of the queue, if there is one.
|
||||
// The second return value will indicate if a task was returned.
|
||||
// You must check this value, even after calling wait().
|
||||
func (q *fifoQueue) pop() (*inputTask, bool) {
|
||||
q.mutex.Lock()
|
||||
defer q.mutex.Unlock()
|
||||
if q.count == 0 {
|
||||
return nil, false
|
||||
}
|
||||
frame := q.tasks[0]
|
||||
q.tasks[0] = nil
|
||||
q.tasks = q.tasks[1:]
|
||||
q.count--
|
||||
if q.count == 0 {
|
||||
// Force a GC of the underlying array, since it might have
|
||||
// grown significantly if the queue was hammered for some reason
|
||||
q.tasks = nil
|
||||
}
|
||||
return frame, true
|
||||
}
|
||||
|
||||
// wait returns a channel which can be used to detect when an
|
||||
// item is waiting in the queue.
|
||||
func (q *fifoQueue) wait() <-chan struct{} {
|
||||
q.mutex.Lock()
|
||||
defer q.mutex.Unlock()
|
||||
if q.count > 0 && len(q.notifs) == 0 {
|
||||
ch := make(chan struct{})
|
||||
close(ch)
|
||||
return ch
|
||||
}
|
||||
return q.notifs
|
||||
}
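// Illustrative sketch, not part of this diff: how the wait()/pop() pair of
// this (now removed) queue was meant to be driven by a worker loop. The drain
// method is hypothetical and assumes the inputWorker type from the surrounding
// package; note that pop's ok value is re-checked every time, as the comment
// on pop() requires.
func (w *inputWorker) drain() {
	for {
		<-w.input.wait() // ready immediately if something is already queued
		for {
			task, ok := w.input.pop()
			if !ok {
				break // queue drained, go back to waiting
			}
			_ = task // process the task here
		}
	}
}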
|
||||
|
|
@ -457,6 +457,21 @@ func (r *Queryer) QueryStateAndAuthChain(
|
|||
response.RoomExists = true
|
||||
response.RoomVersion = info.RoomVersion
|
||||
|
||||
// handle this entirely separately to the other case so we don't have to pull out
|
||||
// the entire current state of the room
|
||||
// TODO: this probably means it should be a different query operation...
|
||||
if request.OnlyFetchAuthChain {
|
||||
var authEvents []*gomatrixserverlib.Event
|
||||
authEvents, err = GetAuthChain(ctx, r.DB.EventsFromIDs, request.AuthEventIDs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, event := range authEvents {
|
||||
response.AuthChainEvents = append(response.AuthChainEvents, event.Headered(info.RoomVersion))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var stateEvents []*gomatrixserverlib.Event
|
||||
stateEvents, err = r.loadStateAtEventIDs(ctx, *info, request.PrevEventIDs)
|
||||
if err != nil {
|
||||
|
|
|
|||
|
|
@ -23,8 +23,7 @@ import (
|
|||
"github.com/matrix-org/dendrite/roomserver/internal"
|
||||
"github.com/matrix-org/dendrite/roomserver/storage"
|
||||
"github.com/matrix-org/dendrite/setup/base"
|
||||
"github.com/matrix-org/dendrite/setup/config"
|
||||
"github.com/matrix-org/dendrite/setup/kafka"
|
||||
"github.com/matrix-org/dendrite/setup/jetstream"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
|
|
@ -41,8 +40,6 @@ func NewInternalAPI(
|
|||
) api.RoomserverInternalAPI {
|
||||
cfg := &base.Cfg.RoomServer
|
||||
|
||||
_, producer := kafka.SetupConsumerProducer(&cfg.Matrix.Kafka)
|
||||
|
||||
var perspectiveServerNames []gomatrixserverlib.ServerName
|
||||
for _, kp := range base.Cfg.FederationAPI.KeyPerspectives {
|
||||
perspectiveServerNames = append(perspectiveServerNames, kp.ServerName)
|
||||
|
|
@ -53,8 +50,12 @@ func NewInternalAPI(
|
|||
logrus.WithError(err).Panicf("failed to connect to room server db")
|
||||
}
|
||||
|
||||
js, _, _ := jetstream.Prepare(&cfg.Matrix.JetStream)
|
||||
|
||||
return internal.NewRoomserverAPI(
|
||||
cfg, roomserverDB, producer, string(cfg.Matrix.Kafka.TopicFor(config.TopicOutputRoomEvent)),
|
||||
cfg, roomserverDB, js,
|
||||
cfg.Matrix.JetStream.TopicFor(jetstream.InputRoomEvent),
|
||||
cfg.Matrix.JetStream.TopicFor(jetstream.OutputRoomEvent),
|
||||
base.Caches, perspectiveServerNames,
|
||||
)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,407 +0,0 @@
|
|||
package roomserver
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/ed25519"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/Shopify/sarama"
|
||||
"github.com/matrix-org/dendrite/internal/caching"
|
||||
"github.com/matrix-org/dendrite/roomserver/api"
|
||||
"github.com/matrix-org/dendrite/roomserver/internal"
|
||||
"github.com/matrix-org/dendrite/roomserver/storage"
|
||||
"github.com/matrix-org/dendrite/setup/base"
|
||||
"github.com/matrix-org/dendrite/setup/config"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
|
||||
testOrigin = gomatrixserverlib.ServerName("kaer.morhen")
|
||||
// we have to use an on-disk DB because we open multiple connections due to the *Updater structs.
|
||||
// Using :memory: results in a brand new DB for each open connection, and sharing memory via
|
||||
// ?cache=shared just allows read-only sharing, so writes to the database on other connections are lost.
|
||||
roomserverDBFileURI = "file:roomserver_test.db"
|
||||
roomserverDBFilePath = "./roomserver_test.db"
|
||||
)
|
||||
|
||||
var (
|
||||
ctx = context.Background()
|
||||
)
|
||||
|
||||
type dummyProducer struct {
|
||||
topic string
|
||||
producedMessages []*api.OutputEvent
|
||||
}
|
||||
|
||||
// SendMessage produces a given message, and returns only when it either has
|
||||
// succeeded or failed to produce. It will return the partition and the offset
|
||||
// of the produced message, or an error if the message failed to produce.
|
||||
func (p *dummyProducer) SendMessage(msg *sarama.ProducerMessage) (partition int32, offset int64, err error) {
|
||||
if msg.Topic != p.topic {
|
||||
return 0, 0, nil
|
||||
}
|
||||
be := msg.Value.(sarama.ByteEncoder)
|
||||
b := json.RawMessage(be)
|
||||
fmt.Println("SENDING >>>>>>>> ", string(b))
|
||||
var out api.OutputEvent
|
||||
err = json.Unmarshal(b, &out)
|
||||
if err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
p.producedMessages = append(p.producedMessages, &out)
|
||||
return 0, 0, nil
|
||||
}
|
||||
|
||||
// SendMessages produces a given set of messages, and returns only when all
|
||||
// messages in the set have either succeeded or failed. Note that messages
|
||||
// can succeed and fail individually; if some succeed and some fail,
|
||||
// SendMessages will return an error.
|
||||
func (p *dummyProducer) SendMessages(msgs []*sarama.ProducerMessage) error {
|
||||
for _, m := range msgs {
|
||||
p.SendMessage(m)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close shuts down the producer and waits for any buffered messages to be
|
||||
// flushed. You must call this function before a producer object passes out of
|
||||
// scope, as it may otherwise leak memory. You must call this before calling
|
||||
// Close on the underlying client.
|
||||
func (p *dummyProducer) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func deleteDatabase() {
|
||||
err := os.Remove(roomserverDBFilePath)
|
||||
if err != nil {
|
||||
fmt.Printf("failed to delete database %s: %s\n", roomserverDBFilePath, err)
|
||||
}
|
||||
}
|
||||
|
||||
type fledglingEvent struct {
|
||||
Type string
|
||||
StateKey *string
|
||||
Content interface{}
|
||||
Sender string
|
||||
RoomID string
|
||||
}
|
||||
|
||||
func mustCreateEvents(t *testing.T, roomVer gomatrixserverlib.RoomVersion, events []fledglingEvent) (result []*gomatrixserverlib.HeaderedEvent) {
|
||||
t.Helper()
|
||||
depth := int64(1)
|
||||
seed := make([]byte, ed25519.SeedSize) // zero seed
|
||||
key := ed25519.NewKeyFromSeed(seed)
|
||||
var prevs []string
|
||||
roomState := make(map[gomatrixserverlib.StateKeyTuple]string) // state -> event ID
|
||||
for _, ev := range events {
|
||||
eb := gomatrixserverlib.EventBuilder{
|
||||
Sender: ev.Sender,
|
||||
Depth: depth,
|
||||
Type: ev.Type,
|
||||
StateKey: ev.StateKey,
|
||||
RoomID: ev.RoomID,
|
||||
PrevEvents: prevs,
|
||||
}
|
||||
err := eb.SetContent(ev.Content)
|
||||
if err != nil {
|
||||
t.Fatalf("mustCreateEvent: failed to marshal event content %+v", ev.Content)
|
||||
}
|
||||
stateNeeded, err := gomatrixserverlib.StateNeededForEventBuilder(&eb)
|
||||
if err != nil {
|
||||
t.Fatalf("mustCreateEvent: failed to work out auth_events : %s", err)
|
||||
}
|
||||
var authEvents []string
|
||||
for _, tuple := range stateNeeded.Tuples() {
|
||||
eventID := roomState[tuple]
|
||||
if eventID != "" {
|
||||
authEvents = append(authEvents, eventID)
|
||||
}
|
||||
}
|
||||
eb.AuthEvents = authEvents
|
||||
signedEvent, err := eb.Build(time.Now(), testOrigin, "ed25519:test", key, roomVer)
|
||||
if err != nil {
|
||||
t.Fatalf("mustCreateEvent: failed to sign event: %s", err)
|
||||
}
|
||||
depth++
|
||||
prevs = []string{signedEvent.EventID()}
|
||||
if ev.StateKey != nil {
|
||||
roomState[gomatrixserverlib.StateKeyTuple{
|
||||
EventType: ev.Type,
|
||||
StateKey: *ev.StateKey,
|
||||
}] = signedEvent.EventID()
|
||||
}
|
||||
result = append(result, signedEvent.Headered(roomVer))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func mustLoadRawEvents(t *testing.T, ver gomatrixserverlib.RoomVersion, events []json.RawMessage) []*gomatrixserverlib.HeaderedEvent {
|
||||
t.Helper()
|
||||
hs := make([]*gomatrixserverlib.HeaderedEvent, len(events))
|
||||
for i := range events {
|
||||
e, err := gomatrixserverlib.NewEventFromTrustedJSON(events[i], false, ver)
|
||||
if err != nil {
|
||||
t.Fatalf("cannot load test data: " + err.Error())
|
||||
}
|
||||
hs[i] = e.Headered(ver)
|
||||
}
|
||||
return hs
|
||||
}
|
||||
|
||||
func mustCreateRoomserverAPI(t *testing.T) (api.RoomserverInternalAPI, *dummyProducer) {
|
||||
t.Helper()
|
||||
cfg := &config.Dendrite{}
|
||||
cfg.Defaults(true)
|
||||
cfg.Global.ServerName = testOrigin
|
||||
cfg.Global.Kafka.UseNaffka = true
|
||||
cfg.RoomServer.Database = config.DatabaseOptions{
|
||||
ConnectionString: roomserverDBFileURI,
|
||||
}
|
||||
dp := &dummyProducer{
|
||||
topic: cfg.Global.Kafka.TopicFor(config.TopicOutputRoomEvent),
|
||||
}
|
||||
cache, err := caching.NewInMemoryLRUCache(false)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to make caches: %s", err)
|
||||
}
|
||||
base := &base.BaseDendrite{
|
||||
Caches: cache,
|
||||
Cfg: cfg,
|
||||
}
|
||||
roomserverDB, err := storage.Open(&cfg.RoomServer.Database, base.Caches)
|
||||
if err != nil {
|
||||
logrus.WithError(err).Panicf("failed to connect to room server db")
|
||||
}
|
||||
return internal.NewRoomserverAPI(
|
||||
&cfg.RoomServer, roomserverDB, dp, string(cfg.Global.Kafka.TopicFor(config.TopicOutputRoomEvent)),
|
||||
base.Caches, nil,
|
||||
), dp
|
||||
}
|
||||
|
||||
func mustSendEvents(t *testing.T, ver gomatrixserverlib.RoomVersion, events []json.RawMessage) (api.RoomserverInternalAPI, *dummyProducer, []*gomatrixserverlib.HeaderedEvent) {
|
||||
t.Helper()
|
||||
rsAPI, dp := mustCreateRoomserverAPI(t)
|
||||
hevents := mustLoadRawEvents(t, ver, events)
|
||||
if err := api.SendEvents(ctx, rsAPI, api.KindNew, hevents, testOrigin, nil); err != nil {
|
||||
t.Errorf("failed to SendEvents: %s", err)
|
||||
}
|
||||
return rsAPI, dp, hevents
|
||||
}
|
||||
|
||||
func TestOutputRedactedEvent(t *testing.T) {
|
||||
redactionEvents := []json.RawMessage{
|
||||
// create event
|
||||
[]byte(`{"auth_events":[],"content":{"creator":"@userid:kaer.morhen"},"depth":0,"event_id":"$N4us6vqqq3RjvpKd:kaer.morhen","hashes":{"sha256":"WTdrCn/YsiounXcJPsLP8xT0ZjHiO5Ov0NvXYmK2onE"},"origin":"kaer.morhen","origin_server_ts":0,"prev_events":[],"prev_state":[],"room_id":"!roomid:kaer.morhen","sender":"@userid:kaer.morhen","signatures":{"kaer.morhen":{"ed25519:auto":"9+5JcpaN5b5KlHYHGp6r+GoNDH98lbfzGYwjfxensa5C5D/bDACaYnMDLnhwsHOE5nxgI+jT/GV271pz6PMSBQ"}},"state_key":"","type":"m.room.create"}`),
|
||||
// join event
|
||||
[]byte(`{"auth_events":[["$N4us6vqqq3RjvpKd:kaer.morhen",{"sha256":"SylirfgfXFhscZL7p10NmOa1nFFEckiwz0lAideQMIM"}]],"content":{"membership":"join"},"depth":1,"event_id":"$6sUiGPQ0a3tqYGKo:kaer.morhen","hashes":{"sha256":"eYVBC7RO+FlxRyW1aXYf/ad4Dzi7T93tArdGw3r4RwQ"},"origin":"kaer.morhen","origin_server_ts":0,"prev_events":[["$N4us6vqqq3RjvpKd:kaer.morhen",{"sha256":"SylirfgfXFhscZL7p10NmOa1nFFEckiwz0lAideQMIM"}]],"prev_state":[],"room_id":"!roomid:kaer.morhen","sender":"@userid:kaer.morhen","signatures":{"kaer.morhen":{"ed25519:auto":"tiDBTPFa53YMfHiupX3vSRE/ZcCiCjmGt7gDpIpDpwZapeays5Vqqcqb7KiywrDldpTkrrdJBAw2jXcq6ZyhDw"}},"state_key":"@userid:kaer.morhen","type":"m.room.member"}`),
|
||||
// room name
|
||||
[]byte(`{"auth_events":[["$N4us6vqqq3RjvpKd:kaer.morhen",{"sha256":"SylirfgfXFhscZL7p10NmOa1nFFEckiwz0lAideQMIM"}],["$6sUiGPQ0a3tqYGKo:kaer.morhen",{"sha256":"IS4HSMqpqVUGh1Z3qgC99YcaizjCoO4yFhYYe8j53IE"}]],"content":{"name":"My Room Name"},"depth":2,"event_id":"$VC1zZ9YWwuUbSNHD:kaer.morhen","hashes":{"sha256":"bpqTkfLx6KHzWz7/wwpsXnXwJWEGW14aV63ffexzDFg"},"origin":"kaer.morhen","origin_server_ts":0,"prev_events":[["$6sUiGPQ0a3tqYGKo:kaer.morhen",{"sha256":"IS4HSMqpqVUGh1Z3qgC99YcaizjCoO4yFhYYe8j53IE"}]],"prev_state":[],"room_id":"!roomid:kaer.morhen","sender":"@userid:kaer.morhen","signatures":{"kaer.morhen":{"ed25519:auto":"mhJZ3X4bAKrF/T0mtPf1K2Tmls0h6xGY1IPDpJ/SScQBqDlu3HQR2BPa7emqj5bViyLTWVNh+ZCpzx/6STTrAg"}},"state_key":"","type":"m.room.name"}`),
|
||||
// redact room name
|
||||
[]byte(`{"auth_events":[["$N4us6vqqq3RjvpKd:kaer.morhen",{"sha256":"SylirfgfXFhscZL7p10NmOa1nFFEckiwz0lAideQMIM"}],["$6sUiGPQ0a3tqYGKo:kaer.morhen",{"sha256":"IS4HSMqpqVUGh1Z3qgC99YcaizjCoO4yFhYYe8j53IE"}]],"content":{"reason":"Spamming"},"depth":3,"event_id":"$tJI0pE3b8u9UMYpT:kaer.morhen","hashes":{"sha256":"/3TStqa5SQqYaEtl7ajEvSRvu6d12MMKfICUzrBpd2Q"},"origin":"kaer.morhen","origin_server_ts":0,"prev_events":[["$VC1zZ9YWwuUbSNHD:kaer.morhen",{"sha256":"+l8cNa7syvm0EF7CAmQRlYknLEMjivnI4FLhB/TUBEY"}]],"redacts":"$VC1zZ9YWwuUbSNHD:kaer.morhen","room_id":"!roomid:kaer.morhen","sender":"@userid:kaer.morhen","signatures":{"kaer.morhen":{"ed25519:auto":"QBOh+amf0vTJbm6+9VwAcR9uJviBIor2KON0Y7+EyQx5YbUZEzW1HPeJxarLIHBcxMzgOVzjuM+StzjbUgDzAg"}},"type":"m.room.redaction"}`),
|
||||
// message
|
||||
[]byte(`{"auth_events":[["$N4us6vqqq3RjvpKd:kaer.morhen",{"sha256":"SylirfgfXFhscZL7p10NmOa1nFFEckiwz0lAideQMIM"}],["$6sUiGPQ0a3tqYGKo:kaer.morhen",{"sha256":"IS4HSMqpqVUGh1Z3qgC99YcaizjCoO4yFhYYe8j53IE"}]],"content":{"body":"Test Message"},"depth":4,"event_id":"$o8KHsgSIYbJrddnd:kaer.morhen","hashes":{"sha256":"IE/rGVlKOpiGWeIo887g1CK1drYqcWDZhL6THZHkJ1c"},"origin":"kaer.morhen","origin_server_ts":0,"prev_events":[["$tJI0pE3b8u9UMYpT:kaer.morhen",{"sha256":"zvmwyXuDox7jpA16JRH6Fc1zbfQht2tpkBbMTUOi3Jw"}]],"room_id":"!roomid:kaer.morhen","sender":"@userid:kaer.morhen","signatures":{"kaer.morhen":{"ed25519:auto":"/3z+pJjiJXWhwfqIEzmNksvBHCoXTktK/y0rRuWJXw6i1+ygRG/suDCKhFuuz6gPapRmEMPVILi2mJqHHXPKAg"}},"type":"m.room.message"}`),
|
||||
// redact previous message
|
||||
[]byte(`{"auth_events":[["$N4us6vqqq3RjvpKd:kaer.morhen",{"sha256":"SylirfgfXFhscZL7p10NmOa1nFFEckiwz0lAideQMIM"}],["$6sUiGPQ0a3tqYGKo:kaer.morhen",{"sha256":"IS4HSMqpqVUGh1Z3qgC99YcaizjCoO4yFhYYe8j53IE"}]],"content":{"reason":"Spamming more"},"depth":5,"event_id":"$UpsE8belb2gJItJG:kaer.morhen","hashes":{"sha256":"zU8PWJOld/I7OtjdpltFSKC+DMNm2ZyEXAHcprsafD0"},"origin":"kaer.morhen","origin_server_ts":0,"prev_events":[["$o8KHsgSIYbJrddnd:kaer.morhen",{"sha256":"UgjMuCFXH4warIjKuwlRq9zZ6dSJrZWCd+CkqtgLSHM"}]],"redacts":"$o8KHsgSIYbJrddnd:kaer.morhen","room_id":"!roomid:kaer.morhen","sender":"@userid:kaer.morhen","signatures":{"kaer.morhen":{"ed25519:auto":"zxFGr/7aGOzqOEN6zRNrBpFkkMnfGFPbCteYL33wC+PycBPIK+2WRa5qlAR2+lcLiK3HjIzwRYkKNsVFTqvRAw"}},"type":"m.room.redaction"}`),
|
||||
}
|
||||
var redactedOutputs []api.OutputEvent
|
||||
deleteDatabase()
|
||||
_, producer, hevents := mustSendEvents(t, gomatrixserverlib.RoomVersionV1, redactionEvents)
|
||||
defer deleteDatabase()
|
||||
for _, msg := range producer.producedMessages {
|
||||
if msg.Type == api.OutputTypeRedactedEvent {
|
||||
redactedOutputs = append(redactedOutputs, *msg)
|
||||
}
|
||||
}
|
||||
wantRedactedOutputs := []api.OutputEvent{
|
||||
{
|
||||
Type: api.OutputTypeRedactedEvent,
|
||||
RedactedEvent: &api.OutputRedactedEvent{
|
||||
RedactedEventID: hevents[2].EventID(),
|
||||
RedactedBecause: hevents[3],
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: api.OutputTypeRedactedEvent,
|
||||
RedactedEvent: &api.OutputRedactedEvent{
|
||||
RedactedEventID: hevents[4].EventID(),
|
||||
RedactedBecause: hevents[5],
|
||||
},
|
||||
},
|
||||
}
|
||||
t.Logf("redactedOutputs: %+v", redactedOutputs)
|
||||
if len(wantRedactedOutputs) != len(redactedOutputs) {
|
||||
t.Fatalf("Got %d redacted events, want %d", len(redactedOutputs), len(wantRedactedOutputs))
|
||||
}
|
||||
for i := 0; i < len(wantRedactedOutputs); i++ {
|
||||
if !reflect.DeepEqual(*redactedOutputs[i].RedactedEvent, *wantRedactedOutputs[i].RedactedEvent) {
|
||||
t.Errorf("OutputRedactionEvent %d: wrong event got:\n%+v want:\n%+v", i+1, redactedOutputs[i].RedactedEvent, wantRedactedOutputs[i].RedactedEvent)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// This tests that rewriting state works correctly.
|
||||
// This creates a small room with a create/join/name state, then replays it
|
||||
// with a new room name. We expect the output events to contain the original events,
|
||||
// followed by a single OutputNewRoomEvent with RewritesState set to true with the
|
||||
// rewritten state events (with the 2nd room name).
|
||||
func TestOutputRewritesState(t *testing.T) {
|
||||
roomID := "!foo:" + string(testOrigin)
|
||||
alice := "@alice:" + string(testOrigin)
|
||||
emptyKey := ""
|
||||
originalEvents := mustCreateEvents(t, gomatrixserverlib.RoomVersionV6, []fledglingEvent{
|
||||
{
|
||||
RoomID: roomID,
|
||||
Sender: alice,
|
||||
Content: map[string]interface{}{
|
||||
"creator": alice,
|
||||
"room_version": "6",
|
||||
},
|
||||
StateKey: &emptyKey,
|
||||
Type: gomatrixserverlib.MRoomCreate,
|
||||
},
|
||||
{
|
||||
RoomID: roomID,
|
||||
Sender: alice,
|
||||
Content: map[string]interface{}{
|
||||
"membership": "join",
|
||||
},
|
||||
StateKey: &alice,
|
||||
Type: gomatrixserverlib.MRoomMember,
|
||||
},
|
||||
{
|
||||
RoomID: roomID,
|
||||
Sender: alice,
|
||||
Content: map[string]interface{}{
|
||||
"body": "hello world",
|
||||
},
|
||||
StateKey: nil,
|
||||
Type: "m.room.message",
|
||||
},
|
||||
{
|
||||
RoomID: roomID,
|
||||
Sender: alice,
|
||||
Content: map[string]interface{}{
|
||||
"name": "Room Name",
|
||||
},
|
||||
StateKey: &emptyKey,
|
||||
Type: "m.room.name",
|
||||
},
|
||||
})
|
||||
rewriteEvents := mustCreateEvents(t, gomatrixserverlib.RoomVersionV6, []fledglingEvent{
|
||||
{
|
||||
RoomID: roomID,
|
||||
Sender: alice,
|
||||
Content: map[string]interface{}{
|
||||
"creator": alice,
|
||||
},
|
||||
StateKey: &emptyKey,
|
||||
Type: gomatrixserverlib.MRoomCreate,
|
||||
},
|
||||
{
|
||||
RoomID: roomID,
|
||||
Sender: alice,
|
||||
Content: map[string]interface{}{
|
||||
"membership": "join",
|
||||
},
|
||||
StateKey: &alice,
|
||||
Type: gomatrixserverlib.MRoomMember,
|
||||
},
|
||||
{
|
||||
RoomID: roomID,
|
||||
Sender: alice,
|
||||
Content: map[string]interface{}{
|
||||
"name": "Room Name 2",
|
||||
},
|
||||
StateKey: &emptyKey,
|
||||
Type: "m.room.name",
|
||||
},
|
||||
{
|
||||
RoomID: roomID,
|
||||
Sender: alice,
|
||||
Content: map[string]interface{}{
|
||||
"body": "hello world 2",
|
||||
},
|
||||
StateKey: nil,
|
||||
Type: "m.room.message",
|
||||
},
|
||||
})
|
||||
deleteDatabase()
|
||||
rsAPI, producer := mustCreateRoomserverAPI(t)
|
||||
defer deleteDatabase()
|
||||
err := api.SendEvents(context.Background(), rsAPI, api.KindNew, originalEvents, testOrigin, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to send original events: %s", err)
|
||||
}
|
||||
// assert we got them produced; this is just a sanity check and isn't the intention of this test
|
||||
if len(producer.producedMessages) != len(originalEvents) {
|
||||
t.Fatalf("SendEvents didn't result in same number of produced output events: got %d want %d", len(producer.producedMessages), len(originalEvents))
|
||||
}
|
||||
producer.producedMessages = nil // we aren't actually interested in these events, just the rewrite ones
|
||||
|
||||
var inputEvents []api.InputRoomEvent
|
||||
// slowly build up the state IDs again, we're basically telling the roomserver what to store as a snapshot
|
||||
var stateIDs []string
|
||||
// skip the last event, we'll use this to tie together the rewrite as the KindNew event
|
||||
for i := 0; i < len(rewriteEvents)-1; i++ {
|
||||
ev := rewriteEvents[i]
|
||||
inputEvents = append(inputEvents, api.InputRoomEvent{
|
||||
Kind: api.KindOutlier,
|
||||
Event: ev,
|
||||
AuthEventIDs: ev.AuthEventIDs(),
|
||||
HasState: true,
|
||||
StateEventIDs: stateIDs,
|
||||
})
|
||||
if ev.StateKey() != nil {
|
||||
stateIDs = append(stateIDs, ev.EventID())
|
||||
}
|
||||
}
|
||||
lastEv := rewriteEvents[len(rewriteEvents)-1]
|
||||
inputEvents = append(inputEvents, api.InputRoomEvent{
|
||||
Kind: api.KindNew,
|
||||
Event: lastEv,
|
||||
AuthEventIDs: lastEv.AuthEventIDs(),
|
||||
HasState: true,
|
||||
StateEventIDs: stateIDs,
|
||||
})
|
||||
if err := api.SendInputRoomEvents(context.Background(), rsAPI, inputEvents); err != nil {
|
||||
t.Fatalf("SendInputRoomEvents returned error for rewrite events: %s", err)
|
||||
}
|
||||
// we should just have one output event with the entire state of the room in it
|
||||
if len(producer.producedMessages) != 1 {
|
||||
t.Fatalf("Rewritten events got output, want only 1 got %d", len(producer.producedMessages))
|
||||
}
|
||||
outputEvent := producer.producedMessages[len(producer.producedMessages)-1]
|
||||
if !outputEvent.NewRoomEvent.RewritesState {
|
||||
t.Errorf("RewritesState flag not set on output event")
|
||||
}
|
||||
if !reflect.DeepEqual(stateIDs, outputEvent.NewRoomEvent.AddsStateEventIDs) {
|
||||
t.Errorf("Output event is missing room state event IDs, got %v want %v", outputEvent.NewRoomEvent.AddsStateEventIDs, stateIDs)
|
||||
}
|
||||
if !bytes.Equal(outputEvent.NewRoomEvent.Event.JSON(), lastEv.JSON()) {
|
||||
t.Errorf(
|
||||
"Output event isn't the latest KindNew event:\ngot %s\nwant %s",
|
||||
string(outputEvent.NewRoomEvent.Event.JSON()),
|
||||
string(lastEv.JSON()),
|
||||
)
|
||||
}
|
||||
if len(outputEvent.NewRoomEvent.AddStateEvents) != len(stateIDs) {
|
||||
t.Errorf("Output event is missing room state events themselves, got %d want %d", len(outputEvent.NewRoomEvent.AddStateEvents), len(stateIDs))
|
||||
}
|
||||
// make sure the state got overwritten, check the room name
|
||||
hasRoomName := false
|
||||
for _, ev := range outputEvent.NewRoomEvent.AddStateEvents {
|
||||
if ev.Type() == "m.room.name" {
|
||||
hasRoomName = string(ev.Content()) == `{"name":"Room Name 2"}`
|
||||
}
|
||||
}
|
||||
if !hasRoomName {
|
||||
t.Errorf("Output event did not overwrite room state")
|
||||
}
|
||||
}
|
||||
|
|
@ -81,8 +81,6 @@ type BaseDendrite struct {
|
|||
Cfg *config.Dendrite
|
||||
Caches *caching.Caches
|
||||
DNSCache *gomatrixserverlib.DNSCache
|
||||
// KafkaConsumer sarama.Consumer
|
||||
// KafkaProducer sarama.SyncProducer
|
||||
}
|
||||
|
||||
const NoListener = ""
|
||||
|
|
|
|||
|
|
@ -40,7 +40,7 @@ var keyIDRegexp = regexp.MustCompile("^ed25519:[a-zA-Z0-9_]+$")
|
|||
|
||||
// Version is the current version of the config format.
|
||||
// This will change whenever we make breaking changes to the config format.
|
||||
const Version = 1
|
||||
const Version = 2
|
||||
|
||||
// Dendrite contains all the config used by a dendrite process.
|
||||
// Relative paths are resolved relative to the current working directory
|
||||
|
|
@ -292,7 +292,7 @@ func (config *Dendrite) Derive() error {
|
|||
|
||||
// SetDefaults sets default config values if they are not explicitly set.
|
||||
func (c *Dendrite) Defaults(generate bool) {
|
||||
c.Version = 1
|
||||
c.Version = Version
|
||||
|
||||
c.Global.Defaults(generate)
|
||||
c.ClientAPI.Defaults(generate)
|
||||
|
|
@ -325,6 +325,7 @@ func (c *Dendrite) Verify(configErrs *ConfigErrors, isMonolith bool) {
|
|||
}
|
||||
|
||||
func (c *Dendrite) Wiring() {
|
||||
c.Global.JetStream.Matrix = &c.Global
|
||||
c.ClientAPI.Matrix = &c.Global
|
||||
c.EDUServer.Matrix = &c.Global
|
||||
c.FederationAPI.Matrix = &c.Global
|
||||
|
|
@ -420,7 +421,11 @@ func (config *Dendrite) check(_ bool) error { // monolithic
|
|||
|
||||
if config.Version != Version {
|
||||
configErrs.Add(fmt.Sprintf(
|
||||
"unknown config version %q, expected %q", config.Version, Version,
|
||||
"config version is %q, expected %q - this means that the format of the configuration "+
|
||||
"file has changed in some significant way, so please revisit the sample config "+
|
||||
"and ensure you are not missing any important options that may have been added "+
|
||||
"or changed recently!",
|
||||
config.Version, Version,
|
||||
))
|
||||
return configErrs
|
||||
}
|
||||
|
|
|
|||
|
|
@ -46,8 +46,8 @@ type Global struct {
|
|||
// Defaults to an empty array.
|
||||
TrustedIDServers []string `yaml:"trusted_third_party_id_servers"`
|
||||
|
||||
// Kafka/Naffka configuration
|
||||
Kafka Kafka `yaml:"kafka"`
|
||||
// JetStream configuration
|
||||
JetStream JetStream `yaml:"jetstream"`
|
||||
|
||||
// Metrics configuration
|
||||
Metrics Metrics `yaml:"metrics"`
|
||||
|
|
@ -68,7 +68,7 @@ func (c *Global) Defaults(generate bool) {
|
|||
}
|
||||
c.KeyValidityPeriod = time.Hour * 24 * 7
|
||||
|
||||
c.Kafka.Defaults(generate)
|
||||
c.JetStream.Defaults(generate)
|
||||
c.Metrics.Defaults(generate)
|
||||
c.DNSCache.Defaults()
|
||||
c.Sentry.Defaults()
|
||||
|
|
@ -78,7 +78,7 @@ func (c *Global) Verify(configErrs *ConfigErrors, isMonolith bool) {
|
|||
checkNotEmpty(configErrs, "global.server_name", string(c.ServerName))
|
||||
checkNotEmpty(configErrs, "global.private_key", string(c.PrivateKeyPath))
|
||||
|
||||
c.Kafka.Verify(configErrs, isMonolith)
|
||||
c.JetStream.Verify(configErrs, isMonolith)
|
||||
c.Metrics.Verify(configErrs, isMonolith)
|
||||
c.Sentry.Verify(configErrs, isMonolith)
|
||||
c.DNSCache.Verify(configErrs, isMonolith)
|
||||
|
|
|
|||
setup/config/config_jetstream.go (new file, 46 lines)
@ -0,0 +1,46 @@
|
|||
package config

import (
	"fmt"

	"github.com/nats-io/nats.go"
)

type JetStream struct {
	Matrix *Global `yaml:"-"`

	// Persistent directory to store JetStream streams in.
	StoragePath Path `yaml:"storage_path"`
	// A list of NATS addresses to connect to. If none are specified, an
	// internal NATS server will be used when running in monolith mode only.
	Addresses []string `yaml:"addresses"`
	// The prefix to use for stream names for this homeserver - really only
	// useful if running more than one Dendrite on the same NATS deployment.
	TopicPrefix string `yaml:"topic_prefix"`
	// Keep all storage in memory. This is mostly useful for unit tests.
	InMemory bool `yaml:"in_memory"`
}

func (c *JetStream) TopicFor(name string) string {
	return fmt.Sprintf("%s%s", c.TopicPrefix, name)
}

func (c *JetStream) Durable(name string) nats.SubOpt {
	return nats.Durable(c.TopicFor(name))
}

func (c *JetStream) Defaults(generate bool) {
	c.Addresses = []string{}
	c.TopicPrefix = "Dendrite"
	if generate {
		c.StoragePath = Path("./")
	}
}

func (c *JetStream) Verify(configErrs *ConfigErrors, isMonolith bool) {
	// If we are running in a polylith deployment then we need at least
	// one NATS JetStream server to talk to.
	if !isMonolith {
		checkNotZero(configErrs, "global.jetstream.addresses", int64(len(c.Addresses)))
	}
}
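// Illustrative sketch, not part of this diff: how the helpers above are
// typically combined by a consumer. TopicFor namespaces the stream subject
// with the configured prefix and Durable derives a matching durable consumer
// name for Subscribe. The consumer name is an assumption.
package example

import (
	"github.com/matrix-org/dendrite/setup/config"
	"github.com/matrix-org/dendrite/setup/jetstream"
	"github.com/nats-io/nats.go"
)

func subscribeOutputRoomEvents(cfg *config.JetStream, js nats.JetStreamContext, onMessage nats.MsgHandler) error {
	subject := cfg.TopicFor(jetstream.OutputRoomEvent) // e.g. "DendriteOutputRoomEvent"
	_, err := js.Subscribe(subject, onMessage,
		cfg.Durable("ExampleOutputRoomEventConsumer"),
		nats.ManualAck(),
	)
	return err
}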
|
||||
|
|
@ -1,63 +0,0 @@
|
|||
package config
|
||||
|
||||
import "fmt"
|
||||
|
||||
// Defined Kafka topics.
|
||||
const (
|
||||
TopicOutputTypingEvent = "OutputTypingEvent"
|
||||
TopicOutputSendToDeviceEvent = "OutputSendToDeviceEvent"
|
||||
TopicOutputKeyChangeEvent = "OutputKeyChangeEvent"
|
||||
TopicOutputRoomEvent = "OutputRoomEvent"
|
||||
TopicOutputClientData = "OutputClientData"
|
||||
TopicOutputReceiptEvent = "OutputReceiptEvent"
|
||||
)
|
||||
|
||||
type Kafka struct {
|
||||
// A list of kafka addresses to connect to.
|
||||
Addresses []string `yaml:"addresses"`
|
||||
// The prefix to use for Kafka topic names for this homeserver - really only
|
||||
// useful if running more than one Dendrite on the same Kafka deployment.
|
||||
TopicPrefix string `yaml:"topic_prefix"`
|
||||
// Whether to use naffka instead of kafka.
|
||||
// Naffka can only be used when running dendrite as a single monolithic server.
|
||||
// Kafka can be used both with a monolithic server and when running the
|
||||
// components as separate servers.
|
||||
UseNaffka bool `yaml:"use_naffka"`
|
||||
// The Naffka database is used internally by the naffka library, if used.
|
||||
Database DatabaseOptions `yaml:"naffka_database"`
|
||||
// The max size a Kafka message passed between consumer/producer can have
|
||||
// Equals roughly max.message.bytes / fetch.message.max.bytes in Kafka
|
||||
MaxMessageBytes *int `yaml:"max_message_bytes"`
|
||||
}
|
||||
|
||||
func (k *Kafka) TopicFor(name string) string {
|
||||
return fmt.Sprintf("%s%s", k.TopicPrefix, name)
|
||||
}
|
||||
|
||||
func (c *Kafka) Defaults(generate bool) {
|
||||
c.UseNaffka = true
|
||||
c.Database.Defaults(10)
|
||||
if generate {
|
||||
c.Addresses = []string{"localhost:2181"}
|
||||
c.Database.ConnectionString = DataSource("file:naffka.db")
|
||||
}
|
||||
c.TopicPrefix = "Dendrite"
|
||||
|
||||
maxBytes := 1024 * 1024 * 8 // about 8MB
|
||||
c.MaxMessageBytes = &maxBytes
|
||||
}
|
||||
|
||||
func (c *Kafka) Verify(configErrs *ConfigErrors, isMonolith bool) {
|
||||
if c.UseNaffka {
|
||||
if !isMonolith {
|
||||
configErrs.Add("naffka can only be used in a monolithic server")
|
||||
}
|
||||
checkNotEmpty(configErrs, "global.kafka.database.connection_string", string(c.Database.ConnectionString))
|
||||
} else {
|
||||
// If we aren't using naffka then we need to have at least one kafka
|
||||
// server to talk to.
|
||||
checkNotZero(configErrs, "global.kafka.addresses", int64(len(c.Addresses)))
|
||||
}
|
||||
checkNotEmpty(configErrs, "global.kafka.topic_prefix", string(c.TopicPrefix))
|
||||
checkPositive(configErrs, "global.kafka.max_message_bytes", int64(*c.MaxMessageBytes))
|
||||
}
|
||||
|
|
@ -33,7 +33,7 @@ func TestLoadConfigRelative(t *testing.T) {
|
|||
}
|
||||
|
||||
const testConfig = `
|
||||
version: 1
|
||||
version: 2
|
||||
global:
|
||||
server_name: localhost
|
||||
private_key: matrix_key.pem
|
||||
|
|
|
|||
setup/jetstream/helpers.go (new file, 11 lines)
@ -0,0 +1,11 @@
|
|||
package jetstream

import "github.com/nats-io/nats.go"

func WithJetStreamMessage(msg *nats.Msg, f func(msg *nats.Msg) bool) {
	if f(msg) {
		_ = msg.Ack()
	} else {
		_ = msg.Nak()
	}
}
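// Illustrative sketch, not part of this new file: a handler built on
// WithJetStreamMessage. Returning true acks the message; returning false naks
// it so that NATS redelivers it later. The payload shape is an assumption for
// the example.
package example

import (
	"encoding/json"

	"github.com/matrix-org/dendrite/setup/jetstream"
	"github.com/nats-io/nats.go"
)

func onMessage(msg *nats.Msg) {
	jetstream.WithJetStreamMessage(msg, func(msg *nats.Msg) bool {
		var payload struct {
			RoomID string `json:"room_id"`
		}
		if err := json.Unmarshal(msg.Data, &payload); err != nil {
			return true // malformed input will never succeed, ack it and move on
		}
		// ... handle the payload ...
		return true
	})
}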
|
||||
setup/jetstream/nats.go (new file, 98 lines)
@ -0,0 +1,98 @@
|
|||
package jetstream
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/Shopify/sarama"
|
||||
"github.com/matrix-org/dendrite/setup/config"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
saramajs "github.com/S7evinK/saramajetstream"
|
||||
natsserver "github.com/nats-io/nats-server/v2/server"
|
||||
"github.com/nats-io/nats.go"
|
||||
natsclient "github.com/nats-io/nats.go"
|
||||
)
|
||||
|
||||
var natsServer *natsserver.Server
|
||||
var natsServerMutex sync.Mutex
|
||||
|
||||
func Prepare(cfg *config.JetStream) (nats.JetStreamContext, sarama.Consumer, sarama.SyncProducer) {
|
||||
// check if we need an in-process NATS Server
|
||||
if len(cfg.Addresses) != 0 {
|
||||
return setupNATS(cfg, nil)
|
||||
}
|
||||
natsServerMutex.Lock()
|
||||
if natsServer == nil {
|
||||
var err error
|
||||
natsServer, err = natsserver.NewServer(&natsserver.Options{
|
||||
ServerName: "monolith",
|
||||
DontListen: true,
|
||||
JetStream: true,
|
||||
StoreDir: string(cfg.StoragePath),
|
||||
NoSystemAccount: true,
|
||||
AllowNewAccounts: false,
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
natsServer.ConfigureLogger()
|
||||
go natsServer.Start()
|
||||
}
|
||||
natsServerMutex.Unlock()
|
||||
if !natsServer.ReadyForConnections(time.Second * 10) {
|
||||
logrus.Fatalln("NATS did not start in time")
|
||||
}
|
||||
nc, err := natsclient.Connect("", natsclient.InProcessServer(natsServer))
|
||||
if err != nil {
|
||||
logrus.Fatalln("Failed to create NATS client")
|
||||
}
|
||||
return setupNATS(cfg, nc)
|
||||
}
|
||||
|
||||
func setupNATS(cfg *config.JetStream, nc *natsclient.Conn) (nats.JetStreamContext, sarama.Consumer, sarama.SyncProducer) {
|
||||
if nc == nil {
|
||||
var err error
|
||||
nc, err = nats.Connect(strings.Join(cfg.Addresses, ","))
|
||||
if err != nil {
|
||||
logrus.WithError(err).Panic("Unable to connect to NATS")
|
||||
return nil, nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
s, err := nc.JetStream()
|
||||
if err != nil {
|
||||
logrus.WithError(err).Panic("Unable to get JetStream context")
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
for _, stream := range streams { // streams are defined in streams.go
|
||||
name := cfg.TopicFor(stream.Name)
|
||||
info, err := s.StreamInfo(name)
|
||||
if err != nil && err != natsclient.ErrStreamNotFound {
|
||||
logrus.WithError(err).Fatal("Unable to get stream info")
|
||||
}
|
||||
if info == nil {
|
||||
stream.Subjects = []string{name}
|
||||
|
||||
// If we're trying to keep everything in memory (e.g. unit tests)
|
||||
// then overwrite the storage policy.
|
||||
if cfg.InMemory {
|
||||
stream.Storage = nats.MemoryStorage
|
||||
}
|
||||
|
||||
// Namespace the streams without modifying the original streams
|
||||
// array, otherwise we end up with namespaces on namespaces.
|
||||
namespaced := *stream
|
||||
namespaced.Name = name
|
||||
if _, err = s.AddStream(&namespaced); err != nil {
|
||||
logrus.WithError(err).WithField("stream", name).Fatal("Unable to add stream")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
consumer := saramajs.NewJetStreamConsumer(nc, s, "")
|
||||
producer := saramajs.NewJetStreamProducer(nc, s, "")
|
||||
return s, consumer, producer
|
||||
}
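// Illustrative sketch, not part of this new file: calling Prepare from a
// component. With no addresses configured it starts the in-process NATS
// server defined above; with addresses it connects to an external deployment
// instead. The storage path is an assumption.
package main

import (
	"github.com/matrix-org/dendrite/setup/config"
	"github.com/matrix-org/dendrite/setup/jetstream"
)

func main() {
	cfg := &config.JetStream{
		TopicPrefix: "Dendrite",
		StoragePath: config.Path("./jetstream"), // assumed directory
	}
	js, consumer, producer := jetstream.Prepare(cfg)
	_ = js       // nats.JetStreamContext for components already converted to JetStream
	_ = consumer // sarama.Consumer shim for components still speaking the Kafka interfaces
	_ = producer // sarama.SyncProducer shim for the same reason
}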
|
||||
setup/jetstream/streams.go (new file, 61 lines)
@ -0,0 +1,61 @@
|
|||
package jetstream
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/nats-io/nats.go"
|
||||
)
|
||||
|
||||
const (
|
||||
UserID = "user_id"
|
||||
RoomID = "room_id"
|
||||
)
|
||||
|
||||
var (
|
||||
InputRoomEvent = "InputRoomEvent"
|
||||
OutputRoomEvent = "OutputRoomEvent"
|
||||
OutputSendToDeviceEvent = "OutputSendToDeviceEvent"
|
||||
OutputKeyChangeEvent = "OutputKeyChangeEvent"
|
||||
OutputTypingEvent = "OutputTypingEvent"
|
||||
OutputClientData = "OutputClientData"
|
||||
OutputReceiptEvent = "OutputReceiptEvent"
|
||||
)
|
||||
|
||||
var streams = []*nats.StreamConfig{
|
||||
{
|
||||
Name: InputRoomEvent,
|
||||
Retention: nats.InterestPolicy,
|
||||
Storage: nats.FileStorage,
|
||||
},
|
||||
{
|
||||
Name: OutputRoomEvent,
|
||||
Retention: nats.InterestPolicy,
|
||||
Storage: nats.FileStorage,
|
||||
},
|
||||
{
|
||||
Name: OutputSendToDeviceEvent,
|
||||
Retention: nats.InterestPolicy,
|
||||
Storage: nats.FileStorage,
|
||||
},
|
||||
{
|
||||
Name: OutputKeyChangeEvent,
|
||||
Retention: nats.LimitsPolicy,
|
||||
Storage: nats.FileStorage,
|
||||
},
|
||||
{
|
||||
Name: OutputTypingEvent,
|
||||
Retention: nats.InterestPolicy,
|
||||
Storage: nats.MemoryStorage,
|
||||
MaxAge: time.Second * 60,
|
||||
},
|
||||
{
|
||||
Name: OutputClientData,
|
||||
Retention: nats.InterestPolicy,
|
||||
Storage: nats.FileStorage,
|
||||
},
|
||||
{
|
||||
Name: OutputReceiptEvent,
|
||||
Retention: nats.InterestPolicy,
|
||||
Storage: nats.FileStorage,
|
||||
},
|
||||
}
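// Illustrative sketch, not part of this new file: publishing to one of the
// streams declared above. The subject must carry the configured topic prefix,
// matching how setupNATS namespaces the stream names when it creates them; the
// prefix and payload here are assumptions. Typing events use MemoryStorage
// with a 60 second MaxAge, so stale entries simply expire.
package main

import (
	"log"

	"github.com/nats-io/nats.go"
)

func main() {
	nc, err := nats.Connect(nats.DefaultURL)
	if err != nil {
		log.Fatal(err)
	}
	js, err := nc.JetStream()
	if err != nil {
		log.Fatal(err)
	}
	// "Dendrite" prefix + OutputTypingEvent stream name.
	if _, err := js.Publish("DendriteOutputTypingEvent", []byte(`{"room_id":"!room:example.org"}`)); err != nil {
		log.Fatal(err)
	}
}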
|
||||
|
|
@ -1,58 +0,0 @@
|
|||
package kafka
|
||||
|
||||
import (
|
||||
"github.com/Shopify/sarama"
|
||||
"github.com/matrix-org/dendrite/setup/config"
|
||||
"github.com/matrix-org/naffka"
|
||||
naffkaStorage "github.com/matrix-org/naffka/storage"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func SetupConsumerProducer(cfg *config.Kafka) (sarama.Consumer, sarama.SyncProducer) {
|
||||
if cfg.UseNaffka {
|
||||
return setupNaffka(cfg)
|
||||
}
|
||||
return setupKafka(cfg)
|
||||
}
|
||||
|
||||
// setupKafka creates kafka consumer/producer pair from the config.
|
||||
func setupKafka(cfg *config.Kafka) (sarama.Consumer, sarama.SyncProducer) {
|
||||
sCfg := sarama.NewConfig()
|
||||
sCfg.Producer.MaxMessageBytes = *cfg.MaxMessageBytes
|
||||
sCfg.Producer.Return.Successes = true
|
||||
sCfg.Consumer.Fetch.Default = int32(*cfg.MaxMessageBytes)
|
||||
|
||||
consumer, err := sarama.NewConsumer(cfg.Addresses, sCfg)
|
||||
if err != nil {
|
||||
logrus.WithError(err).Panic("failed to start kafka consumer")
|
||||
}
|
||||
|
||||
producer, err := sarama.NewSyncProducer(cfg.Addresses, sCfg)
|
||||
if err != nil {
|
||||
logrus.WithError(err).Panic("failed to setup kafka producers")
|
||||
}
|
||||
|
||||
return consumer, producer
|
||||
}
|
||||
|
||||
// In monolith mode with Naffka, we don't have the same constraints about
|
||||
// consuming the same topic from more than one place like we do with Kafka.
|
||||
// Therefore, we will only open one Naffka connection in case Naffka is
|
||||
// running on SQLite.
|
||||
var naffkaInstance *naffka.Naffka
|
||||
|
||||
// setupNaffka creates kafka consumer/producer pair from the config.
|
||||
func setupNaffka(cfg *config.Kafka) (sarama.Consumer, sarama.SyncProducer) {
|
||||
if naffkaInstance != nil {
|
||||
return naffkaInstance, naffkaInstance
|
||||
}
|
||||
naffkaDB, err := naffkaStorage.NewDatabase(string(cfg.Database.ConnectionString))
|
||||
if err != nil {
|
||||
logrus.WithError(err).Panic("Failed to setup naffka database")
|
||||
}
|
||||
naffkaInstance, err = naffka.New(naffkaDB)
|
||||
if err != nil {
|
||||
logrus.WithError(err).Panic("Failed to setup naffka")
|
||||
}
|
||||
return naffkaInstance, naffkaInstance
|
||||
}
|
||||
|
|
@ -649,7 +649,7 @@ func (rc *reqCtx) injectResponseToRoomserver(res *gomatrixserverlib.MSC2836Event
|
|||
})
|
||||
}
|
||||
// we've got the data by this point so use a background context
|
||||
err = roomserver.SendInputRoomEvents(context.Background(), rc.rsAPI, ires)
|
||||
err = roomserver.SendInputRoomEvents(context.Background(), rc.rsAPI, ires, false)
|
||||
if err != nil {
|
||||
util.GetLogger(rc.ctx).WithError(err).Error("failed to inject MSC2836EventRelationshipsResponse into the roomserver")
|
||||
}
|
||||
|
|
|
|||
|
|
@ -18,90 +18,90 @@ import (
|
|||
"context"
|
||||
"encoding/json"
|
||||
|
||||
"github.com/Shopify/sarama"
|
||||
"github.com/getsentry/sentry-go"
|
||||
"github.com/matrix-org/dendrite/internal"
|
||||
"github.com/matrix-org/dendrite/internal/eventutil"
|
||||
"github.com/matrix-org/dendrite/setup/config"
|
||||
"github.com/matrix-org/dendrite/setup/jetstream"
|
||||
"github.com/matrix-org/dendrite/setup/process"
|
||||
"github.com/matrix-org/dendrite/syncapi/notifier"
|
||||
"github.com/matrix-org/dendrite/syncapi/storage"
|
||||
"github.com/matrix-org/dendrite/syncapi/types"
|
||||
"github.com/nats-io/nats.go"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// OutputClientDataConsumer consumes events that originated in the client API server.
|
||||
type OutputClientDataConsumer struct {
|
||||
clientAPIConsumer *internal.ContinualConsumer
|
||||
db storage.Database
|
||||
stream types.StreamProvider
|
||||
notifier *notifier.Notifier
|
||||
ctx context.Context
|
||||
jetstream nats.JetStreamContext
|
||||
durable nats.SubOpt
|
||||
topic string
|
||||
db storage.Database
|
||||
stream types.StreamProvider
|
||||
notifier *notifier.Notifier
|
||||
}
|
||||
|
||||
// NewOutputClientDataConsumer creates a new OutputClientData consumer. Call Start() to begin consuming from room servers.
|
||||
func NewOutputClientDataConsumer(
|
||||
process *process.ProcessContext,
|
||||
cfg *config.SyncAPI,
|
||||
kafkaConsumer sarama.Consumer,
|
||||
js nats.JetStreamContext,
|
||||
store storage.Database,
|
||||
notifier *notifier.Notifier,
|
||||
stream types.StreamProvider,
|
||||
) *OutputClientDataConsumer {
|
||||
consumer := internal.ContinualConsumer{
|
||||
Process: process,
|
||||
ComponentName: "syncapi/clientapi",
|
||||
Topic: string(cfg.Matrix.Kafka.TopicFor(config.TopicOutputClientData)),
|
||||
Consumer: kafkaConsumer,
|
||||
PartitionStore: store,
|
||||
return &OutputClientDataConsumer{
|
||||
ctx: process.Context(),
|
||||
jetstream: js,
|
||||
topic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputClientData),
|
||||
durable: cfg.Matrix.JetStream.Durable("SyncAPIClientAPIConsumer"),
|
||||
db: store,
|
||||
notifier: notifier,
|
||||
stream: stream,
|
||||
}
|
||||
s := &OutputClientDataConsumer{
|
||||
clientAPIConsumer: &consumer,
|
||||
db: store,
|
||||
notifier: notifier,
|
||||
stream: stream,
|
||||
}
|
||||
consumer.ProcessMessage = s.onMessage
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
// Start consuming from room servers
|
||||
func (s *OutputClientDataConsumer) Start() error {
|
||||
return s.clientAPIConsumer.Start()
|
||||
_, err := s.jetstream.Subscribe(s.topic, s.onMessage, s.durable)
|
||||
return err
|
||||
}
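// Illustrative sketch, not shown in this excerpt: onMessage below reads the
// user ID from the message header rather than from a Kafka partition key, so
// the producing side is assumed to attach it roughly like this when it
// publishes account data. The function name is hypothetical.
package example

import (
	"encoding/json"

	"github.com/matrix-org/dendrite/internal/eventutil"
	"github.com/matrix-org/dendrite/setup/jetstream"
	"github.com/nats-io/nats.go"
)

func publishAccountData(js nats.JetStreamContext, topic, userID string, data eventutil.AccountData) error {
	payload, err := json.Marshal(data)
	if err != nil {
		return err
	}
	msg := &nats.Msg{Subject: topic, Header: nats.Header{}, Data: payload}
	msg.Header.Set(jetstream.UserID, userID) // "user_id"
	_, err = js.PublishMsg(msg)
	return err
}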
|
||||
|
||||
// onMessage is called when the sync server receives a new event from the client API server output log.
|
||||
// It is not safe for this function to be called from multiple goroutines, or else the
|
||||
// sync stream position may race and be incorrectly calculated.
|
||||
func (s *OutputClientDataConsumer) onMessage(msg *sarama.ConsumerMessage) error {
|
||||
// Parse out the event JSON
|
||||
var output eventutil.AccountData
|
||||
if err := json.Unmarshal(msg.Value, &output); err != nil {
|
||||
// If the message was invalid, log it and move on to the next message in the stream
|
||||
log.WithError(err).Errorf("client API server output log: message parse failure")
|
||||
sentry.CaptureException(err)
|
||||
return nil
|
||||
}
|
||||
func (s *OutputClientDataConsumer) onMessage(msg *nats.Msg) {
|
||||
jetstream.WithJetStreamMessage(msg, func(msg *nats.Msg) bool {
|
||||
// Parse out the event JSON
|
||||
userID := msg.Header.Get(jetstream.UserID)
|
||||
var output eventutil.AccountData
|
||||
if err := json.Unmarshal(msg.Data, &output); err != nil {
|
||||
// If the message was invalid, log it and move on to the next message in the stream
|
||||
log.WithError(err).Errorf("client API server output log: message parse failure")
|
||||
sentry.CaptureException(err)
|
||||
return true
|
||||
}
|
||||
|
||||
log.WithFields(log.Fields{
|
||||
"type": output.Type,
|
||||
"room_id": output.RoomID,
|
||||
}).Info("received data from client API server")
|
||||
|
||||
streamPos, err := s.db.UpsertAccountData(
|
||||
context.TODO(), string(msg.Key), output.RoomID, output.Type,
|
||||
)
|
||||
if err != nil {
|
||||
sentry.CaptureException(err)
|
||||
log.WithFields(log.Fields{
|
||||
"type": output.Type,
|
||||
"room_id": output.RoomID,
|
||||
log.ErrorKey: err,
|
||||
}).Panicf("could not save account data")
|
||||
}
|
||||
"type": output.Type,
|
||||
"room_id": output.RoomID,
|
||||
}).Info("received data from client API server")
|
||||
|
||||
s.stream.Advance(streamPos)
|
||||
s.notifier.OnNewAccountData(string(msg.Key), types.StreamingToken{AccountDataPosition: streamPos})
|
||||
streamPos, err := s.db.UpsertAccountData(
|
||||
s.ctx, userID, output.RoomID, output.Type,
|
||||
)
|
||||
if err != nil {
|
||||
sentry.CaptureException(err)
|
||||
log.WithFields(log.Fields{
|
||||
"type": output.Type,
|
||||
"room_id": output.RoomID,
|
||||
log.ErrorKey: err,
|
||||
}).Panicf("could not save account data")
|
||||
}
|
||||
|
||||
return nil
|
||||
s.stream.Advance(streamPos)
|
||||
s.notifier.OnNewAccountData(userID, types.StreamingToken{AccountDataPosition: streamPos})
|
||||
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
|
|
|||
|
|
@ -18,24 +18,27 @@ import (
"context"
"encoding/json"

"github.com/Shopify/sarama"
"github.com/getsentry/sentry-go"
"github.com/matrix-org/dendrite/eduserver/api"
"github.com/matrix-org/dendrite/internal"
"github.com/matrix-org/dendrite/setup/config"
"github.com/matrix-org/dendrite/setup/jetstream"
"github.com/matrix-org/dendrite/setup/process"
"github.com/matrix-org/dendrite/syncapi/notifier"
"github.com/matrix-org/dendrite/syncapi/storage"
"github.com/matrix-org/dendrite/syncapi/types"
"github.com/nats-io/nats.go"
log "github.com/sirupsen/logrus"
)

// OutputReceiptEventConsumer consumes events that originated in the EDU server.
type OutputReceiptEventConsumer struct {
receiptConsumer *internal.ContinualConsumer
db storage.Database
stream types.StreamProvider
notifier *notifier.Notifier
ctx context.Context
jetstream nats.JetStreamContext
durable nats.SubOpt
topic string
db storage.Database
stream types.StreamProvider
notifier *notifier.Notifier
}

// NewOutputReceiptEventConsumer creates a new OutputReceiptEventConsumer.

@ -43,61 +46,54 @@ type OutputReceiptEventConsumer struct {
func NewOutputReceiptEventConsumer(
process *process.ProcessContext,
cfg *config.SyncAPI,
kafkaConsumer sarama.Consumer,
js nats.JetStreamContext,
store storage.Database,
notifier *notifier.Notifier,
stream types.StreamProvider,
) *OutputReceiptEventConsumer {

consumer := internal.ContinualConsumer{
Process: process,
ComponentName: "syncapi/eduserver/receipt",
Topic: cfg.Matrix.Kafka.TopicFor(config.TopicOutputReceiptEvent),
Consumer: kafkaConsumer,
PartitionStore: store,
return &OutputReceiptEventConsumer{
ctx: process.Context(),
jetstream: js,
topic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputReceiptEvent),
durable: cfg.Matrix.JetStream.Durable("SyncAPIEDUServerReceiptConsumer"),
db: store,
notifier: notifier,
stream: stream,
}

s := &OutputReceiptEventConsumer{
receiptConsumer: &consumer,
db: store,
notifier: notifier,
stream: stream,
}

consumer.ProcessMessage = s.onMessage

return s
}

// Start consuming from EDU api
func (s *OutputReceiptEventConsumer) Start() error {
return s.receiptConsumer.Start()
_, err := s.jetstream.Subscribe(s.topic, s.onMessage, s.durable)
return err
}

func (s *OutputReceiptEventConsumer) onMessage(msg *sarama.ConsumerMessage) error {
var output api.OutputReceiptEvent
if err := json.Unmarshal(msg.Value, &output); err != nil {
// If the message was invalid, log it and move on to the next message in the stream
log.WithError(err).Errorf("EDU server output log: message parse failure")
sentry.CaptureException(err)
return nil
}
func (s *OutputReceiptEventConsumer) onMessage(msg *nats.Msg) {
jetstream.WithJetStreamMessage(msg, func(msg *nats.Msg) bool {
var output api.OutputReceiptEvent
if err := json.Unmarshal(msg.Data, &output); err != nil {
// If the message was invalid, log it and move on to the next message in the stream
log.WithError(err).Errorf("EDU server output log: message parse failure")
sentry.CaptureException(err)
return true
}

streamPos, err := s.db.StoreReceipt(
context.TODO(),
output.RoomID,
output.Type,
output.UserID,
output.EventID,
output.Timestamp,
)
if err != nil {
sentry.CaptureException(err)
return err
}
streamPos, err := s.db.StoreReceipt(
s.ctx,
output.RoomID,
output.Type,
output.UserID,
output.EventID,
output.Timestamp,
)
if err != nil {
sentry.CaptureException(err)
return true
}

s.stream.Advance(streamPos)
s.notifier.OnNewReceipt(output.RoomID, types.StreamingToken{ReceiptPosition: streamPos})
s.stream.Advance(streamPos)
s.notifier.OnNewReceipt(output.RoomID, types.StreamingToken{ReceiptPosition: streamPos})

return nil
return true
})
}

@ -18,27 +18,30 @@ import (
"context"
"encoding/json"

"github.com/Shopify/sarama"
"github.com/getsentry/sentry-go"
"github.com/matrix-org/dendrite/eduserver/api"
"github.com/matrix-org/dendrite/internal"
"github.com/matrix-org/dendrite/setup/config"
"github.com/matrix-org/dendrite/setup/jetstream"
"github.com/matrix-org/dendrite/setup/process"
"github.com/matrix-org/dendrite/syncapi/notifier"
"github.com/matrix-org/dendrite/syncapi/storage"
"github.com/matrix-org/dendrite/syncapi/types"
"github.com/matrix-org/gomatrixserverlib"
"github.com/matrix-org/util"
"github.com/nats-io/nats.go"
log "github.com/sirupsen/logrus"
)

// OutputSendToDeviceEventConsumer consumes events that originated in the EDU server.
type OutputSendToDeviceEventConsumer struct {
sendToDeviceConsumer *internal.ContinualConsumer
db storage.Database
serverName gomatrixserverlib.ServerName // our server name
stream types.StreamProvider
notifier *notifier.Notifier
ctx context.Context
jetstream nats.JetStreamContext
durable nats.SubOpt
topic string
db storage.Database
serverName gomatrixserverlib.ServerName // our server name
stream types.StreamProvider
notifier *notifier.Notifier
}

// NewOutputSendToDeviceEventConsumer creates a new OutputSendToDeviceEventConsumer.

@ -46,78 +49,71 @@ type OutputSendToDeviceEventConsumer struct {
func NewOutputSendToDeviceEventConsumer(
process *process.ProcessContext,
cfg *config.SyncAPI,
kafkaConsumer sarama.Consumer,
js nats.JetStreamContext,
store storage.Database,
notifier *notifier.Notifier,
stream types.StreamProvider,
) *OutputSendToDeviceEventConsumer {

consumer := internal.ContinualConsumer{
Process: process,
ComponentName: "syncapi/eduserver/sendtodevice",
Topic: string(cfg.Matrix.Kafka.TopicFor(config.TopicOutputSendToDeviceEvent)),
Consumer: kafkaConsumer,
PartitionStore: store,
return &OutputSendToDeviceEventConsumer{
ctx: process.Context(),
jetstream: js,
topic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputSendToDeviceEvent),
durable: cfg.Matrix.JetStream.Durable("SyncAPIEDUServerSendToDeviceConsumer"),
db: store,
serverName: cfg.Matrix.ServerName,
notifier: notifier,
stream: stream,
}

s := &OutputSendToDeviceEventConsumer{
sendToDeviceConsumer: &consumer,
db: store,
serverName: cfg.Matrix.ServerName,
notifier: notifier,
stream: stream,
}

consumer.ProcessMessage = s.onMessage

return s
}

// Start consuming from EDU api
func (s *OutputSendToDeviceEventConsumer) Start() error {
return s.sendToDeviceConsumer.Start()
_, err := s.jetstream.Subscribe(s.topic, s.onMessage, s.durable)
return err
}

func (s *OutputSendToDeviceEventConsumer) onMessage(msg *sarama.ConsumerMessage) error {
var output api.OutputSendToDeviceEvent
if err := json.Unmarshal(msg.Value, &output); err != nil {
// If the message was invalid, log it and move on to the next message in the stream
log.WithError(err).Errorf("EDU server output log: message parse failure")
sentry.CaptureException(err)
return err
}
func (s *OutputSendToDeviceEventConsumer) onMessage(msg *nats.Msg) {
jetstream.WithJetStreamMessage(msg, func(msg *nats.Msg) bool {
var output api.OutputSendToDeviceEvent
if err := json.Unmarshal(msg.Data, &output); err != nil {
// If the message was invalid, log it and move on to the next message in the stream
log.WithError(err).Errorf("EDU server output log: message parse failure")
sentry.CaptureException(err)
return true
}

_, domain, err := gomatrixserverlib.SplitID('@', output.UserID)
if err != nil {
sentry.CaptureException(err)
return err
}
if domain != s.serverName {
return nil
}
_, domain, err := gomatrixserverlib.SplitID('@', output.UserID)
if err != nil {
sentry.CaptureException(err)
return true
}
if domain != s.serverName {
return true
}

util.GetLogger(context.TODO()).WithFields(log.Fields{
"sender": output.Sender,
"user_id": output.UserID,
"device_id": output.DeviceID,
"event_type": output.Type,
}).Info("sync API received send-to-device event from EDU server")
util.GetLogger(context.TODO()).WithFields(log.Fields{
"sender": output.Sender,
"user_id": output.UserID,
"device_id": output.DeviceID,
"event_type": output.Type,
}).Info("sync API received send-to-device event from EDU server")

streamPos, err := s.db.StoreNewSendForDeviceMessage(
context.TODO(), output.UserID, output.DeviceID, output.SendToDeviceEvent,
)
if err != nil {
sentry.CaptureException(err)
log.WithError(err).Errorf("failed to store send-to-device message")
return err
}
streamPos, err := s.db.StoreNewSendForDeviceMessage(
s.ctx, output.UserID, output.DeviceID, output.SendToDeviceEvent,
)
if err != nil {
sentry.CaptureException(err)
log.WithError(err).Errorf("failed to store send-to-device message")
return false
}

s.stream.Advance(streamPos)
s.notifier.OnNewSendToDevice(
output.UserID,
[]string{output.DeviceID},
types.StreamingToken{SendToDevicePosition: streamPos},
)
s.stream.Advance(streamPos)
s.notifier.OnNewSendToDevice(
output.UserID,
[]string{output.DeviceID},
types.StreamingToken{SendToDevicePosition: streamPos},
)

return nil
return true
})
}

@ -15,27 +15,31 @@
package consumers

import (
"context"
"encoding/json"

"github.com/Shopify/sarama"
"github.com/getsentry/sentry-go"
"github.com/matrix-org/dendrite/eduserver/api"
"github.com/matrix-org/dendrite/eduserver/cache"
"github.com/matrix-org/dendrite/internal"
"github.com/matrix-org/dendrite/setup/config"
"github.com/matrix-org/dendrite/setup/jetstream"
"github.com/matrix-org/dendrite/setup/process"
"github.com/matrix-org/dendrite/syncapi/notifier"
"github.com/matrix-org/dendrite/syncapi/storage"
"github.com/matrix-org/dendrite/syncapi/types"
"github.com/nats-io/nats.go"
log "github.com/sirupsen/logrus"
)

// OutputTypingEventConsumer consumes events that originated in the EDU server.
type OutputTypingEventConsumer struct {
typingConsumer *internal.ContinualConsumer
eduCache *cache.EDUCache
stream types.StreamProvider
notifier *notifier.Notifier
ctx context.Context
jetstream nats.JetStreamContext
durable nats.SubOpt
topic string
eduCache *cache.EDUCache
stream types.StreamProvider
notifier *notifier.Notifier
}

// NewOutputTypingEventConsumer creates a new OutputTypingEventConsumer.

@ -43,72 +47,60 @@ type OutputTypingEventConsumer struct {
func NewOutputTypingEventConsumer(
process *process.ProcessContext,
cfg *config.SyncAPI,
kafkaConsumer sarama.Consumer,
js nats.JetStreamContext,
store storage.Database,
eduCache *cache.EDUCache,
notifier *notifier.Notifier,
stream types.StreamProvider,
) *OutputTypingEventConsumer {

consumer := internal.ContinualConsumer{
Process: process,
ComponentName: "syncapi/eduserver/typing",
Topic: string(cfg.Matrix.Kafka.TopicFor(config.TopicOutputTypingEvent)),
Consumer: kafkaConsumer,
PartitionStore: store,
return &OutputTypingEventConsumer{
ctx: process.Context(),
jetstream: js,
topic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputTypingEvent),
durable: cfg.Matrix.JetStream.Durable("SyncAPIEDUServerTypingConsumer"),
eduCache: eduCache,
notifier: notifier,
stream: stream,
}

s := &OutputTypingEventConsumer{
typingConsumer: &consumer,
eduCache: eduCache,
notifier: notifier,
stream: stream,
}

consumer.ProcessMessage = s.onMessage

return s
}

// Start consuming from EDU api
func (s *OutputTypingEventConsumer) Start() error {
s.eduCache.SetTimeoutCallback(func(userID, roomID string, latestSyncPosition int64) {
pos := types.StreamPosition(latestSyncPosition)
s.stream.Advance(pos)
s.notifier.OnNewTyping(roomID, types.StreamingToken{TypingPosition: pos})
_, err := s.jetstream.Subscribe(s.topic, s.onMessage, s.durable)
return err
}

func (s *OutputTypingEventConsumer) onMessage(msg *nats.Msg) {
jetstream.WithJetStreamMessage(msg, func(msg *nats.Msg) bool {
var output api.OutputTypingEvent
if err := json.Unmarshal(msg.Data, &output); err != nil {
// If the message was invalid, log it and move on to the next message in the stream
log.WithError(err).Errorf("EDU server output log: message parse failure")
sentry.CaptureException(err)
return true
}

log.WithFields(log.Fields{
"room_id": output.Event.RoomID,
"user_id": output.Event.UserID,
"typing": output.Event.Typing,
}).Debug("received data from EDU server")

var typingPos types.StreamPosition
typingEvent := output.Event
if typingEvent.Typing {
typingPos = types.StreamPosition(
s.eduCache.AddTypingUser(typingEvent.UserID, typingEvent.RoomID, output.ExpireTime),
)
} else {
typingPos = types.StreamPosition(
s.eduCache.RemoveUser(typingEvent.UserID, typingEvent.RoomID),
)
}

s.stream.Advance(typingPos)
s.notifier.OnNewTyping(output.Event.RoomID, types.StreamingToken{TypingPosition: typingPos})

return true
})
return s.typingConsumer.Start()
}

func (s *OutputTypingEventConsumer) onMessage(msg *sarama.ConsumerMessage) error {
var output api.OutputTypingEvent
if err := json.Unmarshal(msg.Value, &output); err != nil {
// If the message was invalid, log it and move on to the next message in the stream
log.WithError(err).Errorf("EDU server output log: message parse failure")
sentry.CaptureException(err)
return nil
}

log.WithFields(log.Fields{
"room_id": output.Event.RoomID,
"user_id": output.Event.UserID,
"typing": output.Event.Typing,
}).Debug("received data from EDU server")

var typingPos types.StreamPosition
typingEvent := output.Event
if typingEvent.Typing {
typingPos = types.StreamPosition(
s.eduCache.AddTypingUser(typingEvent.UserID, typingEvent.RoomID, output.ExpireTime),
)
} else {
typingPos = types.StreamPosition(
s.eduCache.RemoveUser(typingEvent.UserID, typingEvent.RoomID),
)
}

s.stream.Advance(typingPos)
s.notifier.OnNewTyping(output.Event.RoomID, types.StreamingToken{TypingPosition: typingPos})

return nil
}

@ -17,7 +17,6 @@ package consumers
import (
"context"
"encoding/json"
"sync"

"github.com/Shopify/sarama"
"github.com/getsentry/sentry-go"

@ -34,15 +33,14 @@ import (

// OutputKeyChangeEventConsumer consumes events that originated in the key server.
type OutputKeyChangeEventConsumer struct {
keyChangeConsumer *internal.ContinualConsumer
db storage.Database
notifier *notifier.Notifier
stream types.PartitionedStreamProvider
serverName gomatrixserverlib.ServerName // our server name
rsAPI roomserverAPI.RoomserverInternalAPI
keyAPI api.KeyInternalAPI
partitionToOffset map[int32]int64
partitionToOffsetMu sync.Mutex
ctx context.Context
keyChangeConsumer *internal.ContinualConsumer
db storage.Database
notifier *notifier.Notifier
stream types.StreamProvider
serverName gomatrixserverlib.ServerName // our server name
rsAPI roomserverAPI.RoomserverInternalAPI
keyAPI api.KeyInternalAPI
}

// NewOutputKeyChangeEventConsumer creates a new OutputKeyChangeEventConsumer.

@ -56,7 +54,7 @@ func NewOutputKeyChangeEventConsumer(
rsAPI roomserverAPI.RoomserverInternalAPI,
store storage.Database,
notifier *notifier.Notifier,
stream types.PartitionedStreamProvider,
stream types.StreamProvider,
) *OutputKeyChangeEventConsumer {

consumer := internal.ContinualConsumer{

@ -68,15 +66,14 @@ func NewOutputKeyChangeEventConsumer(
}

s := &OutputKeyChangeEventConsumer{
keyChangeConsumer: &consumer,
db: store,
serverName: serverName,
keyAPI: keyAPI,
rsAPI: rsAPI,
partitionToOffset: make(map[int32]int64),
partitionToOffsetMu: sync.Mutex{},
notifier: notifier,
stream: stream,
ctx: process.Context(),
keyChangeConsumer: &consumer,
db: store,
serverName: serverName,
keyAPI: keyAPI,
rsAPI: rsAPI,
notifier: notifier,
stream: stream,
}

consumer.ProcessMessage = s.onMessage

@ -86,24 +83,10 @@ func NewOutputKeyChangeEventConsumer(

// Start consuming from the key server
func (s *OutputKeyChangeEventConsumer) Start() error {
offsets, err := s.keyChangeConsumer.StartOffsets()
s.partitionToOffsetMu.Lock()
for _, o := range offsets {
s.partitionToOffset[o.Partition] = o.Offset
}
s.partitionToOffsetMu.Unlock()
return err
}

func (s *OutputKeyChangeEventConsumer) updateOffset(msg *sarama.ConsumerMessage) {
s.partitionToOffsetMu.Lock()
defer s.partitionToOffsetMu.Unlock()
s.partitionToOffset[msg.Partition] = msg.Offset
return s.keyChangeConsumer.Start()
}

func (s *OutputKeyChangeEventConsumer) onMessage(msg *sarama.ConsumerMessage) error {
defer s.updateOffset(msg)

var m api.DeviceMessage
if err := json.Unmarshal(msg.Value, &m); err != nil {
logrus.WithError(err).Errorf("failed to read device message from key change topic")

@ -116,19 +99,22 @@ func (s *OutputKeyChangeEventConsumer) onMessage(msg *sarama.ConsumerMessage) er
}
switch m.Type {
case api.TypeCrossSigningUpdate:
return s.onCrossSigningMessage(m, msg.Offset, msg.Partition)
return s.onCrossSigningMessage(m, m.DeviceChangeID)
case api.TypeDeviceKeyUpdate:
fallthrough
default:
return s.onDeviceKeyMessage(m, msg.Offset, msg.Partition)
return s.onDeviceKeyMessage(m, m.DeviceChangeID)
}
}

func (s *OutputKeyChangeEventConsumer) onDeviceKeyMessage(m api.DeviceMessage, offset int64, partition int32) error {
func (s *OutputKeyChangeEventConsumer) onDeviceKeyMessage(m api.DeviceMessage, deviceChangeID int64) error {
if m.DeviceKeys == nil {
return nil
}
output := m.DeviceKeys
// work out who we need to notify about the new key
var queryRes roomserverAPI.QuerySharedUsersResponse
err := s.rsAPI.QuerySharedUsers(context.Background(), &roomserverAPI.QuerySharedUsersRequest{
err := s.rsAPI.QuerySharedUsers(s.ctx, &roomserverAPI.QuerySharedUsersRequest{
UserID: output.UserID,
}, &queryRes)
if err != nil {

@ -138,10 +124,7 @@ func (s *OutputKeyChangeEventConsumer) onDeviceKeyMessage(m api.DeviceMessage, o
}
// make sure we get our own key updates too!
queryRes.UserIDsToCount[output.UserID] = 1
posUpdate := types.LogPosition{
Offset: offset,
Partition: partition,
}
posUpdate := types.StreamPosition(deviceChangeID)

s.stream.Advance(posUpdate)
for userID := range queryRes.UserIDsToCount {

@ -151,11 +134,11 @@ func (s *OutputKeyChangeEventConsumer) onDeviceKeyMessage(m api.DeviceMessage, o
return nil
}

func (s *OutputKeyChangeEventConsumer) onCrossSigningMessage(m api.DeviceMessage, offset int64, partition int32) error {
func (s *OutputKeyChangeEventConsumer) onCrossSigningMessage(m api.DeviceMessage, deviceChangeID int64) error {
output := m.CrossSigningKeyUpdate
// work out who we need to notify about the new key
var queryRes roomserverAPI.QuerySharedUsersResponse
err := s.rsAPI.QuerySharedUsers(context.Background(), &roomserverAPI.QuerySharedUsersRequest{
err := s.rsAPI.QuerySharedUsers(s.ctx, &roomserverAPI.QuerySharedUsersRequest{
UserID: output.UserID,
}, &queryRes)
if err != nil {

@ -165,10 +148,7 @@ func (s *OutputKeyChangeEventConsumer) onCrossSigningMessage(m api.DeviceMessage
}
// make sure we get our own key updates too!
queryRes.UserIDsToCount[output.UserID] = 1
posUpdate := types.LogPosition{
Offset: offset,
Partition: partition,
}
posUpdate := types.StreamPosition(deviceChangeID)

s.stream.Advance(posUpdate)
for userID := range queryRes.UserIDsToCount {

@ -19,24 +19,27 @@ import (
"encoding/json"
"fmt"

"github.com/Shopify/sarama"
"github.com/getsentry/sentry-go"
"github.com/matrix-org/dendrite/internal"
"github.com/matrix-org/dendrite/roomserver/api"
"github.com/matrix-org/dendrite/setup/config"
"github.com/matrix-org/dendrite/setup/jetstream"
"github.com/matrix-org/dendrite/setup/process"
"github.com/matrix-org/dendrite/syncapi/notifier"
"github.com/matrix-org/dendrite/syncapi/storage"
"github.com/matrix-org/dendrite/syncapi/types"
"github.com/matrix-org/gomatrixserverlib"
"github.com/nats-io/nats.go"
log "github.com/sirupsen/logrus"
)

// OutputRoomEventConsumer consumes events that originated in the room server.
type OutputRoomEventConsumer struct {
ctx context.Context
cfg *config.SyncAPI
rsAPI api.RoomserverInternalAPI
rsConsumer *internal.ContinualConsumer
jetstream nats.JetStreamContext
durable nats.SubOpt
topic string
db storage.Database
pduStream types.StreamProvider
inviteStream types.StreamProvider

@ -47,83 +50,84 @@ type OutputRoomEventConsumer struct {
func NewOutputRoomEventConsumer(
process *process.ProcessContext,
cfg *config.SyncAPI,
kafkaConsumer sarama.Consumer,
js nats.JetStreamContext,
store storage.Database,
notifier *notifier.Notifier,
pduStream types.StreamProvider,
inviteStream types.StreamProvider,
rsAPI api.RoomserverInternalAPI,
) *OutputRoomEventConsumer {

consumer := internal.ContinualConsumer{
Process: process,
ComponentName: "syncapi/roomserver",
Topic: string(cfg.Matrix.Kafka.TopicFor(config.TopicOutputRoomEvent)),
Consumer: kafkaConsumer,
PartitionStore: store,
}
s := &OutputRoomEventConsumer{
return &OutputRoomEventConsumer{
ctx: process.Context(),
cfg: cfg,
rsConsumer: &consumer,
jetstream: js,
topic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputRoomEvent),
durable: cfg.Matrix.JetStream.Durable("SyncAPIRoomServerConsumer"),
db: store,
notifier: notifier,
pduStream: pduStream,
inviteStream: inviteStream,
rsAPI: rsAPI,
}
consumer.ProcessMessage = s.onMessage

return s
}

// Start consuming from room servers
func (s *OutputRoomEventConsumer) Start() error {
return s.rsConsumer.Start()
_, err := s.jetstream.Subscribe(s.topic, s.onMessage, s.durable)
return err
}

// onMessage is called when the sync server receives a new event from the room server output log.
// It is not safe for this function to be called from multiple goroutines, or else the
// sync stream position may race and be incorrectly calculated.
func (s *OutputRoomEventConsumer) onMessage(msg *sarama.ConsumerMessage) error {
// Parse out the event JSON
var output api.OutputEvent
if err := json.Unmarshal(msg.Value, &output); err != nil {
// If the message was invalid, log it and move on to the next message in the stream
log.WithError(err).Errorf("roomserver output log: message parse failure")
return nil
}

switch output.Type {
case api.OutputTypeNewRoomEvent:
// Ignore redaction events. We will add them to the database when they are
// validated (when we receive OutputTypeRedactedEvent)
event := output.NewRoomEvent.Event
if event.Type() == gomatrixserverlib.MRoomRedaction && event.StateKey() == nil {
// in the special case where the event redacts itself, just pass the message through because
// we will never see the other part of the pair
if event.Redacts() != event.EventID() {
return nil
}
func (s *OutputRoomEventConsumer) onMessage(msg *nats.Msg) {
jetstream.WithJetStreamMessage(msg, func(msg *nats.Msg) bool {
// Parse out the event JSON
var err error
var output api.OutputEvent
if err = json.Unmarshal(msg.Data, &output); err != nil {
// If the message was invalid, log it and move on to the next message in the stream
log.WithError(err).Errorf("roomserver output log: message parse failure")
return true
}
return s.onNewRoomEvent(context.TODO(), *output.NewRoomEvent)
case api.OutputTypeOldRoomEvent:
return s.onOldRoomEvent(context.TODO(), *output.OldRoomEvent)
case api.OutputTypeNewInviteEvent:
return s.onNewInviteEvent(context.TODO(), *output.NewInviteEvent)
case api.OutputTypeRetireInviteEvent:
return s.onRetireInviteEvent(context.TODO(), *output.RetireInviteEvent)
case api.OutputTypeNewPeek:
return s.onNewPeek(context.TODO(), *output.NewPeek)
case api.OutputTypeRetirePeek:
return s.onRetirePeek(context.TODO(), *output.RetirePeek)
case api.OutputTypeRedactedEvent:
return s.onRedactEvent(context.TODO(), *output.RedactedEvent)
default:
log.WithField("type", output.Type).Debug(
"roomserver output log: ignoring unknown output type",
)
return nil
}

switch output.Type {
case api.OutputTypeNewRoomEvent:
// Ignore redaction events. We will add them to the database when they are
// validated (when we receive OutputTypeRedactedEvent)
event := output.NewRoomEvent.Event
if event.Type() == gomatrixserverlib.MRoomRedaction && event.StateKey() == nil {
// in the special case where the event redacts itself, just pass the message through because
// we will never see the other part of the pair
if event.Redacts() != event.EventID() {
return true
}
}
err = s.onNewRoomEvent(s.ctx, *output.NewRoomEvent)
case api.OutputTypeOldRoomEvent:
err = s.onOldRoomEvent(s.ctx, *output.OldRoomEvent)
case api.OutputTypeNewInviteEvent:
s.onNewInviteEvent(s.ctx, *output.NewInviteEvent)
case api.OutputTypeRetireInviteEvent:
s.onRetireInviteEvent(s.ctx, *output.RetireInviteEvent)
case api.OutputTypeNewPeek:
s.onNewPeek(s.ctx, *output.NewPeek)
case api.OutputTypeRetirePeek:
s.onRetirePeek(s.ctx, *output.RetirePeek)
case api.OutputTypeRedactedEvent:
err = s.onRedactEvent(s.ctx, *output.RedactedEvent)
default:
log.WithField("type", output.Type).Debug(
"roomserver output log: ignoring unknown output type",
)
}
if err != nil {
log.WithError(err).Error("roomserver output log: failed to process event")
return false
}

return true
})
}

func (s *OutputRoomEventConsumer) onRedactEvent(

@ -275,12 +279,12 @@ func (s *OutputRoomEventConsumer) notifyJoinedPeeks(ctx context.Context, ev *gom

func (s *OutputRoomEventConsumer) onNewInviteEvent(
ctx context.Context, msg api.OutputNewInviteEvent,
) error {
) {
if msg.Event.StateKey() == nil {
log.WithFields(log.Fields{
"event": string(msg.Event.JSON()),
}).Panicf("roomserver output log: invite has no state key")
return nil
return
}
pduPos, err := s.db.AddInviteEvent(ctx, msg.Event)
if err != nil {

@ -292,18 +296,16 @@ func (s *OutputRoomEventConsumer) onNewInviteEvent(
"pdupos": pduPos,
log.ErrorKey: err,
}).Panicf("roomserver output log: write invite failure")
return nil
return
}

s.inviteStream.Advance(pduPos)
s.notifier.OnNewInvite(types.StreamingToken{InvitePosition: pduPos}, *msg.Event.StateKey())

return nil
}

func (s *OutputRoomEventConsumer) onRetireInviteEvent(
ctx context.Context, msg api.OutputRetireInviteEvent,
) error {
) {
pduPos, err := s.db.RetireInviteEvent(ctx, msg.EventID)
if err != nil {
sentry.CaptureException(err)

@ -312,19 +314,17 @@ func (s *OutputRoomEventConsumer) onRetireInviteEvent(
"event_id": msg.EventID,
log.ErrorKey: err,
}).Panicf("roomserver output log: remove invite failure")
return nil
return
}

// Notify any active sync requests that the invite has been retired.
s.inviteStream.Advance(pduPos)
s.notifier.OnNewInvite(types.StreamingToken{InvitePosition: pduPos}, msg.TargetUserID)

return nil
}

func (s *OutputRoomEventConsumer) onNewPeek(
ctx context.Context, msg api.OutputNewPeek,
) error {
) {
sp, err := s.db.AddPeek(ctx, msg.RoomID, msg.UserID, msg.DeviceID)
if err != nil {
sentry.CaptureException(err)

@ -332,7 +332,7 @@ func (s *OutputRoomEventConsumer) onNewPeek(
log.WithFields(log.Fields{
log.ErrorKey: err,
}).Panicf("roomserver output log: write peek failure")
return nil
return
}

// tell the notifier about the new peek so it knows to wake up new devices

@ -340,20 +340,18 @@ func (s *OutputRoomEventConsumer) onNewPeek(
// index as PDUs, but we should fix this
s.pduStream.Advance(sp)
s.notifier.OnNewPeek(msg.RoomID, msg.UserID, msg.DeviceID, types.StreamingToken{PDUPosition: sp})

return nil
}

func (s *OutputRoomEventConsumer) onRetirePeek(
ctx context.Context, msg api.OutputRetirePeek,
) error {
) {
sp, err := s.db.DeletePeek(ctx, msg.RoomID, msg.UserID, msg.DeviceID)
if err != nil {
// panic rather than continue with an inconsistent database
log.WithFields(log.Fields{
log.ErrorKey: err,
}).Panicf("roomserver output log: write peek failure")
return nil
return
}

// tell the notifier about the new peek so it knows to wake up new devices

@ -361,8 +359,6 @@ func (s *OutputRoomEventConsumer) onRetirePeek(
// index as PDUs, but we should fix this
s.pduStream.Advance(sp)
s.notifier.OnRetirePeek(msg.RoomID, msg.UserID, msg.DeviceID, types.StreamingToken{PDUPosition: sp})

return nil
}

func (s *OutputRoomEventConsumer) updateStateEvent(event *gomatrixserverlib.HeaderedEvent) (*gomatrixserverlib.HeaderedEvent, error) {

@ -47,8 +47,8 @@ func DeviceOTKCounts(ctx context.Context, keyAPI keyapi.KeyInternalAPI, userID,
// be already filled in with join/leave information.
func DeviceListCatchup(
ctx context.Context, keyAPI keyapi.KeyInternalAPI, rsAPI roomserverAPI.RoomserverInternalAPI,
userID string, res *types.Response, from, to types.LogPosition,
) (newPos types.LogPosition, hasNew bool, err error) {
userID string, res *types.Response, from, to types.StreamPosition,
) (newPos types.StreamPosition, hasNew bool, err error) {

// Track users who we didn't track before but now do by virtue of sharing a room with them, or not.
newlyJoinedRooms := joinedRooms(res, userID)

@ -64,27 +64,18 @@ func DeviceListCatchup(
}

// now also track users who we already share rooms with but who have updated their devices between the two tokens

var partition int32
var offset int64
partition = -1
offset = sarama.OffsetOldest
// Extract partition/offset from sync token
// TODO: In a world where keyserver is sharded there will be multiple partitions and hence multiple QueryKeyChanges to make.
if !from.IsEmpty() {
partition = from.Partition
offset = from.Offset
offset := sarama.OffsetOldest
toOffset := sarama.OffsetNewest
if to > 0 && to > from {
toOffset = int64(to)
}
var toOffset int64
toOffset = sarama.OffsetNewest
if toLog := to; toLog.Partition == partition && toLog.Offset > 0 {
toOffset = toLog.Offset
if from > 0 {
offset = int64(from)
}
var queryRes keyapi.QueryKeyChangesResponse
keyAPI.QueryKeyChanges(ctx, &keyapi.QueryKeyChangesRequest{
Partition: partition,
Offset: offset,
ToOffset: toOffset,
Offset: offset,
ToOffset: toOffset,
}, &queryRes)
if queryRes.Error != nil {
// don't fail the catchup because we may have got useful information by tracking membership

@ -95,8 +86,8 @@ func DeviceListCatchup(
var sharedUsersMap map[string]int
sharedUsersMap, queryRes.UserIDs = filterSharedUsers(ctx, rsAPI, userID, queryRes.UserIDs)
util.GetLogger(ctx).Debugf(
"QueryKeyChanges request p=%d,off=%d,to=%d response p=%d off=%d uids=%v",
partition, offset, toOffset, queryRes.Partition, queryRes.Offset, queryRes.UserIDs,
"QueryKeyChanges request off=%d,to=%d response off=%d uids=%v",
offset, toOffset, queryRes.Offset, queryRes.UserIDs,
)
userSet := make(map[string]bool)
for _, userID := range res.DeviceLists.Changed {

@ -125,13 +116,8 @@ func DeviceListCatchup(
res.DeviceLists.Left = append(res.DeviceLists.Left, userID)
}
}
// set the new token
to = types.LogPosition{
Partition: queryRes.Partition,
Offset: queryRes.Offset,
}

return to, hasNew, nil
return types.StreamPosition(queryRes.Offset), hasNew, nil
}

// TrackChangedUsers calculates the values of device_lists.changed|left in the /sync response.

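Device-list catch-up now works from a single StreamPosition rather than a Kafka partition/offset pair. The helper below is a hypothetical restatement of the offset selection used in the new DeviceListCatchup above, kept separate only to make the defaulting explicit; it assumes QueryKeyChanges still accepts the sarama sentinel offsets for "oldest" and "newest".

package internal

import (
	"github.com/Shopify/sarama"

	"github.com/matrix-org/dendrite/syncapi/types"
)

// offsetsFromStreamPositions (hypothetical) mirrors the logic in the new
// DeviceListCatchup: zero positions fall back to the sentinel offsets,
// otherwise the stream positions are used directly as log offsets.
func offsetsFromStreamPositions(from, to types.StreamPosition) (offset, toOffset int64) {
	offset = sarama.OffsetOldest   // -2: start from the beginning of the log
	toOffset = sarama.OffsetNewest // -1: read up to the latest entry
	if to > 0 && to > from {
		toOffset = int64(to)
	}
	if from > 0 {
		offset = int64(from)
	}
	return offset, toOffset
}
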
@ -6,7 +6,6 @@ import (
"sort"
"testing"

"github.com/Shopify/sarama"
keyapi "github.com/matrix-org/dendrite/keyserver/api"
"github.com/matrix-org/dendrite/roomserver/api"
"github.com/matrix-org/dendrite/syncapi/types"

@ -16,11 +15,7 @@ import (

var (
syncingUser = "@alice:localhost"
emptyToken = types.LogPosition{}
newestToken = types.LogPosition{
Offset: sarama.OffsetNewest,
Partition: 0,
}
emptyToken = types.StreamPosition(0)
)

type mockKeyAPI struct{}

@ -186,7 +181,7 @@ func TestKeyChangeCatchupOnJoinShareNewUser(t *testing.T) {
"!another:room": {syncingUser},
},
}
_, hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, newestToken)
_, hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, emptyToken)
if err != nil {
t.Fatalf("DeviceListCatchup returned an error: %s", err)
}

@ -209,7 +204,7 @@ func TestKeyChangeCatchupOnLeaveShareLeftUser(t *testing.T) {
"!another:room": {syncingUser},
},
}
_, hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, newestToken)
_, hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, emptyToken)
if err != nil {
t.Fatalf("DeviceListCatchup returned an error: %s", err)
}

@ -232,7 +227,7 @@ func TestKeyChangeCatchupOnJoinShareNoNewUsers(t *testing.T) {
"!another:room": {syncingUser, existingUser},
},
}
_, hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, newestToken)
_, hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, emptyToken)
if err != nil {
t.Fatalf("Catchup returned an error: %s", err)
}

@ -254,7 +249,7 @@ func TestKeyChangeCatchupOnLeaveShareNoUsers(t *testing.T) {
"!another:room": {syncingUser, existingUser},
},
}
_, hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, newestToken)
_, hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, emptyToken)
if err != nil {
t.Fatalf("DeviceListCatchup returned an error: %s", err)
}

@ -313,7 +308,7 @@ func TestKeyChangeCatchupNoNewJoinsButMessages(t *testing.T) {
roomID: {syncingUser, existingUser},
},
}
_, hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, newestToken)
_, hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, emptyToken)
if err != nil {
t.Fatalf("DeviceListCatchup returned an error: %s", err)
}

@ -341,7 +336,7 @@ func TestKeyChangeCatchupChangeAndLeft(t *testing.T) {
"!another:room": {syncingUser},
},
}
_, hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, newestToken)
_, hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, emptyToken)
if err != nil {
t.Fatalf("Catchup returned an error: %s", err)
}

@ -427,7 +422,7 @@ func TestKeyChangeCatchupChangeAndLeftSameRoom(t *testing.T) {
},
}
_, hasNew, err := DeviceListCatchup(
context.Background(), &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, newestToken,
context.Background(), &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, emptyToken,
)
if err != nil {
t.Fatalf("DeviceListCatchup returned an error: %s", err)

@ -127,7 +127,7 @@ func TestNewEventAndJoinedToRoom(t *testing.T) {
go func() {
pos, err := waitForEvents(n, newTestSyncRequest(bob, bobDev, syncPositionBefore))
if err != nil {
t.Errorf("TestNewEventAndJoinedToRoom error: %w", err)
t.Errorf("TestNewEventAndJoinedToRoom error: %s", err)
}
mustEqualPositions(t, pos, syncPositionAfter)
wg.Done()

@ -190,7 +190,7 @@ func TestNewInviteEventForUser(t *testing.T) {
go func() {
pos, err := waitForEvents(n, newTestSyncRequest(bob, bobDev, syncPositionBefore))
if err != nil {
t.Errorf("TestNewInviteEventForUser error: %w", err)
t.Errorf("TestNewInviteEventForUser error: %s", err)
}
mustEqualPositions(t, pos, syncPositionAfter)
wg.Done()

@ -246,7 +246,7 @@ func TestMultipleRequestWakeup(t *testing.T) {
poll := func() {
pos, err := waitForEvents(n, newTestSyncRequest(bob, bobDev, syncPositionBefore))
if err != nil {
t.Errorf("TestMultipleRequestWakeup error: %w", err)
t.Errorf("TestMultipleRequestWakeup error: %s", err)
}
mustEqualPositions(t, pos, syncPositionAfter)
wg.Done()

@ -284,7 +284,7 @@ func TestNewEventAndWasPreviouslyJoinedToRoom(t *testing.T) {
go func() {
pos, err := waitForEvents(n, newTestSyncRequest(bob, bobDev, syncPositionBefore))
if err != nil {
t.Errorf("TestNewEventAndWasPreviouslyJoinedToRoom error: %w", err)
t.Errorf("TestNewEventAndWasPreviouslyJoinedToRoom error: %s", err)
}
mustEqualPositions(t, pos, syncPositionAfter)
leaveWG.Done()

@ -301,7 +301,7 @@ func TestNewEventAndWasPreviouslyJoinedToRoom(t *testing.T) {
go func() {
pos, err := waitForEvents(n, newTestSyncRequest(alice, aliceDev, syncPositionAfter))
if err != nil {
t.Errorf("TestNewEventAndWasPreviouslyJoinedToRoom error: %w", err)
t.Errorf("TestNewEventAndWasPreviouslyJoinedToRoom error: %s", err)
}
mustEqualPositions(t, pos, syncPositionAfter2)
aliceWG.Done()

@ -10,7 +10,7 @@ import (
)

type DeviceListStreamProvider struct {
PartitionedStreamProvider
StreamProvider
rsAPI api.RoomserverInternalAPI
keyAPI keyapi.KeyInternalAPI
}

@ -18,15 +18,15 @@ type DeviceListStreamProvider struct {
func (p *DeviceListStreamProvider) CompleteSync(
ctx context.Context,
req *types.SyncRequest,
) types.LogPosition {
) types.StreamPosition {
return p.LatestPosition(ctx)
}

func (p *DeviceListStreamProvider) IncrementalSync(
ctx context.Context,
req *types.SyncRequest,
from, to types.LogPosition,
) types.LogPosition {
from, to types.StreamPosition,
) types.StreamPosition {
var err error
to, _, err = internal.DeviceListCatchup(context.Background(), p.keyAPI, p.rsAPI, req.Device.UserID, req.Response, from, to)
if err != nil {

@ -18,7 +18,7 @@ type Streams struct {
InviteStreamProvider types.StreamProvider
SendToDeviceStreamProvider types.StreamProvider
AccountDataStreamProvider types.StreamProvider
DeviceListStreamProvider types.PartitionedStreamProvider
DeviceListStreamProvider types.StreamProvider
}

func NewSyncStreamProviders(

@ -48,9 +48,9 @@ func NewSyncStreamProviders(
userAPI: userAPI,
},
DeviceListStreamProvider: &DeviceListStreamProvider{
PartitionedStreamProvider: PartitionedStreamProvider{DB: d},
rsAPI: rsAPI,
keyAPI: keyAPI,
StreamProvider: StreamProvider{DB: d},
rsAPI: rsAPI,
keyAPI: keyAPI,
},
}

@ -1,38 +0,0 @@
package streams

import (
"context"
"sync"

"github.com/matrix-org/dendrite/syncapi/storage"
"github.com/matrix-org/dendrite/syncapi/types"
)

type PartitionedStreamProvider struct {
DB storage.Database
latest types.LogPosition
latestMutex sync.RWMutex
}

func (p *PartitionedStreamProvider) Setup() {
}

func (p *PartitionedStreamProvider) Advance(
latest types.LogPosition,
) {
p.latestMutex.Lock()
defer p.latestMutex.Unlock()

if latest.IsAfter(&p.latest) {
p.latest = latest
}
}

func (p *PartitionedStreamProvider) LatestPosition(
ctx context.Context,
) types.LogPosition {
p.latestMutex.RLock()
defer p.latestMutex.RUnlock()

return p.latest
}

@ -140,6 +140,12 @@ func (rp *RequestPool) OnIncomingSyncRequest(req *http.Request, device *userapi.
// Extract values from request
syncReq, err := newSyncRequest(req, *device, rp.db)
if err != nil {
if err == types.ErrMalformedSyncToken {
return util.JSONResponse{
Code: http.StatusBadRequest,
JSON: jsonerror.InvalidArgumentValue(err.Error()),
}
}
return util.JSONResponse{
Code: http.StatusBadRequest,
JSON: jsonerror.Unknown(err.Error()),

@ -24,7 +24,7 @@ import (
keyapi "github.com/matrix-org/dendrite/keyserver/api"
"github.com/matrix-org/dendrite/roomserver/api"
"github.com/matrix-org/dendrite/setup/config"
"github.com/matrix-org/dendrite/setup/kafka"
"github.com/matrix-org/dendrite/setup/jetstream"
"github.com/matrix-org/dendrite/setup/process"
userapi "github.com/matrix-org/dendrite/userapi/api"
"github.com/matrix-org/gomatrixserverlib"

@ -48,7 +48,7 @@ func AddPublicRoutes(
federation *gomatrixserverlib.FederationClient,
cfg *config.SyncAPI,
) {
consumer, _ := kafka.SetupConsumerProducer(&cfg.Matrix.Kafka)
js, consumer, _ := jetstream.Prepare(&cfg.Matrix.JetStream)

syncDB, err := storage.NewSyncServerDatasource(&cfg.Database)
if err != nil {

@ -65,15 +65,16 @@ func AddPublicRoutes(
requestPool := sync.NewRequestPool(syncDB, cfg, userAPI, keyAPI, rsAPI, streams, notifier)

keyChangeConsumer := consumers.NewOutputKeyChangeEventConsumer(
process, cfg.Matrix.ServerName, string(cfg.Matrix.Kafka.TopicFor(config.TopicOutputKeyChangeEvent)),
consumer, keyAPI, rsAPI, syncDB, notifier, streams.DeviceListStreamProvider,
process, cfg.Matrix.ServerName, cfg.Matrix.JetStream.TopicFor(jetstream.OutputKeyChangeEvent),
consumer, keyAPI, rsAPI, syncDB, notifier,
streams.DeviceListStreamProvider,
)
if err = keyChangeConsumer.Start(); err != nil {
logrus.WithError(err).Panicf("failed to start key change consumer")
}

roomConsumer := consumers.NewOutputRoomEventConsumer(
process, cfg, consumer, syncDB, notifier, streams.PDUStreamProvider,
process, cfg, js, syncDB, notifier, streams.PDUStreamProvider,
streams.InviteStreamProvider, rsAPI,
)
if err = roomConsumer.Start(); err != nil {

@ -81,28 +82,28 @@ func AddPublicRoutes(
}

clientConsumer := consumers.NewOutputClientDataConsumer(
process, cfg, consumer, syncDB, notifier, streams.AccountDataStreamProvider,
process, cfg, js, syncDB, notifier, streams.AccountDataStreamProvider,
)
if err = clientConsumer.Start(); err != nil {
logrus.WithError(err).Panicf("failed to start client data consumer")
}

typingConsumer := consumers.NewOutputTypingEventConsumer(
process, cfg, consumer, syncDB, eduCache, notifier, streams.TypingStreamProvider,
process, cfg, js, syncDB, eduCache, notifier, streams.TypingStreamProvider,
)
if err = typingConsumer.Start(); err != nil {
logrus.WithError(err).Panicf("failed to start typing consumer")
}

sendToDeviceConsumer := consumers.NewOutputSendToDeviceEventConsumer(
process, cfg, consumer, syncDB, notifier, streams.SendToDeviceStreamProvider,
process, cfg, js, syncDB, notifier, streams.SendToDeviceStreamProvider,
)
if err = sendToDeviceConsumer.Start(); err != nil {
logrus.WithError(err).Panicf("failed to start send-to-device consumer")
}

receiptConsumer := consumers.NewOutputReceiptEventConsumer(
process, cfg, consumer, syncDB, notifier, streams.ReceiptStreamProvider,
process, cfg, js, syncDB, notifier, streams.ReceiptStreamProvider,
)
if err = receiptConsumer.Start(); err != nil {
logrus.WithError(err).Panicf("failed to start receipts consumer")

@ -42,11 +42,3 @@ type StreamProvider interface {
// LatestPosition returns the latest stream position for this stream.
LatestPosition(ctx context.Context) StreamPosition
}

type PartitionedStreamProvider interface {
Setup()
Advance(latest LogPosition)
CompleteSync(ctx context.Context, req *SyncRequest) LogPosition
IncrementalSync(ctx context.Context, req *SyncRequest, from, to LogPosition) LogPosition
LatestPosition(ctx context.Context) LogPosition
}

@ -16,6 +16,7 @@ package types

import (
"encoding/json"
"errors"
"fmt"
"strconv"
"strings"

@ -26,13 +27,10 @@ import (
)

var (
// ErrInvalidSyncTokenType is returned when an attempt at creating a
// new instance of SyncToken with an invalid type (i.e. neither "s"
// nor "t").
ErrInvalidSyncTokenType = fmt.Errorf("sync token has an unknown prefix (should be either s or t)")
// ErrInvalidSyncTokenLen is returned when the pagination token is an
// invalid length
ErrInvalidSyncTokenLen = fmt.Errorf("sync token has an invalid length")
// This error is returned when parsing sync tokens if the token is invalid. Callers can use this
// error to detect whether to 400 or 401 the client. It is recommended to 401 them to force a
// logout.
ErrMalformedSyncToken = errors.New("malformed sync token")
)

type StateDelta struct {

@ -47,27 +45,6 @@ type StateDelta struct {
// StreamPosition represents the offset in the sync stream a client is at.
type StreamPosition int64

// LogPosition represents the offset in a Kafka log a client is at.
type LogPosition struct {
Partition int32
Offset int64
}

func (p *LogPosition) IsEmpty() bool {
return p.Offset == 0
}

// IsAfter returns true if this position is after `lp`.
func (p *LogPosition) IsAfter(lp *LogPosition) bool {
if lp == nil {
return false
}
if p.Partition != lp.Partition {
return false
}
return p.Offset > lp.Offset
}

// StreamEvent is the same as gomatrixserverlib.Event but also has the PDU stream position for this event.
type StreamEvent struct {
*gomatrixserverlib.HeaderedEvent

@ -124,7 +101,7 @@ type StreamingToken struct {
SendToDevicePosition StreamPosition
InvitePosition StreamPosition
AccountDataPosition StreamPosition
DeviceListPosition LogPosition
DeviceListPosition StreamPosition
}

// This will be used as a fallback by json.Marshal.

@ -140,14 +117,11 @@ func (s *StreamingToken) UnmarshalText(text []byte) (err error) {

func (t StreamingToken) String() string {
posStr := fmt.Sprintf(
"s%d_%d_%d_%d_%d_%d",
"s%d_%d_%d_%d_%d_%d_%d",
t.PDUPosition, t.TypingPosition,
t.ReceiptPosition, t.SendToDevicePosition,
t.InvitePosition, t.AccountDataPosition,
t.InvitePosition, t.AccountDataPosition, t.DeviceListPosition,
)
if dl := t.DeviceListPosition; !dl.IsEmpty() {
posStr += fmt.Sprintf(".dl-%d-%d", dl.Partition, dl.Offset)
}
return posStr
}

@ -166,14 +140,14 @@ func (t *StreamingToken) IsAfter(other StreamingToken) bool {
return true
case t.AccountDataPosition > other.AccountDataPosition:
return true
case t.DeviceListPosition.IsAfter(&other.DeviceListPosition):
case t.DeviceListPosition > other.DeviceListPosition:
return true
}
return false
}

func (t *StreamingToken) IsEmpty() bool {
return t == nil || t.PDUPosition+t.TypingPosition+t.ReceiptPosition+t.SendToDevicePosition+t.InvitePosition+t.AccountDataPosition == 0 && t.DeviceListPosition.IsEmpty()
return t == nil || t.PDUPosition+t.TypingPosition+t.ReceiptPosition+t.SendToDevicePosition+t.InvitePosition+t.AccountDataPosition+t.DeviceListPosition == 0
}

// WithUpdates returns a copy of the StreamingToken with updates applied from another StreamingToken.

@ -208,7 +182,7 @@ func (t *StreamingToken) ApplyUpdates(other StreamingToken) {
if other.AccountDataPosition > t.AccountDataPosition {
t.AccountDataPosition = other.AccountDataPosition
}
if other.DeviceListPosition.IsAfter(&t.DeviceListPosition) {
if other.DeviceListPosition > t.DeviceListPosition {
t.DeviceListPosition = other.DeviceListPosition
}
}

@ -292,16 +266,18 @@ func NewTopologyTokenFromString(tok string) (token TopologyToken, err error) {

func NewStreamTokenFromString(tok string) (token StreamingToken, err error) {
if len(tok) < 1 {
err = fmt.Errorf("empty stream token")
err = ErrMalformedSyncToken
return
}
if tok[0] != SyncTokenTypeStream[0] {
err = fmt.Errorf("stream token must start with 's'")
err = ErrMalformedSyncToken
return
}
categories := strings.Split(tok[1:], ".")
parts := strings.Split(categories[0], "_")
var positions [6]StreamPosition
// Migration: Remove everything after and including '.' - we previously had tokens like:
// s478_0_0_0_0_13.dl-0-2 but we have now removed partitioned stream positions
tok = strings.Split(tok, ".")[0]
parts := strings.Split(tok[1:], "_")
var positions [7]StreamPosition
for i, p := range parts {
if i > len(positions) {
break

@ -309,6 +285,7 @@ func NewStreamTokenFromString(tok string) (token StreamingToken, err error) {
var pos int
pos, err = strconv.Atoi(p)
if err != nil {
err = ErrMalformedSyncToken
return
}
positions[i] = StreamPosition(pos)

@ -320,31 +297,7 @@ func NewStreamTokenFromString(tok string) (token StreamingToken, err error) {
SendToDevicePosition: positions[3],
InvitePosition: positions[4],
AccountDataPosition: positions[5],
}
// dl-0-1234
// $log_name-$partition-$offset
for _, logStr := range categories[1:] {
segments := strings.Split(logStr, "-")
if len(segments) != 3 {
err = fmt.Errorf("invalid log position %q", logStr)
return
}
switch segments[0] {
case "dl":
// Device list syncing
var partition, offset int
if partition, err = strconv.Atoi(segments[1]); err != nil {
return
}
if offset, err = strconv.Atoi(segments[2]); err != nil {
return
}
token.DeviceListPosition.Partition = int32(partition)
token.DeviceListPosition.Offset = int64(offset)
default:
err = fmt.Errorf("unrecognised token type %q", segments[0])
return
}
DeviceListPosition: positions[6],
}
return token, nil
}

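With this change the sync token grows from six to seven underscore-separated stream positions, the last one carrying the device-list position, and anything after a '.' in an old-style token is discarded during parsing. A minimal sketch of the round trip, assuming the parsing and serialisation logic shown in the hunks above:

package main

import (
	"fmt"

	"github.com/matrix-org/dendrite/syncapi/types"
)

func main() {
	// New-style token: seven positions, the last being DeviceListPosition.
	tok, err := types.NewStreamTokenFromString("s4_0_0_0_0_0_2")
	if err != nil {
		panic(err)
	}
	fmt.Println(tok.DeviceListPosition) // 2

	// An old-style token with a partitioned device-list suffix still parses;
	// the migration path drops everything after the '.'.
	old, err := types.NewStreamTokenFromString("s478_0_0_0_0_13.dl-0-2")
	if err != nil {
		panic(err)
	}
	fmt.Println(old.String()) // s478_0_0_0_0_13_0
}
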
@ -2,50 +2,17 @@ package types

import (
"encoding/json"
"reflect"
"testing"

"github.com/matrix-org/gomatrixserverlib"
)

func TestNewSyncTokenWithLogs(t *testing.T) {
tests := map[string]*StreamingToken{
"s4_0_0_0_0_0": {
PDUPosition: 4,
},
"s4_0_0_0_0_0.dl-0-123": {
PDUPosition: 4,
DeviceListPosition: LogPosition{
Partition: 0,
Offset: 123,
},
},
}
for tok, want := range tests {
got, err := NewStreamTokenFromString(tok)
if err != nil {
if want == nil {
continue // error expected
}
t.Errorf("%s errored: %s", tok, err)
continue
}
if !reflect.DeepEqual(got, *want) {
t.Errorf("%s mismatch: got %v want %v", tok, got, want)
}
gotStr := got.String()
if gotStr != tok {
t.Errorf("%s reserialisation mismatch: got %s want %s", tok, gotStr, tok)
}
}
}

func TestSyncTokens(t *testing.T) {
shouldPass := map[string]string{
"s4_0_0_0_0_0": StreamingToken{4, 0, 0, 0, 0, 0, LogPosition{}}.String(),
"s3_1_0_0_0_0.dl-1-2": StreamingToken{3, 1, 0, 0, 0, 0, LogPosition{1, 2}}.String(),
"s3_1_2_3_5_0": StreamingToken{3, 1, 2, 3, 5, 0, LogPosition{}}.String(),
"t3_1": TopologyToken{3, 1}.String(),
"s4_0_0_0_0_0_0": StreamingToken{4, 0, 0, 0, 0, 0, 0}.String(),
"s3_1_0_0_0_0_2": StreamingToken{3, 1, 0, 0, 0, 0, 2}.String(),
"s3_1_2_3_5_0_0": StreamingToken{3, 1, 2, 3, 5, 0, 0}.String(),
"t3_1": TopologyToken{3, 1}.String(),
}

for a, b := range shouldPass {
Some files were not shown because too many files have changed in this diff.