diff --git a/.github/ISSUE_TEMPLATE/BUG_REPORT.md b/.github/ISSUE_TEMPLATE/BUG_REPORT.md index 68ae922a3..206713e04 100644 --- a/.github/ISSUE_TEMPLATE/BUG_REPORT.md +++ b/.github/ISSUE_TEMPLATE/BUG_REPORT.md @@ -4,34 +4,50 @@ about: Create a report to help us improve --- - + ### Background information - + - **Dendrite version or git SHA**: - **Monolith or Polylith?**: - **SQLite3 or Postgres?**: - **Running in Docker?**: -- **`go version`**: +- **`go version`**: +- **Client used (if applicable)**: - ### Description - + - **What** is the problem: + - **Who** is affected: + - **How** is this bug manifesting: + - **When** did this first appear: + + ### Steps to reproduce + - list the steps - that reproduce the bug diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 92253214a..1204582e2 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,8 +1,7 @@ ### Pull Request Checklist - + -* [ ] I have added any new tests that need to pass to `sytest-whitelist` as specified in [docs/sytest.md](https://github.com/matrix-org/dendrite/blob/master/docs/sytest.md) * [ ] Pull request includes a [sign off](https://github.com/matrix-org/dendrite/blob/master/docs/CONTRIBUTING.md#sign-off) Signed-off-by: `Your Name ` diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml new file mode 100644 index 000000000..ad5a2660c --- /dev/null +++ b/.github/workflows/tests.yml @@ -0,0 +1,71 @@ +name: Tests + +on: + push: + branches: [ 'master' ] + pull_request: + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + complement: + runs-on: ubuntu-latest + steps: + # Env vars are set file a file given by $GITHUB_PATH. We need both Go 1.17 and GOPATH on env to run Complement. + # See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#adding-a-system-path + - name: "Set Go Version" + run: | + echo "$GOROOT_1_17_X64/bin" >> $GITHUB_PATH + echo "~/go/bin" >> $GITHUB_PATH + + - name: "Install Complement Dependencies" + # We don't need to install Go because it is included on the Ubuntu 20.04 image: + # See https://github.com/actions/virtual-environments/blob/main/images/linux/Ubuntu2004-Readme.md specifically GOROOT_1_17_X64 + run: | + sudo apt-get update && sudo apt-get install -y libolm3 libolm-dev + go get -v github.com/haveyoudebuggedit/gotestfmt/v2/cmd/gotestfmt@latest + + - name: Run actions/checkout@v2 for dendrite + uses: actions/checkout@v2 + with: + path: dendrite + + # Attempt to check out the same branch of Complement as the PR. If it + # doesn't exist, fallback to master. + - name: Checkout complement + shell: bash + run: | + mkdir -p complement + # Attempt to use the version of complement which best matches the current + # build. Depending on whether this is a PR or release, etc. we need to + # use different fallbacks. + # + # 1. First check if there's a similarly named branch (GITHUB_HEAD_REF + # for pull requests, otherwise GITHUB_REF). + # 2. Attempt to use the base branch, e.g. when merging into release-vX.Y + # (GITHUB_BASE_REF for pull requests). + # 3. Use the default complement branch ("master"). + for BRANCH_NAME in "$GITHUB_HEAD_REF" "$GITHUB_BASE_REF" "${GITHUB_REF#refs/heads/}" "master"; do + # Skip empty branch names and merge commits. 
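As an aside on the Complement checkout step above (the body of the loop continues immediately below): the numbered comment describes a simple "first candidate that exists" search. A rough restatement of that fallback in Go, purely for illustration, is shown here; the helper name and the use of `net/http` in place of `wget` are assumptions of this sketch, not part of the workflow.

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

// pickComplementBranch returns the first candidate branch whose GitHub archive
// tarball exists, skipping empty names and pull-request refs, mirroring the
// fallback order used by the workflow step.
func pickComplementBranch(candidates ...string) (string, error) {
	for _, branch := range candidates {
		if branch == "" || strings.HasPrefix(branch, "refs/pull/") {
			continue
		}
		url := fmt.Sprintf("https://github.com/matrix-org/complement/archive/%s.tar.gz", branch)
		resp, err := http.Head(url)
		if err != nil {
			continue
		}
		resp.Body.Close()
		if resp.StatusCode == http.StatusOK {
			return branch, nil
		}
	}
	return "", fmt.Errorf("no matching complement branch found")
}

func main() {
	// Same order as the workflow: head ref, base ref, current ref, then master.
	branch, err := pickComplementBranch("my-feature", "release-v0.5", "master")
	if err != nil {
		panic(err)
	}
	fmt.Println("using complement branch:", branch)
}
```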
+ if [[ -z "$BRANCH_NAME" || $BRANCH_NAME =~ ^refs/pull/.* ]]; then + continue + fi + + (wget -O - "https://github.com/matrix-org/complement/archive/$BRANCH_NAME.tar.gz" | tar -xz --strip-components=1 -C complement) && break + done + + # Build initial Dendrite image + - run: docker build -t complement-dendrite -f build/scripts/Complement.Dockerfile . + working-directory: dendrite + + # Run Complement + - run: | + set -o pipefail && + go test -v -json -tags dendrite_blacklist ./tests/... 2>&1 | gotestfmt + shell: bash + name: Run Complement Tests + env: + COMPLEMENT_BASE_IMAGE: complement-dendrite:latest + working-directory: complement \ No newline at end of file diff --git a/README.md b/README.md index 30bf19f6c..3ec9f0296 100644 --- a/README.md +++ b/README.md @@ -76,7 +76,7 @@ Then point your favourite Matrix client at `http://localhost:8008` or `https://l We use a script called Are We Synapse Yet which checks Sytest compliance rates. Sytest is a black-box homeserver test rig with around 900 tests. The script works out how many of these tests are passing on Dendrite and it -updates with CI. As of November 2020 we're at around 58% CS API coverage and 83% Federation coverage, though check +updates with CI. As of January 2022 we're at around 65% CS API coverage and 92% Federation coverage, though check CI for the latest numbers. In practice, this means you can communicate locally and via federation with Synapse servers such as matrix.org reasonably well. There's a long list of features that are not implemented, notably: - Push diff --git a/appservice/appservice.go b/appservice/appservice.go index 5f16c10b3..924a609ea 100644 --- a/appservice/appservice.go +++ b/appservice/appservice.go @@ -32,7 +32,7 @@ import ( roomserverAPI "github.com/matrix-org/dendrite/roomserver/api" "github.com/matrix-org/dendrite/setup/base" "github.com/matrix-org/dendrite/setup/config" - "github.com/matrix-org/dendrite/setup/kafka" + "github.com/matrix-org/dendrite/setup/jetstream" userapi "github.com/matrix-org/dendrite/userapi/api" "github.com/sirupsen/logrus" ) @@ -58,7 +58,7 @@ func NewInternalAPI( }, }, } - consumer, _ := kafka.SetupConsumerProducer(&base.Cfg.Global.Kafka) + js, _, _ := jetstream.Prepare(&base.Cfg.Global.JetStream) // Create a connection to the appservice postgres DB appserviceDB, err := storage.NewDatabase(&base.Cfg.AppServiceAPI.Database) @@ -97,7 +97,7 @@ func NewInternalAPI( // We can't add ASes at runtime so this is safe to do. if len(workerStates) > 0 { consumer := consumers.NewOutputRoomEventConsumer( - base.ProcessContext, base.Cfg, consumer, appserviceDB, + base.ProcessContext, base.Cfg, js, appserviceDB, rsAPI, workerStates, ) if err := consumer.Start(); err != nil { diff --git a/appservice/consumers/roomserver.go b/appservice/consumers/roomserver.go index 2ad7f68fe..8aea5c347 100644 --- a/appservice/consumers/roomserver.go +++ b/appservice/consumers/roomserver.go @@ -20,23 +20,26 @@ import ( "github.com/matrix-org/dendrite/appservice/storage" "github.com/matrix-org/dendrite/appservice/types" - "github.com/matrix-org/dendrite/internal" "github.com/matrix-org/dendrite/roomserver/api" "github.com/matrix-org/dendrite/setup/config" + "github.com/matrix-org/dendrite/setup/jetstream" "github.com/matrix-org/dendrite/setup/process" "github.com/matrix-org/gomatrixserverlib" + "github.com/nats-io/nats.go" - "github.com/Shopify/sarama" log "github.com/sirupsen/logrus" ) // OutputRoomEventConsumer consumes events that originated in the room server. 
type OutputRoomEventConsumer struct { - roomServerConsumer *internal.ContinualConsumer - asDB storage.Database - rsAPI api.RoomserverInternalAPI - serverName string - workerStates []types.ApplicationServiceWorkerState + ctx context.Context + jetstream nats.JetStreamContext + durable nats.SubOpt + topic string + asDB storage.Database + rsAPI api.RoomserverInternalAPI + serverName string + workerStates []types.ApplicationServiceWorkerState } // NewOutputRoomEventConsumer creates a new OutputRoomEventConsumer. Call @@ -44,55 +47,56 @@ type OutputRoomEventConsumer struct { func NewOutputRoomEventConsumer( process *process.ProcessContext, cfg *config.Dendrite, - kafkaConsumer sarama.Consumer, + js nats.JetStreamContext, appserviceDB storage.Database, rsAPI api.RoomserverInternalAPI, workerStates []types.ApplicationServiceWorkerState, ) *OutputRoomEventConsumer { - consumer := internal.ContinualConsumer{ - Process: process, - ComponentName: "appservice/roomserver", - Topic: cfg.Global.Kafka.TopicFor(config.TopicOutputRoomEvent), - Consumer: kafkaConsumer, - PartitionStore: appserviceDB, + return &OutputRoomEventConsumer{ + ctx: process.Context(), + jetstream: js, + durable: cfg.Global.JetStream.Durable("AppserviceRoomserverConsumer"), + topic: cfg.Global.JetStream.TopicFor(jetstream.OutputRoomEvent), + asDB: appserviceDB, + rsAPI: rsAPI, + serverName: string(cfg.Global.ServerName), + workerStates: workerStates, } - s := &OutputRoomEventConsumer{ - roomServerConsumer: &consumer, - asDB: appserviceDB, - rsAPI: rsAPI, - serverName: string(cfg.Global.ServerName), - workerStates: workerStates, - } - consumer.ProcessMessage = s.onMessage - - return s } // Start consuming from room servers func (s *OutputRoomEventConsumer) Start() error { - return s.roomServerConsumer.Start() + _, err := s.jetstream.Subscribe(s.topic, s.onMessage, s.durable) + return err } // onMessage is called when the appservice component receives a new event from // the room server output log. -func (s *OutputRoomEventConsumer) onMessage(msg *sarama.ConsumerMessage) error { - // Parse out the event JSON - var output api.OutputEvent - if err := json.Unmarshal(msg.Value, &output); err != nil { - // If the message was invalid, log it and move on to the next message in the stream - log.WithError(err).Errorf("roomserver output log: message parse failure") - return nil - } +func (s *OutputRoomEventConsumer) onMessage(msg *nats.Msg) { + jetstream.WithJetStreamMessage(msg, func(msg *nats.Msg) bool { + // Parse out the event JSON + var output api.OutputEvent + if err := json.Unmarshal(msg.Data, &output); err != nil { + // If the message was invalid, log it and move on to the next message in the stream + log.WithError(err).Errorf("roomserver output log: message parse failure") + return true + } - if output.Type != api.OutputTypeNewRoomEvent { - return nil - } + if output.Type != api.OutputTypeNewRoomEvent { + return true + } - events := []*gomatrixserverlib.HeaderedEvent{output.NewRoomEvent.Event} - events = append(events, output.NewRoomEvent.AddStateEvents...) + events := []*gomatrixserverlib.HeaderedEvent{output.NewRoomEvent.Event} + events = append(events, output.NewRoomEvent.AddStateEvents...) 
- // Send event to any relevant application services - return s.filterRoomserverEvents(context.TODO(), events) + // Send event to any relevant application services + if err := s.filterRoomserverEvents(context.TODO(), events); err != nil { + log.WithError(err).Errorf("roomserver output log: filter error") + return true + } + + return true + }) } // filterRoomserverEvents takes in events and decides whether any of them need diff --git a/appservice/storage/interface.go b/appservice/storage/interface.go index 735e2f90a..25d35af6c 100644 --- a/appservice/storage/interface.go +++ b/appservice/storage/interface.go @@ -17,12 +17,10 @@ package storage import ( "context" - "github.com/matrix-org/dendrite/internal" "github.com/matrix-org/gomatrixserverlib" ) type Database interface { - internal.PartitionStorer StoreEvent(ctx context.Context, appServiceID string, event *gomatrixserverlib.HeaderedEvent) error GetEventsWithAppServiceID(ctx context.Context, appServiceID string, limit int) (int, int, []gomatrixserverlib.HeaderedEvent, bool, error) CountEventsWithAppServiceID(ctx context.Context, appServiceID string) (int, error) diff --git a/are-we-synapse-yet.py b/are-we-synapse-yet.py index 92c7b82b8..10b1be28a 100755 --- a/are-we-synapse-yet.py +++ b/are-we-synapse-yet.py @@ -177,6 +177,10 @@ def print_stats(header_name, gid_to_tests, gid_to_name, verbose): line = "%s: %s (%d/%d tests)" % (gid_to_name[gid].ljust(25, ' '), pct.rjust(4, ' '), group_passing, group_total) subsections.append(line) subsection_test_names[line] = test_names_and_marks + + # avoid errors when trying to divide by 0 + if total_tests == 0: + return pct = "{0:.0f}%".format(total_passing/total_tests * 100) print("%s: %s (%d/%d tests)" % (header_name, pct, total_passing, total_tests)) diff --git a/build/docker/README.md b/build/docker/README.md index 19e4234c5..7425d96cb 100644 --- a/build/docker/README.md +++ b/build/docker/README.md @@ -19,7 +19,6 @@ not contain the Go toolchain etc. There are three sample `docker-compose` files: -- `docker-compose.deps.yml` which runs the Postgres and Kafka prerequisites - `docker-compose.monolith.yml` which runs a monolith Dendrite deployment - `docker-compose.polylith.yml` which runs a polylith Dendrite deployment @@ -28,7 +27,7 @@ There are three sample `docker-compose` files: The `docker-compose` files refer to the `/etc/dendrite` volume as where the runtime config should come from. The mounted folder must contain: -- `dendrite.yaml` configuration file (based on the [`dendrite-config.yaml`](https://raw.githubusercontent.com/matrix-org/dendrite/master/dendrite-config.yaml) +- `dendrite.yaml` configuration file (from the [Docker config folder](https://github.com/matrix-org/dendrite/tree/master/build/docker/config) sample in the `build/docker/config` folder of this repository.) - `matrix_key.pem` server key, as generated using `cmd/generate-keys` - `server.crt` certificate file @@ -50,15 +49,9 @@ The key files will now exist in your current working directory, and can be mount ## Starting Dendrite as a monolith deployment -Create your config based on the [`dendrite-config.yaml`](https://raw.githubusercontent.com/matrix-org/dendrite/master/dendrite-config.yaml) configuration file in the `build/docker/config` folder of this repository. And rename the config file to `dendrite.yml` (and put it in your `config` directory). 
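Stepping back to the appservice consumer rewrite above: the Kafka `ContinualConsumer` (and with it the `PartitionStorer` requirement on the storage interface) is replaced by a durable JetStream push subscription whose handler receives a `*nats.Msg`. A minimal, self-contained sketch of that subscription pattern using the plain nats.go client rather than Dendrite's `jetstream` helpers is below; the subject string is illustrative (Dendrite derives it from `TopicFor` and the configured `topic_prefix`), and it assumes a JetStream stream already covers that subject.

```go
package main

import (
	"encoding/json"
	"log"

	"github.com/nats-io/nats.go"
)

func main() {
	nc, err := nats.Connect(nats.DefaultURL)
	if err != nil {
		log.Fatal(err)
	}
	defer nc.Drain()

	js, err := nc.JetStream()
	if err != nil {
		log.Fatal(err)
	}

	// Durable push subscription with manual acks: the consumer position
	// survives restarts, and unacknowledged messages are redelivered.
	_, err = js.Subscribe("Dendrite.OutputRoomEvent", func(msg *nats.Msg) {
		var output map[string]interface{} // stand-in for the roomserver api.OutputEvent type
		if err := json.Unmarshal(msg.Data, &output); err != nil {
			_ = msg.Ack() // invalid payload: ack so it is not redelivered
			return
		}
		// ... hand the event to the application service filter here ...
		_ = msg.Ack()
	}, nats.Durable("AppserviceRoomserverConsumer"), nats.ManualAck())
	if err != nil {
		log.Fatal(err)
	}

	select {} // block so the subscription keeps running
}
```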
+Create your config based on the [`dendrite.yaml`](https://github.com/matrix-org/dendrite/tree/master/build/docker/config) configuration file in the `build/docker/config` folder of this repository. -Once in place, start the PostgreSQL dependency: - -``` -docker-compose -f docker-compose.deps.yml up postgres -``` - -Wait a few seconds for PostgreSQL to finish starting up, and then start a monolith: +Then start the deployment: ``` docker-compose -f docker-compose.monolith.yml up @@ -66,15 +59,9 @@ docker-compose -f docker-compose.monolith.yml up ## Starting Dendrite as a polylith deployment -Create your config based on the [`dendrite-config.yaml`](https://raw.githubusercontent.com/matrix-org/dendrite/master/dendrite-config.yaml) configuration file in the `build/docker/config` folder of this repository. And rename the config file to `dendrite.yml` (and put it in your `config` directory). +Create your config based on the [`dendrite-config.yaml`](https://github.com/matrix-org/dendrite/tree/master/build/docker/config) configuration file in the `build/docker/config` folder of this repository. -Once in place, start all the dependencies: - -``` -docker-compose -f docker-compose.deps.yml up -``` - -Wait a few seconds for PostgreSQL and Kafka to finish starting up, and then start a polylith: +Then start the deployment: ``` docker-compose -f docker-compose.polylith.yml up diff --git a/build/docker/config/dendrite-config.yaml b/build/docker/config/dendrite.yaml similarity index 88% rename from build/docker/config/dendrite-config.yaml rename to build/docker/config/dendrite.yaml index d6357747b..6d5ebc9fd 100644 --- a/build/docker/config/dendrite-config.yaml +++ b/build/docker/config/dendrite.yaml @@ -28,7 +28,7 @@ # connection can be idle in seconds - a negative value is unlimited. # The version of the configuration file. -version: 1 +version: 2 # Global Matrix configuration. This configuration applies to all components. global: @@ -62,29 +62,28 @@ global: - matrix.org - vector.im - # Configuration for Kafka/Naffka. - kafka: - # List of Kafka broker addresses to connect to. This is not needed if using - # Naffka in monolith mode. + # Configuration for NATS JetStream + jetstream: + # A list of NATS Server addresses to connect to. If none are specified, an + # internal NATS server will be started automatically when running Dendrite + # in monolith mode. It is required to specify the address of at least one + # NATS Server node if running in polylith mode. addresses: - - kafka:9092 + - jetstream:4222 - # The prefix to use for Kafka topic names for this homeserver. Change this only if - # you are running more than one Dendrite homeserver on the same Kafka deployment. + # Keep all NATS streams in memory, rather than persisting it to the storage + # path below. This option is present primarily for integration testing and + # should not be used on a real world Dendrite deployment. + in_memory: false + + # Persistent directory to store JetStream streams in. This directory + # should be preserved across Dendrite restarts. + storage_path: ./ + + # The prefix to use for stream names for this homeserver - really only + # useful if running more than one Dendrite on the same NATS deployment. topic_prefix: Dendrite - # Whether to use Naffka instead of Kafka. This is only available in monolith - # mode, but means that you can run a single-process server without requiring - # Kafka. - use_naffka: false - - # Naffka database options. Not required when using Kafka. 
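The `jetstream` comments above note that, when no `addresses` are configured, a monolith deployment starts an internal NATS server. For context, embedding nats-server with JetStream enabled and a persistent store directory looks roughly like the following; this uses the upstream nats-server library directly rather than Dendrite's own wrapper, and the storage path is only an example.

```go
package main

import (
	"log"
	"time"

	natsserver "github.com/nats-io/nats-server/v2/server"
	"github.com/nats-io/nats.go"
)

func main() {
	// Enable JetStream and point it at a persistent directory, comparable to
	// the storage_path option in the configuration above.
	srv, err := natsserver.NewServer(&natsserver.Options{
		JetStream: true,
		StoreDir:  "./jetstream-storage",
	})
	if err != nil {
		log.Fatal(err)
	}
	go srv.Start()
	if !srv.ReadyForConnections(10 * time.Second) {
		log.Fatal("embedded NATS server did not start in time")
	}

	// Components connect to the in-process server exactly as they would to an
	// external standalone deployment.
	nc, err := nats.Connect(srv.ClientURL())
	if err != nil {
		log.Fatal(err)
	}
	defer nc.Drain()

	if _, err := nc.JetStream(); err != nil {
		log.Fatal(err)
	}
	log.Println("embedded NATS JetStream node listening at", srv.ClientURL())
}
```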
- naffka_database: - connection_string: postgresql://dendrite:itsasecret@postgres/dendrite_naffka?sslmode=disable - max_open_conns: 10 - max_idle_conns: 2 - conn_max_lifetime: -1 - # Configuration for Prometheus metric collection. metrics: # Whether or not Prometheus metrics are enabled. @@ -266,6 +265,19 @@ media_api: height: 480 method: scale +# Configuration for experimental MSC's +mscs: + # A list of enabled MSC's + # Currently valid values are: + # - msc2836 (Threading, see https://github.com/matrix-org/matrix-doc/pull/2836) + # - msc2946 (Spaces Summary, see https://github.com/matrix-org/matrix-doc/pull/2946) + mscs: [] + database: + connection_string: postgresql://dendrite:itsasecret@postgres/dendrite_mscs?sslmode=disable + max_open_conns: 5 + max_idle_conns: 2 + conn_max_lifetime: -1 + # Configuration for the Room Server. room_server: internal_api: diff --git a/build/docker/docker-compose.deps.yml b/build/docker/docker-compose.deps.yml deleted file mode 100644 index aa0651889..000000000 --- a/build/docker/docker-compose.deps.yml +++ /dev/null @@ -1,42 +0,0 @@ -version: "3.4" -services: - # PostgreSQL is needed for both polylith and monolith modes. - postgres: - hostname: postgres - image: postgres:14 - restart: always - volumes: - - ./postgres/create_db.sh:/docker-entrypoint-initdb.d/20-create_db.sh - # To persist your PostgreSQL databases outside of the Docker image, to - # prevent data loss, you will need to add something like this: - # - ./path/to/persistent/storage:/var/lib/postgresql/data - environment: - POSTGRES_PASSWORD: itsasecret - POSTGRES_USER: dendrite - networks: - - internal - - # Zookeeper is only needed for polylith mode! - zookeeper: - hostname: zookeeper - image: zookeeper - networks: - - internal - - # Kafka is only needed for polylith mode! 
- kafka: - container_name: dendrite_kafka - hostname: kafka - image: wurstmeister/kafka - environment: - KAFKA_ADVERTISED_HOST_NAME: "kafka" - KAFKA_DELETE_TOPIC_ENABLE: "true" - KAFKA_ZOOKEEPER_CONNECT: "zookeeper:2181" - depends_on: - - zookeeper - networks: - - internal - -networks: - internal: - attachable: true diff --git a/build/docker/docker-compose.monolith.yml b/build/docker/docker-compose.monolith.yml index ef8975b9b..1a8fe4eee 100644 --- a/build/docker/docker-compose.monolith.yml +++ b/build/docker/docker-compose.monolith.yml @@ -1,5 +1,25 @@ version: "3.4" services: + postgres: + hostname: postgres + image: postgres:14 + restart: always + volumes: + - ./postgres/create_db.sh:/docker-entrypoint-initdb.d/20-create_db.sh + # To persist your PostgreSQL databases outside of the Docker image, + # to prevent data loss, modify the following ./path_to path: + - ./path_to/postgresql:/var/lib/postgresql/data + environment: + POSTGRES_PASSWORD: itsasecret + POSTGRES_USER: dendrite + healthcheck: + test: ["CMD-SHELL", "pg_isready -U dendrite"] + interval: 5s + timeout: 5s + retries: 5 + networks: + - internal + monolith: hostname: monolith image: matrixdotorg/dendrite-monolith:latest @@ -13,8 +33,11 @@ services: volumes: - ./config:/etc/dendrite - ./media:/var/dendrite/media + depends_on: + - postgres networks: - internal + restart: unless-stopped networks: internal: diff --git a/build/docker/docker-compose.polylith.yml b/build/docker/docker-compose.polylith.yml index 9bbd6a8f7..207d0451a 100644 --- a/build/docker/docker-compose.polylith.yml +++ b/build/docker/docker-compose.polylith.yml @@ -1,13 +1,51 @@ version: "3.4" services: + postgres: + hostname: postgres + image: postgres:14 + restart: always + volumes: + - ./postgres/create_db.sh:/docker-entrypoint-initdb.d/20-create_db.sh + # To persist your PostgreSQL databases outside of the Docker image, + # to prevent data loss, modify the following ./path_to path: + - ./path_to/postgresql:/var/lib/postgresql/data + environment: + POSTGRES_PASSWORD: itsasecret + POSTGRES_USER: dendrite + healthcheck: + test: ["CMD-SHELL", "pg_isready -U dendrite"] + interval: 5s + timeout: 5s + retries: 5 + networks: + - internal + + jetstream: + hostname: jetstream + image: nats:latest + command: | + --jetstream + --store_dir /var/lib/nats + --cluster_name Dendrite + volumes: + # To persist your NATS JetStream streams outside of the Docker image, + # prevent data loss, modify the following ./path_to path: + - ./path_to/nats:/var/lib/nats + networks: + - internal + client_api: hostname: client_api image: matrixdotorg/dendrite-polylith:latest command: clientapi volumes: - ./config:/etc/dendrite + depends_on: + - jetstream + - postgres networks: - internal + restart: unless-stopped media_api: hostname: media_api @@ -18,6 +56,7 @@ services: - ./media:/var/dendrite/media networks: - internal + restart: unless-stopped sync_api: hostname: sync_api @@ -25,8 +64,12 @@ services: command: syncapi volumes: - ./config:/etc/dendrite + depends_on: + - jetstream + - postgres networks: - internal + restart: unless-stopped room_server: hostname: room_server @@ -34,8 +77,12 @@ services: command: roomserver volumes: - ./config:/etc/dendrite + depends_on: + - jetstream + - postgres networks: - internal + restart: unless-stopped edu_server: hostname: edu_server @@ -43,8 +90,11 @@ services: command: eduserver volumes: - ./config:/etc/dendrite + depends_on: + - jetstream networks: - internal + restart: unless-stopped federation_api: hostname: federation_api @@ -52,8 +102,12 @@ 
services: command: federationapi volumes: - ./config:/etc/dendrite + depends_on: + - jetstream + - postgres networks: - internal + restart: unless-stopped key_server: hostname: key_server @@ -61,8 +115,12 @@ services: command: keyserver volumes: - ./config:/etc/dendrite + depends_on: + - jetstream + - postgres networks: - internal + restart: unless-stopped user_api: hostname: user_api @@ -70,8 +128,12 @@ services: command: userapi volumes: - ./config:/etc/dendrite + depends_on: + - jetstream + - postgres networks: - internal + restart: unless-stopped appservice_api: hostname: appservice_api @@ -82,8 +144,11 @@ services: networks: - internal depends_on: + - jetstream + - postgres - room_server - user_api + restart: unless-stopped networks: internal: diff --git a/build/docker/postgres/create_db.sh b/build/docker/postgres/create_db.sh index a7107e272..9093738e4 100755 --- a/build/docker/postgres/create_db.sh +++ b/build/docker/postgres/create_db.sh @@ -1,5 +1,5 @@ #!/bin/sh -for db in userapi_accounts userapi_devices mediaapi syncapi roomserver keyserver federationapi appservice naffka; do +for db in userapi_accounts userapi_devices mediaapi syncapi roomserver keyserver federationapi appservice mscs; do createdb -U dendrite -O dendrite dendrite_$db done diff --git a/build/gobind-pinecone/monolith.go b/build/gobind-pinecone/monolith.go index 1f7a889d9..1c9c0ac4e 100644 --- a/build/gobind-pinecone/monolith.go +++ b/build/gobind-pinecone/monolith.go @@ -281,8 +281,7 @@ func (m *DendriteMonolith) Start() { cfg.Global.ServerName = gomatrixserverlib.ServerName(hex.EncodeToString(pk)) cfg.Global.PrivateKey = sk cfg.Global.KeyID = gomatrixserverlib.KeyID(signing.KeyID) - cfg.Global.Kafka.UseNaffka = true - cfg.Global.Kafka.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/%s-naffka.db", m.StorageDirectory, prefix)) + cfg.Global.JetStream.StoragePath = config.Path(fmt.Sprintf("file:%s/%s", m.StorageDirectory, prefix)) cfg.UserAPI.AccountDatabase.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/%s-account.db", m.StorageDirectory, prefix)) cfg.UserAPI.DeviceDatabase.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/%s-device.db", m.StorageDirectory, prefix)) cfg.MediaAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/%s-mediaapi.db", m.CacheDirectory, prefix)) diff --git a/build/gobind-yggdrasil/monolith.go b/build/gobind-yggdrasil/monolith.go index 582a23728..1aae418d1 100644 --- a/build/gobind-yggdrasil/monolith.go +++ b/build/gobind-yggdrasil/monolith.go @@ -86,8 +86,7 @@ func (m *DendriteMonolith) Start() { cfg.Global.ServerName = gomatrixserverlib.ServerName(ygg.DerivedServerName()) cfg.Global.PrivateKey = ygg.PrivateKey() cfg.Global.KeyID = gomatrixserverlib.KeyID(signing.KeyID) - cfg.Global.Kafka.UseNaffka = true - cfg.Global.Kafka.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/dendrite-p2p-naffka.db", m.StorageDirectory)) + cfg.Global.JetStream.StoragePath = config.Path(fmt.Sprintf("file:%s/", m.StorageDirectory)) cfg.UserAPI.AccountDatabase.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/dendrite-p2p-account.db", m.StorageDirectory)) cfg.UserAPI.DeviceDatabase.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/dendrite-p2p-device.db", m.StorageDirectory)) cfg.MediaAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/dendrite-p2p-mediaapi.db", m.StorageDirectory)) diff --git a/build/scripts/Complement.Dockerfile b/build/scripts/Complement.Dockerfile index de51f16da..55b381ba5 100644 --- 
a/build/scripts/Complement.Dockerfile +++ b/build/scripts/Complement.Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.13-stretch as build +FROM golang:1.16-stretch as build RUN apt-get update && apt-get install -y sqlite3 WORKDIR /build diff --git a/clientapi/clientapi.go b/clientapi/clientapi.go index 64a7aa5e2..7c772125a 100644 --- a/clientapi/clientapi.go +++ b/clientapi/clientapi.go @@ -26,7 +26,7 @@ import ( keyserverAPI "github.com/matrix-org/dendrite/keyserver/api" roomserverAPI "github.com/matrix-org/dendrite/roomserver/api" "github.com/matrix-org/dendrite/setup/config" - "github.com/matrix-org/dendrite/setup/kafka" + "github.com/matrix-org/dendrite/setup/jetstream" userapi "github.com/matrix-org/dendrite/userapi/api" "github.com/matrix-org/dendrite/userapi/storage/accounts" "github.com/matrix-org/gomatrixserverlib" @@ -49,11 +49,11 @@ func AddPublicRoutes( extRoomsProvider api.ExtraPublicRoomsProvider, mscCfg *config.MSCs, ) { - _, producer := kafka.SetupConsumerProducer(&cfg.Matrix.Kafka) + js, _, _ := jetstream.Prepare(&cfg.Matrix.JetStream) syncProducer := &producers.SyncAPIProducer{ - Producer: producer, - Topic: cfg.Matrix.Kafka.TopicFor(config.TopicOutputClientData), + JetStream: js, + Topic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputClientData), } routing.Setup( diff --git a/clientapi/producers/syncapi.go b/clientapi/producers/syncapi.go index 6ab8eef28..bd6af5f1f 100644 --- a/clientapi/producers/syncapi.go +++ b/clientapi/producers/syncapi.go @@ -17,39 +17,42 @@ package producers import ( "encoding/json" - "github.com/Shopify/sarama" "github.com/matrix-org/dendrite/internal/eventutil" + "github.com/matrix-org/dendrite/setup/jetstream" + "github.com/nats-io/nats.go" log "github.com/sirupsen/logrus" ) // SyncAPIProducer produces events for the sync API server to consume type SyncAPIProducer struct { - Topic string - Producer sarama.SyncProducer + Topic string + JetStream nats.JetStreamContext } // SendData sends account data to the sync API server func (p *SyncAPIProducer) SendData(userID string, roomID string, dataType string) error { - var m sarama.ProducerMessage + m := &nats.Msg{ + Subject: p.Topic, + Header: nats.Header{}, + } + m.Header.Set(jetstream.UserID, userID) data := eventutil.AccountData{ RoomID: roomID, Type: dataType, } - value, err := json.Marshal(data) + var err error + m.Data, err = json.Marshal(data) if err != nil { return err } - m.Topic = string(p.Topic) - m.Key = sarama.StringEncoder(userID) - m.Value = sarama.ByteEncoder(value) log.WithFields(log.Fields{ "user_id": userID, "room_id": roomID, "data_type": dataType, }).Infof("Producing to topic '%s'", p.Topic) - _, _, err = p.Producer.SendMessage(&m) + _, err = p.JetStream.PublishMsg(m) return err } diff --git a/clientapi/routing/createroom.go b/clientapi/routing/createroom.go index 8f96c3d35..e89d8ff24 100644 --- a/clientapi/routing/createroom.go +++ b/clientapi/routing/createroom.go @@ -451,22 +451,20 @@ func createRoom( util.GetLogger(req.Context()).WithError(err).Error("authEvents.AddEvent failed") return jsonerror.InternalServerError() } + } - accumulated := gomatrixserverlib.UnwrapEventHeaders(builtEvents) - if err = roomserverAPI.SendEventWithState( - req.Context(), - rsAPI, - roomserverAPI.KindNew, - &gomatrixserverlib.RespState{ - StateEvents: accumulated, - AuthEvents: accumulated, - }, - ev.Headered(roomVersion), - nil, - ); err != nil { - util.GetLogger(req.Context()).WithError(err).Error("SendEventWithState failed") - return jsonerror.InternalServerError() - } + inputs := 
make([]roomserverAPI.InputRoomEvent, 0, len(builtEvents)) + for _, event := range builtEvents { + inputs = append(inputs, roomserverAPI.InputRoomEvent{ + Kind: roomserverAPI.KindNew, + Event: event, + Origin: cfg.Matrix.ServerName, + SendAsServer: roomserverAPI.DoNotSendToOtherServers, + }) + } + if err = roomserverAPI.SendInputRoomEvents(req.Context(), rsAPI, inputs, false); err != nil { + util.GetLogger(req.Context()).WithError(err).Error("roomserverAPI.SendInputRoomEvents failed") + return jsonerror.InternalServerError() } // TODO(#269): Reserve room alias while we create the room. This stops us diff --git a/clientapi/routing/membership.go b/clientapi/routing/membership.go index 33fb38831..4ce820797 100644 --- a/clientapi/routing/membership.go +++ b/clientapi/routing/membership.go @@ -109,7 +109,9 @@ func sendMembership(ctx context.Context, accountDB accounts.Database, device *us roomserverAPI.KindNew, []*gomatrixserverlib.HeaderedEvent{event.Event.Headered(roomVer)}, cfg.Matrix.ServerName, + cfg.Matrix.ServerName, nil, + false, ); err != nil { util.GetLogger(ctx).WithError(err).Error("SendEvents failed") return jsonerror.InternalServerError() diff --git a/clientapi/routing/profile.go b/clientapi/routing/profile.go index 7bea35e50..017facd20 100644 --- a/clientapi/routing/profile.go +++ b/clientapi/routing/profile.go @@ -169,7 +169,7 @@ func SetAvatarURL( return jsonerror.InternalServerError() } - if err := api.SendEvents(req.Context(), rsAPI, api.KindNew, events, cfg.Matrix.ServerName, nil); err != nil { + if err := api.SendEvents(req.Context(), rsAPI, api.KindNew, events, cfg.Matrix.ServerName, cfg.Matrix.ServerName, nil, false); err != nil { util.GetLogger(req.Context()).WithError(err).Error("SendEvents failed") return jsonerror.InternalServerError() } @@ -286,7 +286,7 @@ func SetDisplayName( return jsonerror.InternalServerError() } - if err := api.SendEvents(req.Context(), rsAPI, api.KindNew, events, cfg.Matrix.ServerName, nil); err != nil { + if err := api.SendEvents(req.Context(), rsAPI, api.KindNew, events, cfg.Matrix.ServerName, cfg.Matrix.ServerName, nil, false); err != nil { util.GetLogger(req.Context()).WithError(err).Error("SendEvents failed") return jsonerror.InternalServerError() } diff --git a/clientapi/routing/redaction.go b/clientapi/routing/redaction.go index c25ca4eff..01ea818ab 100644 --- a/clientapi/routing/redaction.go +++ b/clientapi/routing/redaction.go @@ -120,7 +120,7 @@ func SendRedaction( JSON: jsonerror.NotFound("Room does not exist"), } } - if err = roomserverAPI.SendEvents(context.Background(), rsAPI, roomserverAPI.KindNew, []*gomatrixserverlib.HeaderedEvent{e}, cfg.Matrix.ServerName, nil); err != nil { + if err = roomserverAPI.SendEvents(context.Background(), rsAPI, roomserverAPI.KindNew, []*gomatrixserverlib.HeaderedEvent{e}, cfg.Matrix.ServerName, cfg.Matrix.ServerName, nil, false); err != nil { util.GetLogger(req.Context()).WithError(err).Errorf("failed to SendEvents") return jsonerror.InternalServerError() } diff --git a/clientapi/routing/sendevent.go b/clientapi/routing/sendevent.go index 204d2592a..606107b9f 100644 --- a/clientapi/routing/sendevent.go +++ b/clientapi/routing/sendevent.go @@ -121,7 +121,9 @@ func SendEvent( e.Headered(verRes.RoomVersion), }, cfg.Matrix.ServerName, + cfg.Matrix.ServerName, txnAndSessionID, + false, ); err != nil { util.GetLogger(req.Context()).WithError(err).Error("SendEvents failed") return jsonerror.InternalServerError() diff --git a/clientapi/threepid/invites.go b/clientapi/threepid/invites.go index 
53cd6b8ca..db62ce060 100644 --- a/clientapi/threepid/invites.go +++ b/clientapi/threepid/invites.go @@ -366,6 +366,8 @@ func emit3PIDInviteEvent( event.Headered(queryRes.RoomVersion), }, cfg.Matrix.ServerName, + cfg.Matrix.ServerName, nil, + false, ) } diff --git a/cmd/dendrite-demo-libp2p/main.go b/cmd/dendrite-demo-libp2p/main.go index 5f26e00c6..7cbd0b6d4 100644 --- a/cmd/dendrite-demo-libp2p/main.go +++ b/cmd/dendrite-demo-libp2p/main.go @@ -123,8 +123,8 @@ func main() { cfg.Global.ServerName = "p2p" cfg.Global.PrivateKey = privKey cfg.Global.KeyID = gomatrixserverlib.KeyID(fmt.Sprintf("ed25519:%s", *instanceName)) - cfg.Global.Kafka.UseNaffka = true cfg.FederationAPI.FederationMaxRetries = 6 + cfg.Global.JetStream.StoragePath = config.Path(fmt.Sprintf("%s/", *instanceName)) cfg.UserAPI.AccountDatabase.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-account.db", *instanceName)) cfg.UserAPI.DeviceDatabase.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-device.db", *instanceName)) cfg.MediaAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-mediaapi.db", *instanceName)) @@ -132,7 +132,6 @@ func main() { cfg.RoomServer.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-roomserver.db", *instanceName)) cfg.FederationAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-federationapi.db", *instanceName)) cfg.AppServiceAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-appservice.db", *instanceName)) - cfg.Global.Kafka.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-naffka.db", *instanceName)) cfg.KeyServer.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-e2ekey.db", *instanceName)) cfg.MSCs.MSCs = []string{"msc2836"} cfg.MSCs.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-mscs.db", *instanceName)) diff --git a/cmd/dendrite-demo-pinecone/main.go b/cmd/dendrite-demo-pinecone/main.go index 180f8ae02..a897dcd1a 100644 --- a/cmd/dendrite-demo-pinecone/main.go +++ b/cmd/dendrite-demo-pinecone/main.go @@ -158,7 +158,7 @@ func main() { cfg.Global.ServerName = gomatrixserverlib.ServerName(hex.EncodeToString(pk)) cfg.Global.PrivateKey = sk cfg.Global.KeyID = gomatrixserverlib.KeyID(signing.KeyID) - cfg.Global.Kafka.UseNaffka = true + cfg.Global.JetStream.StoragePath = config.Path(fmt.Sprintf("%s/", *instanceName)) cfg.UserAPI.AccountDatabase.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-account.db", *instanceName)) cfg.UserAPI.DeviceDatabase.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-device.db", *instanceName)) cfg.MediaAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-mediaapi.db", *instanceName)) @@ -167,7 +167,6 @@ func main() { cfg.KeyServer.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-keyserver.db", *instanceName)) cfg.FederationAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-federationapi.db", *instanceName)) cfg.AppServiceAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-appservice.db", *instanceName)) - cfg.Global.Kafka.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-naffka.db", *instanceName)) cfg.MSCs.MSCs = []string{"msc2836", "msc2946"} if err := cfg.Derive(); err != nil { panic(err) diff --git a/cmd/dendrite-demo-yggdrasil/main.go b/cmd/dendrite-demo-yggdrasil/main.go index b8ac3f726..52e69ee59 100644 --- a/cmd/dendrite-demo-yggdrasil/main.go +++ b/cmd/dendrite-demo-yggdrasil/main.go @@ -77,7 +77,7 @@ func 
main() { cfg.Global.ServerName = gomatrixserverlib.ServerName(ygg.DerivedServerName()) cfg.Global.PrivateKey = ygg.PrivateKey() cfg.Global.KeyID = gomatrixserverlib.KeyID(signing.KeyID) - cfg.Global.Kafka.UseNaffka = true + cfg.Global.JetStream.StoragePath = config.Path(fmt.Sprintf("%s/", *instanceName)) cfg.UserAPI.AccountDatabase.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-account.db", *instanceName)) cfg.UserAPI.DeviceDatabase.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-device.db", *instanceName)) cfg.MediaAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-mediaapi.db", *instanceName)) @@ -86,7 +86,6 @@ func main() { cfg.KeyServer.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-keyserver.db", *instanceName)) cfg.FederationAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-federationapi.db", *instanceName)) cfg.AppServiceAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-appservice.db", *instanceName)) - cfg.Global.Kafka.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-naffka.db", *instanceName)) cfg.MSCs.MSCs = []string{"msc2836"} cfg.MSCs.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-mscs.db", *instanceName)) if err = cfg.Derive(); err != nil { diff --git a/cmd/dendrite-monolith-server/main.go b/cmd/dendrite-monolith-server/main.go index 088517343..4d0598f3f 100644 --- a/cmd/dendrite-monolith-server/main.go +++ b/cmd/dendrite-monolith-server/main.go @@ -99,10 +99,6 @@ func main() { } keyRing := fsAPI.KeyRing() - // The underlying roomserver implementation needs to be able to call the fedsender. - // This is different to rsAPI which can be the http client which doesn't need this dependency - rsImpl.SetFederationAPI(fsAPI, keyRing) - keyImpl := keyserver.NewInternalAPI(base, &base.Cfg.KeyServer, fsAPI) keyAPI := keyImpl if base.UseHTTPAPIs { diff --git a/cmd/dendrite-polylith-multi/personalities/federationapi.go b/cmd/dendrite-polylith-multi/personalities/federationapi.go index c50973793..44357d660 100644 --- a/cmd/dendrite-polylith-multi/personalities/federationapi.go +++ b/cmd/dendrite-polylith-multi/personalities/federationapi.go @@ -23,10 +23,10 @@ import ( func FederationAPI(base *basepkg.BaseDendrite, cfg *config.Dendrite) { userAPI := base.UserAPIClient() federation := base.CreateFederationClient() - fsAPI := base.FederationAPIHTTPClient() - keyRing := fsAPI.KeyRing() rsAPI := base.RoomserverHTTPClient() keyAPI := base.KeyServerHTTPClient() + fsAPI := federationapi.NewInternalAPI(base, federation, rsAPI, base.Caches, nil, true) + keyRing := fsAPI.KeyRing() federationapi.AddPublicRoutes( base.PublicFederationAPIMux, base.PublicKeyAPIMux, base.PublicWellKnownAPIMux, @@ -35,8 +35,7 @@ func FederationAPI(base *basepkg.BaseDendrite, cfg *config.Dendrite) { &base.Cfg.MSCs, nil, ) - intAPI := federationapi.NewInternalAPI(base, federation, rsAPI, base.Caches, nil, true) - federationapi.AddInternalRoutes(base.InternalAPIMux, intAPI) + federationapi.AddInternalRoutes(base.InternalAPIMux, fsAPI) base.SetupAndServeHTTP( base.Cfg.FederationAPI.InternalAPI.Listen, diff --git a/cmd/dendrite-upgrade-tests/main.go b/cmd/dendrite-upgrade-tests/main.go index aa8c7fdce..3241234ac 100644 --- a/cmd/dendrite-upgrade-tests/main.go +++ b/cmd/dendrite-upgrade-tests/main.go @@ -48,7 +48,7 @@ const HEAD = "HEAD" // due to the error: // When using COPY with more than one source file, the destination must be a directory and end with a / // We need to run a postgres 
anyway, so use the dockerfile associated with Complement instead. -const Dockerfile = `FROM golang:1.13-stretch as build +const Dockerfile = `FROM golang:1.16-stretch as build RUN apt-get update && apt-get install -y postgresql WORKDIR /build @@ -189,7 +189,9 @@ func buildDendrite(httpClient *http.Client, dockerClient *client.Client, tmpDir, if err := decoder.Decode(&dl); err != nil { return "", fmt.Errorf("failed to decode build image output line: %w", err) } - log.Printf("%s: %s", branchOrTagName, dl.Stream) + if len(strings.TrimSpace(dl.Stream)) > 0 { + log.Printf("%s: %s", branchOrTagName, dl.Stream) + } if dl.Aux != nil { imgID, ok := dl.Aux["ID"] if ok { @@ -425,8 +427,10 @@ func cleanup(dockerClient *client.Client) { // ignore all errors, we are just cleaning up and don't want to fail just because we fail to cleanup containers, _ := dockerClient.ContainerList(context.Background(), types.ContainerListOptions{ Filters: label(dendriteUpgradeTestLabel), + All: true, }) for _, c := range containers { + log.Printf("Removing container: %v %v\n", c.ID, c.Names) s := time.Second _ = dockerClient.ContainerStop(context.Background(), c.ID, &s) _ = dockerClient.ContainerRemove(context.Background(), c.ID, types.ContainerRemoveOptions{ diff --git a/cmd/dendritejs-pinecone/main.go b/cmd/dendritejs-pinecone/main.go index d83e9ce0a..62eea78f2 100644 --- a/cmd/dendritejs-pinecone/main.go +++ b/cmd/dendritejs-pinecone/main.go @@ -170,8 +170,7 @@ func startup() { cfg.RoomServer.Database.ConnectionString = "file:/idb/dendritejs_roomserver.db" cfg.SyncAPI.Database.ConnectionString = "file:/idb/dendritejs_syncapi.db" cfg.KeyServer.Database.ConnectionString = "file:/idb/dendritejs_e2ekey.db" - cfg.Global.Kafka.UseNaffka = true - cfg.Global.Kafka.Database.ConnectionString = "file:/idb/dendritejs_naffka.db" + cfg.Global.JetStream.StoragePath = "file:/idb/dendritejs/" cfg.Global.TrustedIDServers = []string{} cfg.Global.KeyID = gomatrixserverlib.KeyID(signing.KeyID) cfg.Global.PrivateKey = sk diff --git a/cmd/dendritejs/main.go b/cmd/dendritejs/main.go index 13a0872d0..59de07cd0 100644 --- a/cmd/dendritejs/main.go +++ b/cmd/dendritejs/main.go @@ -173,8 +173,7 @@ func main() { cfg.RoomServer.Database.ConnectionString = "file:/idb/dendritejs_roomserver.db" cfg.SyncAPI.Database.ConnectionString = "file:/idb/dendritejs_syncapi.db" cfg.KeyServer.Database.ConnectionString = "file:/idb/dendritejs_e2ekey.db" - cfg.Global.Kafka.UseNaffka = true - cfg.Global.Kafka.Database.ConnectionString = "file:/idb/dendritejs_naffka.db" + cfg.Global.JetStream.StoragePath = "file:/idb/dendritejs/" cfg.Global.TrustedIDServers = []string{ "matrix.org", "vector.im", } diff --git a/cmd/generate-config/main.go b/cmd/generate-config/main.go index ff91e753f..a79470d83 100644 --- a/cmd/generate-config/main.go +++ b/cmd/generate-config/main.go @@ -16,13 +16,14 @@ func main() { dbURI := flag.String("db", "", "The DB URI to use for all components if not SQLite files") flag.Parse() - cfg := &config.Dendrite{} + cfg := &config.Dendrite{ + Version: config.Version, + } cfg.Defaults(true) if *serverName != "" { cfg.Global.ServerName = gomatrixserverlib.ServerName(*serverName) } if *dbURI != "" { - cfg.Global.Kafka.Database.ConnectionString = config.DataSource(*dbURI) cfg.AppServiceAPI.Database.ConnectionString = config.DataSource(*dbURI) cfg.FederationAPI.Database.ConnectionString = config.DataSource(*dbURI) cfg.KeyServer.Database.ConnectionString = config.DataSource(*dbURI) @@ -87,7 +88,9 @@ func main() { cfg.FederationAPI.KeyPerspectives = 
config.KeyPerspectives{} cfg.MSCs.MSCs = []string{"msc2836", "msc2946", "msc2444", "msc2753"} cfg.Logging[0].Level = "trace" + cfg.Logging[0].Type = "std" cfg.UserAPI.BCryptCost = bcrypt.MinCost + cfg.Global.JetStream.InMemory = true } j, err := yaml.Marshal(cfg) diff --git a/dendrite-config.yaml b/dendrite-config.yaml index bf3811a28..38b146d70 100644 --- a/dendrite-config.yaml +++ b/dendrite-config.yaml @@ -28,7 +28,7 @@ # connection can be idle in seconds - a negative value is unlimited. # The version of the configuration file. -version: 1 +version: 2 # Global Matrix configuration. This configuration applies to all components. global: @@ -68,35 +68,28 @@ global: # to other servers and the federation API will not be exposed. disable_federation: false - # Configuration for Kafka/Naffka. - kafka: - # List of Kafka broker addresses to connect to. This is not needed if using - # Naffka in monolith mode. + # Configuration for NATS JetStream + jetstream: + # A list of NATS Server addresses to connect to. If none are specified, an + # internal NATS server will be started automatically when running Dendrite + # in monolith mode. It is required to specify the address of at least one + # NATS Server node if running in polylith mode. addresses: - - localhost:2181 + # - localhost:4222 - # The prefix to use for Kafka topic names for this homeserver. Change this only if - # you are running more than one Dendrite homeserver on the same Kafka deployment. + # Keep all NATS streams in memory, rather than persisting it to the storage + # path below. This option is present primarily for integration testing and + # should not be used on a real world Dendrite deployment. + in_memory: false + + # Persistent directory to store JetStream streams in. This directory + # should be preserved across Dendrite restarts. + storage_path: ./ + + # The prefix to use for stream names for this homeserver - really only + # useful if running more than one Dendrite on the same NATS deployment. topic_prefix: Dendrite - # Whether to use Naffka instead of Kafka. This is only available in monolith - # mode, but means that you can run a single-process server without requiring - # Kafka. - use_naffka: true - - # The max size a Kafka message is allowed to use. - # You only need to change this value, if you encounter issues with too large messages. - # Must be less than/equal to "max.message.bytes" configured in Kafka. - # Defaults to 8388608 bytes. - # max_message_bytes: 8388608 - - # Naffka database options. Not required when using Kafka. - naffka_database: - connection_string: file:naffka.db - max_open_conns: 10 - max_idle_conns: 2 - conn_max_lifetime: -1 - # Configuration for Prometheus metric collection. metrics: # Whether or not Prometheus metrics are enabled. @@ -121,8 +114,8 @@ global: # Configuration for the Appservice API. app_service_api: internal_api: - listen: http://localhost:7777 - connect: http://localhost:7777 + listen: http://localhost:7777 # Only used in polylith deployments + connect: http://localhost:7777 # Only used in polylith deployments database: connection_string: file:appservice.db max_open_conns: 10 @@ -140,8 +133,8 @@ app_service_api: # Configuration for the Client API. client_api: internal_api: - listen: http://localhost:7771 - connect: http://localhost:7771 + listen: http://localhost:7771 # Only used in polylith deployments + connect: http://localhost:7771 # Only used in polylith deployments external_api: listen: http://[::]:8071 @@ -181,14 +174,14 @@ client_api: # Configuration for the EDU server. 
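To make the `in_memory`, `storage_path` and `topic_prefix` options above more concrete, the sketch below shows how a prefixed stream could be declared with nats.go, switching between file-backed and memory-backed storage. This is an assumption about how such options plausibly map onto JetStream stream configuration, not a copy of Dendrite's internals; the stream name and subject are invented for the example.

```go
package example

import "github.com/nats-io/nats.go"

// createStream is a hypothetical helper: it declares a stream whose name and
// subject carry the configured prefix, stored on disk by default or in memory
// when an in_memory-style flag is set (suitable only for integration tests).
func createStream(js nats.JetStreamContext, topicPrefix string, inMemory bool) error {
	storage := nats.FileStorage // persisted under the server's storage directory
	if inMemory {
		storage = nats.MemoryStorage // kept in RAM only, lost on restart
	}
	_, err := js.AddStream(&nats.StreamConfig{
		Name:     topicPrefix + "OutputClientData", // e.g. "DendriteOutputClientData"
		Subjects: []string{topicPrefix + "OutputClientData"},
		Storage:  storage,
	})
	return err
}
```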
edu_server: internal_api: - listen: http://localhost:7778 - connect: http://localhost:7778 + listen: http://localhost:7778 # Only used in polylith deployments + connect: http://localhost:7778 # Only used in polylith deployments # Configuration for the Federation API. federation_api: internal_api: - listen: http://localhost:7772 - connect: http://localhost:7772 + listen: http://localhost:7772 # Only used in polylith deployments + connect: http://localhost:7772 # Only used in polylith deployments external_api: listen: http://[::]:8072 database: @@ -237,8 +230,8 @@ federation_api: # Configuration for the Key Server (for end-to-end encryption). key_server: internal_api: - listen: http://localhost:7779 - connect: http://localhost:7779 + listen: http://localhost:7779 # Only used in polylith deployments + connect: http://localhost:7779 # Only used in polylith deployments database: connection_string: file:keyserver.db max_open_conns: 10 @@ -248,8 +241,8 @@ key_server: # Configuration for the Media API. media_api: internal_api: - listen: http://localhost:7774 - connect: http://localhost:7774 + listen: http://localhost:7774 # Only used in polylith deployments + connect: http://localhost:7774 # Only used in polylith deployments external_api: listen: http://[::]:8074 database: @@ -300,8 +293,8 @@ mscs: # Configuration for the Room Server. room_server: internal_api: - listen: http://localhost:7770 - connect: http://localhost:7770 + listen: http://localhost:7770 # Only used in polylith deployments + connect: http://localhost:7770 # Only used in polylith deployments database: connection_string: file:roomserver.db max_open_conns: 10 @@ -311,8 +304,8 @@ room_server: # Configuration for the Sync API. sync_api: internal_api: - listen: http://localhost:7773 - connect: http://localhost:7773 + listen: http://localhost:7773 # Only used in polylith deployments + connect: http://localhost:7773 # Only used in polylith deployments external_api: listen: http://[::]:8073 database: @@ -336,8 +329,8 @@ user_api: # This value can be low if performing tests or on embedded Dendrite instances (e.g WASM builds) # bcrypt_cost: 10 internal_api: - listen: http://localhost:7781 - connect: http://localhost:7781 + listen: http://localhost:7781 # Only used in polylith deployments + connect: http://localhost:7781 # Only used in polylith deployments account_database: connection_string: file:userapi_accounts.db max_open_conns: 10 @@ -375,6 +368,7 @@ logging: - type: std level: info - type: file + # The logging level, must be one of debug, info, warn, error, fatal, panic. level: info params: path: ./logs diff --git a/docs/FAQ.md b/docs/FAQ.md index 37c6b34c5..149efe619 100644 --- a/docs/FAQ.md +++ b/docs/FAQ.md @@ -39,6 +39,14 @@ No, not yet. This is a planned feature. Possibly - Dendrite does have some application service support but it is not well tested. Please let us know by raising a GitHub issue if you try it and run into problems. +Bridges known to work (as of v0.5.1): +- [Telegram](https://docs.mau.fi/bridges/python/telegram/index.html) +- [WhatsApp](https://docs.mau.fi/bridges/go/whatsapp/index.html) +- [Signal](https://docs.mau.fi/bridges/python/signal/index.html) +- [probably all other mautrix bridges](https://docs.mau.fi/bridges/) + +Remember to add the config file(s) to the `app_service_api` [config](https://github.com/matrix-org/dendrite/blob/de38be469a23813921d01bef3e14e95faab2a59e/dendrite-config.yaml#L130-L131). + ### Is it possible to prevent communication with the outside world? 
Yes, you can do this by disabling federation - set `disable_federation` to `true` in the `global` section of the Dendrite configuration file. diff --git a/docs/INSTALL.md b/docs/INSTALL.md index 1752d7e89..2afb43c6a 100644 --- a/docs/INSTALL.md +++ b/docs/INSTALL.md @@ -2,21 +2,23 @@ Dendrite can be run in one of two configurations: -* **Polylith mode**: A cluster of individual components, dealing with different - aspects of the Matrix protocol (see [WIRING.md](WIRING-Current.md)). Components communicate - with each other using internal HTTP APIs and [Apache Kafka](https://kafka.apache.org). - This will almost certainly be the preferred model for large-scale deployments. - * **Monolith mode**: All components run in the same process. In this mode, - Kafka is completely optional and can instead be replaced with an in-process - lightweight implementation called [Naffka](https://github.com/matrix-org/naffka). This - will usually be the preferred model for low-volume, low-user or experimental deployments. + it is possible to run an in-process [NATS Server](https://github.com/nats-io/nats-server) + instead of running a standalone deployment. This will usually be the preferred model for + low-to-mid volume deployments, providing the best balance between performance and resource usage. -For most deployments, it is **recommended to run in monolith mode with PostgreSQL databases**. +* **Polylith mode**: A cluster of individual components running in their own processes, dealing + with different aspects of the Matrix protocol (see [WIRING.md](WIRING-Current.md)). Components + communicate with each other using internal HTTP APIs and [NATS Server](https://github.com/nats-io/nats-server). + This will almost certainly be the preferred model for very large deployments but scalability + comes with a cost. API calls are expensive and therefore a polylith deployment may end up using + disproportionately more resources for a smaller number of users compared to a monolith deployment. + +In almost all cases, it is **recommended to run in monolith mode with PostgreSQL databases**. Regardless of whether you are running in polylith or monolith mode, each Dendrite component that -requires storage has its own database. Both Postgres and SQLite are supported and can be -mixed-and-matched across components as needed in the configuration file. +requires storage has its own database connections. Both Postgres and SQLite are supported and can +be mixed-and-matched across components as needed in the configuration file. Be advised that Dendrite is still in development and it's not recommended for use in production environments just yet! @@ -26,13 +28,11 @@ use in production environments just yet! Dendrite requires: * Go 1.15 or higher -* Postgres 9.6 or higher (if using Postgres databases, not needed for SQLite) +* PostgreSQL 12 or higher (if using PostgreSQL databases, not needed for SQLite) If you want to run a polylith deployment, you also need: -* Apache Kafka 0.10.2+ - -Please note that Kafka is **not required** for a monolith deployment. +* A standalone [NATS Server](https://github.com/nats-io/nats-server) deployment with JetStream enabled ## Building Dendrite @@ -49,40 +49,18 @@ Then build it: ./build.sh ``` -## Install Kafka (polylith only) +## Install NATS Server -Install and start Kafka (c.f. 
[scripts/install-local-kafka.sh](scripts/install-local-kafka.sh)): +Follow the [NATS Server installation instructions](https://docs.nats.io/running-a-nats-service/introduction/installation) and then [start your NATS deployment](https://docs.nats.io/running-a-nats-service/introduction/running). -```bash -KAFKA_URL=http://archive.apache.org/dist/kafka/2.1.0/kafka_2.11-2.1.0.tgz - -# Only download the kafka if it isn't already downloaded. -test -f kafka.tgz || wget $KAFKA_URL -O kafka.tgz -# Unpack the kafka over the top of any existing installation -mkdir -p kafka && tar xzf kafka.tgz -C kafka --strip-components 1 - -# Start the zookeeper running in the background. -# By default the zookeeper listens on localhost:2181 -kafka/bin/zookeeper-server-start.sh -daemon kafka/config/zookeeper.properties - -# Start the kafka server running in the background. -# By default the kafka listens on localhost:9092 -kafka/bin/kafka-server-start.sh -daemon kafka/config/server.properties -``` - -On macOS, you can use [Homebrew](https://brew.sh/) for easier setup of Kafka: - -```bash -brew install kafka -brew services start zookeeper -brew services start kafka -``` +JetStream must be enabled, either by passing the `-js` flag to `nats-server`, +or by specifying the `store_dir` option in the the `jetstream` configuration. ## Configuration ### PostgreSQL database setup -Assuming that PostgreSQL 9.6 (or later) is installed: +Assuming that PostgreSQL 12 (or later) is installed: * Create role, choosing a new password when prompted: @@ -109,7 +87,7 @@ On macOS, omit `sudo -u postgres` from the below commands. * If you want to run each Dendrite component with its own database: ```bash - for i in mediaapi syncapi roomserver signingkeyserver federationsender appservice keyserver userapi_accounts userapi_devices naffka; do + for i in mediaapi syncapi roomserver federationapi appservice keyserver userapi_accounts userapi_devices; do sudo -u postgres createdb -O dendrite dendrite_$i done ``` @@ -163,7 +141,11 @@ Create config file, based on `dendrite-config.yaml`. Call it `dendrite.yaml`. Th * `postgres://dendrite:password@localhost/dendrite_userapi_account?sslmode=disable` to connect to PostgreSQL without SSL/TLS * For SQLite on disk: `file:component.db` or `file:///path/to/component.db`, e.g. `file:userapi_account.db` * Postgres and SQLite can be mixed and matched on different components as desired. -* The `use_naffka` option if using Naffka in a monolith deployment +* Either one of the following in the `jetstream` configuration section: + * The `addresses` option — a list of one or more addresses of an external standalone + NATS Server deployment + * The `storage_path` — where on the filesystem the built-in NATS server should + store durable queues, if using the built-in NATS server There are other options which may be useful so review them all. In particular, if you are trying to federate from your Dendrite instance into public rooms @@ -177,11 +159,6 @@ using SQLite, all components **MUST** use their own database file. ## Starting a monolith server -It is possible to use Naffka as an in-process replacement to Kafka when using -the monolith server. To do this, set `use_naffka: true` in your `dendrite.yaml` -configuration and uncomment the relevant Naffka line in the `database` section. -Be sure to update the database username and password if needed. - The monolith server can be started as shown below. 
By default it listens for HTTP connections on port 8008, so you can configure your Matrix client to use `http://servername:8008` as the server: @@ -197,6 +174,10 @@ for HTTPS connections on port 8448: ./bin/dendrite-monolith-server --tls-cert=server.crt --tls-key=server.key ``` +If the `jetstream` section of the configuration contains no `addresses` but does +contain a `store_dir`, Dendrite will start up a built-in NATS JetStream node +automatically, eliminating the need to run a separate NATS server. + ## Starting a polylith deployment The following contains scripts which will run all the required processes in order to point a Matrix client at Dendrite. @@ -263,15 +244,6 @@ This is what implements the room DAG. Clients do not talk to this. ./bin/dendrite-polylith-multi --config=dendrite.yaml roomserver ``` -#### Federation sender - -This sends events from our users to other servers. This is only required if -you want to support federation. - -```bash -./bin/dendrite-polylith-multi --config=dendrite.yaml federationsender -``` - #### Appservice server This sends events from the network to [application @@ -291,14 +263,6 @@ This manages end-to-end encryption keys for users. ./bin/dendrite-polylith-multi --config=dendrite.yaml keyserver ``` -#### Signing key server - -This manages signing keys for servers. - -```bash -./bin/dendrite-polylith-multi --config=dendrite.yaml signingkeyserver -``` - #### EDU server This manages processing EDUs such as typing, send-to-device events and presence. Clients do not talk to diff --git a/docs/nginx/monolith-sample.conf b/docs/nginx/monolith-sample.conf index 0344aa96c..360eb9255 100644 --- a/docs/nginx/monolith-sample.conf +++ b/docs/nginx/monolith-sample.conf @@ -1,3 +1,7 @@ +#change IP to location of monolith server +upstream monolith{ + server 127.0.0.1:8008; +} server { listen 443 ssl; # IPv4 listen [::]:443 ssl; # IPv6 @@ -23,6 +27,6 @@ server { } location /_matrix { - proxy_pass http://monolith:8008; + proxy_pass http://monolith; } } diff --git a/eduserver/eduserver.go b/eduserver/eduserver.go index 97831f2bd..db03001ba 100644 --- a/eduserver/eduserver.go +++ b/eduserver/eduserver.go @@ -23,8 +23,7 @@ import ( "github.com/matrix-org/dendrite/eduserver/input" "github.com/matrix-org/dendrite/eduserver/inthttp" "github.com/matrix-org/dendrite/setup/base" - "github.com/matrix-org/dendrite/setup/config" - "github.com/matrix-org/dendrite/setup/kafka" + "github.com/matrix-org/dendrite/setup/jetstream" userapi "github.com/matrix-org/dendrite/userapi/api" ) @@ -43,16 +42,16 @@ func NewInternalAPI( ) api.EDUServerInputAPI { cfg := &base.Cfg.EDUServer - _, producer := kafka.SetupConsumerProducer(&cfg.Matrix.Kafka) + js, _, _ := jetstream.Prepare(&cfg.Matrix.JetStream) return &input.EDUServerInputAPI{ Cache: eduCache, UserAPI: userAPI, - Producer: producer, - OutputTypingEventTopic: cfg.Matrix.Kafka.TopicFor(config.TopicOutputTypingEvent), - OutputSendToDeviceEventTopic: cfg.Matrix.Kafka.TopicFor(config.TopicOutputSendToDeviceEvent), - OutputReceiptEventTopic: cfg.Matrix.Kafka.TopicFor(config.TopicOutputReceiptEvent), - OutputKeyChangeEventTopic: cfg.Matrix.Kafka.TopicFor(config.TopicOutputKeyChangeEvent), + JetStream: js, + OutputTypingEventTopic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputTypingEvent), + OutputSendToDeviceEventTopic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputSendToDeviceEvent), + OutputReceiptEventTopic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputReceiptEvent), + OutputKeyChangeEventTopic: 
cfg.Matrix.JetStream.TopicFor(jetstream.OutputKeyChangeEvent), ServerName: cfg.Matrix.ServerName, } } diff --git a/eduserver/input/input.go b/eduserver/input/input.go index bdc243745..e7501a907 100644 --- a/eduserver/input/input.go +++ b/eduserver/input/input.go @@ -21,12 +21,12 @@ import ( "encoding/json" "time" - "github.com/Shopify/sarama" "github.com/matrix-org/dendrite/eduserver/api" "github.com/matrix-org/dendrite/eduserver/cache" keyapi "github.com/matrix-org/dendrite/keyserver/api" userapi "github.com/matrix-org/dendrite/userapi/api" "github.com/matrix-org/gomatrixserverlib" + "github.com/nats-io/nats.go" "github.com/sirupsen/logrus" ) @@ -43,7 +43,7 @@ type EDUServerInputAPI struct { // The kafka topic to output new key change events to OutputKeyChangeEventTopic string // kafka producer - Producer sarama.SyncProducer + JetStream nats.JetStreamContext // Internal user query API UserAPI userapi.UserInternalAPI // our server name @@ -100,13 +100,11 @@ func (t *EDUServerInputAPI) InputCrossSigningKeyUpdate( "user_id": request.UserID, }).Infof("Producing to topic '%s'", t.OutputKeyChangeEventTopic) - m := &sarama.ProducerMessage{ - Topic: string(t.OutputKeyChangeEventTopic), - Key: sarama.StringEncoder(request.UserID), - Value: sarama.ByteEncoder(eventJSON), - } - - _, _, err = t.Producer.SendMessage(m) + _, err = t.JetStream.PublishMsg(&nats.Msg{ + Subject: t.OutputKeyChangeEventTopic, + Header: nats.Header{}, + Data: eventJSON, + }) return err } @@ -138,13 +136,11 @@ func (t *EDUServerInputAPI) sendTypingEvent(ite *api.InputTypingEvent) error { "typing": ite.Typing, }).Infof("Producing to topic '%s'", t.OutputTypingEventTopic) - m := &sarama.ProducerMessage{ - Topic: string(t.OutputTypingEventTopic), - Key: sarama.StringEncoder(ite.RoomID), - Value: sarama.ByteEncoder(eventJSON), - } - - _, _, err = t.Producer.SendMessage(m) + _, err = t.JetStream.PublishMsg(&nats.Msg{ + Subject: t.OutputTypingEventTopic, + Header: nats.Header{}, + Data: eventJSON, + }) return err } @@ -193,14 +189,10 @@ func (t *EDUServerInputAPI) sendToDeviceEvent(ise *api.InputSendToDeviceEvent) e return err } - m := &sarama.ProducerMessage{ - Topic: string(t.OutputSendToDeviceEventTopic), - Key: sarama.StringEncoder(ote.UserID), - Value: sarama.ByteEncoder(eventJSON), - } - - _, _, err = t.Producer.SendMessage(m) - if err != nil { + if _, err = t.JetStream.PublishMsg(&nats.Msg{ + Subject: t.OutputSendToDeviceEventTopic, + Data: eventJSON, + }); err != nil { logrus.WithError(err).Error("sendToDevice failed t.Producer.SendMessage") return err } @@ -228,11 +220,10 @@ func (t *EDUServerInputAPI) InputReceiptEvent( if err != nil { return err } - m := &sarama.ProducerMessage{ - Topic: t.OutputReceiptEventTopic, - Key: sarama.StringEncoder(request.InputReceiptEvent.RoomID + ":" + request.InputReceiptEvent.UserID), - Value: sarama.ByteEncoder(js), - } - _, _, err = t.Producer.SendMessage(m) + + _, err = t.JetStream.PublishMsg(&nats.Msg{ + Subject: t.OutputReceiptEventTopic, + Data: js, + }) return err } diff --git a/federationapi/api/api.go b/federationapi/api/api.go index 5d4eb8848..f5ee75b4b 100644 --- a/federationapi/api/api.go +++ b/federationapi/api/api.go @@ -23,6 +23,8 @@ type FederationClient interface { MSC2836EventRelationships(ctx context.Context, dst gomatrixserverlib.ServerName, r gomatrixserverlib.MSC2836EventRelationshipsRequest, roomVersion gomatrixserverlib.RoomVersion) (res gomatrixserverlib.MSC2836EventRelationshipsResponse, err error) MSC2946Spaces(ctx context.Context, dst 
gomatrixserverlib.ServerName, roomID string, r gomatrixserverlib.MSC2946SpacesRequest) (res gomatrixserverlib.MSC2946SpacesResponse, err error) LookupServerKeys(ctx context.Context, s gomatrixserverlib.ServerName, keyRequests map[gomatrixserverlib.PublicKeyLookupRequest]gomatrixserverlib.Timestamp) ([]gomatrixserverlib.ServerKeys, error) + GetEventAuth(ctx context.Context, s gomatrixserverlib.ServerName, roomVersion gomatrixserverlib.RoomVersion, roomID, eventID string) (res gomatrixserverlib.RespEventAuth, err error) + LookupMissingEvents(ctx context.Context, s gomatrixserverlib.ServerName, roomID string, missing gomatrixserverlib.MissingEvents, roomVersion gomatrixserverlib.RoomVersion) (res gomatrixserverlib.RespMissingEvents, err error) } // FederationClientError is returned from FederationClient methods in the event of a problem. @@ -186,7 +188,8 @@ type PerformServersAliveResponse struct { // QueryJoinedHostServerNamesInRoomRequest is a request to QueryJoinedHostServerNames type QueryJoinedHostServerNamesInRoomRequest struct { - RoomID string `json:"room_id"` + RoomID string `json:"room_id"` + ExcludeSelf bool `json:"exclude_self"` } // QueryJoinedHostServerNamesInRoomResponse is a response to QueryJoinedHostServerNames diff --git a/federationapi/consumers/eduserver.go b/federationapi/consumers/eduserver.go index 56ec9eaf8..c3e5b4d49 100644 --- a/federationapi/consumers/eduserver.go +++ b/federationapi/consumers/eduserver.go @@ -17,233 +17,238 @@ package consumers import ( "context" "encoding/json" - "fmt" - "github.com/Shopify/sarama" "github.com/matrix-org/dendrite/eduserver/api" "github.com/matrix-org/dendrite/federationapi/queue" "github.com/matrix-org/dendrite/federationapi/storage" - "github.com/matrix-org/dendrite/internal" "github.com/matrix-org/dendrite/setup/config" + "github.com/matrix-org/dendrite/setup/jetstream" "github.com/matrix-org/dendrite/setup/process" "github.com/matrix-org/gomatrixserverlib" "github.com/matrix-org/util" + "github.com/nats-io/nats.go" log "github.com/sirupsen/logrus" ) // OutputEDUConsumer consumes events that originate in EDU server. type OutputEDUConsumer struct { - typingConsumer *internal.ContinualConsumer - sendToDeviceConsumer *internal.ContinualConsumer - receiptConsumer *internal.ContinualConsumer - db storage.Database - queues *queue.OutgoingQueues - ServerName gomatrixserverlib.ServerName - TypingTopic string - SendToDeviceTopic string + ctx context.Context + jetstream nats.JetStreamContext + durable nats.SubOpt + db storage.Database + queues *queue.OutgoingQueues + ServerName gomatrixserverlib.ServerName + typingTopic string + sendToDeviceTopic string + receiptTopic string } // NewOutputEDUConsumer creates a new OutputEDUConsumer. Call Start() to begin consuming from EDU servers. 
func NewOutputEDUConsumer( process *process.ProcessContext, cfg *config.FederationAPI, - kafkaConsumer sarama.Consumer, + js nats.JetStreamContext, queues *queue.OutgoingQueues, store storage.Database, ) *OutputEDUConsumer { - c := &OutputEDUConsumer{ - typingConsumer: &internal.ContinualConsumer{ - Process: process, - ComponentName: "eduserver/typing", - Topic: cfg.Matrix.Kafka.TopicFor(config.TopicOutputTypingEvent), - Consumer: kafkaConsumer, - PartitionStore: store, - }, - sendToDeviceConsumer: &internal.ContinualConsumer{ - Process: process, - ComponentName: "eduserver/sendtodevice", - Topic: cfg.Matrix.Kafka.TopicFor(config.TopicOutputSendToDeviceEvent), - Consumer: kafkaConsumer, - PartitionStore: store, - }, - receiptConsumer: &internal.ContinualConsumer{ - Process: process, - ComponentName: "eduserver/receipt", - Topic: cfg.Matrix.Kafka.TopicFor(config.TopicOutputReceiptEvent), - Consumer: kafkaConsumer, - PartitionStore: store, - }, + return &OutputEDUConsumer{ + ctx: process.Context(), + jetstream: js, queues: queues, db: store, ServerName: cfg.Matrix.ServerName, - TypingTopic: cfg.Matrix.Kafka.TopicFor(config.TopicOutputTypingEvent), - SendToDeviceTopic: cfg.Matrix.Kafka.TopicFor(config.TopicOutputSendToDeviceEvent), + durable: cfg.Matrix.JetStream.Durable("FederationAPIEDUServerConsumer"), + typingTopic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputTypingEvent), + sendToDeviceTopic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputSendToDeviceEvent), + receiptTopic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputReceiptEvent), } - c.typingConsumer.ProcessMessage = c.onTypingEvent - c.sendToDeviceConsumer.ProcessMessage = c.onSendToDeviceEvent - c.receiptConsumer.ProcessMessage = c.onReceiptEvent - - return c } // Start consuming from EDU servers func (t *OutputEDUConsumer) Start() error { - if err := t.typingConsumer.Start(); err != nil { - return fmt.Errorf("t.typingConsumer.Start: %w", err) + if _, err := t.jetstream.Subscribe(t.typingTopic, t.onTypingEvent, t.durable); err != nil { + return err } - if err := t.sendToDeviceConsumer.Start(); err != nil { - return fmt.Errorf("t.sendToDeviceConsumer.Start: %w", err) + if _, err := t.jetstream.Subscribe(t.sendToDeviceTopic, t.onSendToDeviceEvent, t.durable); err != nil { + return err } - if err := t.receiptConsumer.Start(); err != nil { - return fmt.Errorf("t.receiptConsumer.Start: %w", err) + if _, err := t.jetstream.Subscribe(t.receiptTopic, t.onReceiptEvent, t.durable); err != nil { + return err } return nil } // onSendToDeviceEvent is called in response to a message received on the // send-to-device events topic from the EDU server. -func (t *OutputEDUConsumer) onSendToDeviceEvent(msg *sarama.ConsumerMessage) error { +func (t *OutputEDUConsumer) onSendToDeviceEvent(msg *nats.Msg) { // Extract the send-to-device event from msg. 
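The constructor and Start() above show the new consumption model: instead of a ContinualConsumer per Kafka topic, each handler is registered as a durable JetStream subscription. The sketch below is illustrative only (the stream, subject and durable names are invented rather than the ones Dendrite derives from its configuration); it shows the full publish/consume round trip with `nats.go`, including the manual acknowledgement and DeliverAll options that appear later in this diff:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/nats-io/nats.go"
)

func main() {
	nc, err := nats.Connect(nats.DefaultURL)
	if err != nil {
		log.Fatal(err)
	}
	defer nc.Close()

	js, err := nc.JetStream()
	if err != nil {
		log.Fatal(err)
	}

	// JetStream only retains messages for subjects covered by a stream.
	if _, err := js.AddStream(&nats.StreamConfig{
		Name:     "EXAMPLE",
		Subjects: []string{"example.output"},
	}); err != nil {
		log.Fatal(err)
	}

	// Producer side: roughly the role the sarama SyncProducer used to play.
	if _, err := js.PublishMsg(&nats.Msg{
		Subject: "example.output",
		Data:    []byte(`{"hello":"world"}`),
	}); err != nil {
		log.Fatal(err)
	}

	// Consumer side: a durable push subscription with manual acks, so
	// unacknowledged messages are redelivered after a restart.
	sub, err := js.Subscribe("example.output", func(m *nats.Msg) {
		fmt.Printf("received: %s\n", m.Data)
		_ = m.Ack()
	}, nats.Durable("ExampleConsumer"), nats.DeliverAll(), nats.ManualAck())
	if err != nil {
		log.Fatal(err)
	}
	defer sub.Unsubscribe()

	time.Sleep(time.Second) // give this toy example time to deliver
}
```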
- var ote api.OutputSendToDeviceEvent - if err := json.Unmarshal(msg.Value, &ote); err != nil { - log.WithError(err).Errorf("eduserver output log: message parse failed (expected send-to-device)") - return nil - } + jetstream.WithJetStreamMessage(msg, func(msg *nats.Msg) bool { + var ote api.OutputSendToDeviceEvent + if err := json.Unmarshal(msg.Data, &ote); err != nil { + log.WithError(err).Errorf("eduserver output log: message parse failed (expected send-to-device)") + return true + } - // only send send-to-device events which originated from us - _, originServerName, err := gomatrixserverlib.SplitID('@', ote.Sender) - if err != nil { - log.WithError(err).WithField("user_id", ote.Sender).Error("Failed to extract domain from send-to-device sender") - return nil - } - if originServerName != t.ServerName { - log.WithField("other_server", originServerName).Info("Suppressing send-to-device: originated elsewhere") - return nil - } + // only send send-to-device events which originated from us + _, originServerName, err := gomatrixserverlib.SplitID('@', ote.Sender) + if err != nil { + log.WithError(err).WithField("user_id", ote.Sender).Error("Failed to extract domain from send-to-device sender") + return true + } + if originServerName != t.ServerName { + log.WithField("other_server", originServerName).Info("Suppressing send-to-device: originated elsewhere") + return true + } - _, destServerName, err := gomatrixserverlib.SplitID('@', ote.UserID) - if err != nil { - log.WithError(err).WithField("user_id", ote.UserID).Error("Failed to extract domain from send-to-device destination") - return nil - } + _, destServerName, err := gomatrixserverlib.SplitID('@', ote.UserID) + if err != nil { + log.WithError(err).WithField("user_id", ote.UserID).Error("Failed to extract domain from send-to-device destination") + return true + } - // Pack the EDU and marshal it - edu := &gomatrixserverlib.EDU{ - Type: gomatrixserverlib.MDirectToDevice, - Origin: string(t.ServerName), - } - tdm := gomatrixserverlib.ToDeviceMessage{ - Sender: ote.Sender, - Type: ote.Type, - MessageID: util.RandomString(32), - Messages: map[string]map[string]json.RawMessage{ - ote.UserID: { - ote.DeviceID: ote.Content, + // Pack the EDU and marshal it + edu := &gomatrixserverlib.EDU{ + Type: gomatrixserverlib.MDirectToDevice, + Origin: string(t.ServerName), + } + tdm := gomatrixserverlib.ToDeviceMessage{ + Sender: ote.Sender, + Type: ote.Type, + MessageID: util.RandomString(32), + Messages: map[string]map[string]json.RawMessage{ + ote.UserID: { + ote.DeviceID: ote.Content, + }, }, - }, - } - if edu.Content, err = json.Marshal(tdm); err != nil { - return err - } + } + if edu.Content, err = json.Marshal(tdm); err != nil { + log.WithError(err).Error("failed to marshal EDU JSON") + return true + } - log.Infof("Sending send-to-device message into %q destination queue", destServerName) - return t.queues.SendEDU(edu, t.ServerName, []gomatrixserverlib.ServerName{destServerName}) + log.Infof("Sending send-to-device message into %q destination queue", destServerName) + if err := t.queues.SendEDU(edu, t.ServerName, []gomatrixserverlib.ServerName{destServerName}); err != nil { + log.WithError(err).Error("failed to send EDU") + return false + } + + return true + }) } // onTypingEvent is called in response to a message received on the typing // events topic from the EDU server. -func (t *OutputEDUConsumer) onTypingEvent(msg *sarama.ConsumerMessage) error { - // Extract the typing event from msg. 
- var ote api.OutputTypingEvent - if err := json.Unmarshal(msg.Value, &ote); err != nil { - // Skip this msg but continue processing messages. - log.WithError(err).Errorf("eduserver output log: message parse failed (expected typing)") - return nil - } +func (t *OutputEDUConsumer) onTypingEvent(msg *nats.Msg) { + jetstream.WithJetStreamMessage(msg, func(msg *nats.Msg) bool { + // Extract the typing event from msg. + var ote api.OutputTypingEvent + if err := json.Unmarshal(msg.Data, &ote); err != nil { + // Skip this msg but continue processing messages. + log.WithError(err).Errorf("eduserver output log: message parse failed (expected typing)") + _ = msg.Ack() + return true + } - // only send typing events which originated from us - _, typingServerName, err := gomatrixserverlib.SplitID('@', ote.Event.UserID) - if err != nil { - log.WithError(err).WithField("user_id", ote.Event.UserID).Error("Failed to extract domain from typing sender") - return nil - } - if typingServerName != t.ServerName { - log.WithField("other_server", typingServerName).Info("Suppressing typing notif: originated elsewhere") - return nil - } + // only send typing events which originated from us + _, typingServerName, err := gomatrixserverlib.SplitID('@', ote.Event.UserID) + if err != nil { + log.WithError(err).WithField("user_id", ote.Event.UserID).Error("Failed to extract domain from typing sender") + _ = msg.Ack() + return true + } + if typingServerName != t.ServerName { + return true + } - joined, err := t.db.GetJoinedHosts(context.TODO(), ote.Event.RoomID) - if err != nil { - return err - } + joined, err := t.db.GetJoinedHosts(t.ctx, ote.Event.RoomID) + if err != nil { + log.WithError(err).WithField("room_id", ote.Event.RoomID).Error("failed to get joined hosts for room") + return false + } - names := make([]gomatrixserverlib.ServerName, len(joined)) - for i := range joined { - names[i] = joined[i].ServerName - } + names := make([]gomatrixserverlib.ServerName, len(joined)) + for i := range joined { + names[i] = joined[i].ServerName + } - edu := &gomatrixserverlib.EDU{Type: ote.Event.Type} - if edu.Content, err = json.Marshal(map[string]interface{}{ - "room_id": ote.Event.RoomID, - "user_id": ote.Event.UserID, - "typing": ote.Event.Typing, - }); err != nil { - return err - } + edu := &gomatrixserverlib.EDU{Type: ote.Event.Type} + if edu.Content, err = json.Marshal(map[string]interface{}{ + "room_id": ote.Event.RoomID, + "user_id": ote.Event.UserID, + "typing": ote.Event.Typing, + }); err != nil { + log.WithError(err).Error("failed to marshal EDU JSON") + return true + } - return t.queues.SendEDU(edu, t.ServerName, names) + if err := t.queues.SendEDU(edu, t.ServerName, names); err != nil { + log.WithError(err).Error("failed to send EDU") + return false + } + + return true + }) } // onReceiptEvent is called in response to a message received on the receipt // events topic from the EDU server. -func (t *OutputEDUConsumer) onReceiptEvent(msg *sarama.ConsumerMessage) error { - // Extract the typing event from msg. - var receipt api.OutputReceiptEvent - if err := json.Unmarshal(msg.Value, &receipt); err != nil { - // Skip this msg but continue processing messages. - log.WithError(err).Errorf("eduserver output log: message parse failed (expected receipt)") - return nil - } +func (t *OutputEDUConsumer) onReceiptEvent(msg *nats.Msg) { + jetstream.WithJetStreamMessage(msg, func(msg *nats.Msg) bool { + // Extract the typing event from msg. 
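All of these rewritten handlers run inside `jetstream.WithJetStreamMessage`, whose implementation is not included in this diff. Judging only from the call sites (the callbacks return `true` after unrecoverable problems such as JSON parse failures, and `false` when a transient failure such as a database error should lead to redelivery), it plausibly behaves like the sketch below. Treat this as an assumption about the helper, not its actual source:

```go
package jetstreamsketch

import "github.com/nats-io/nats.go"

// withJetStreamMessage is a guess at the shape of the helper used by the
// consumers above: run the callback, acknowledge the message when it
// returns true, and negatively acknowledge it (asking JetStream to
// redeliver) when it returns false. The real helper may differ.
func withJetStreamMessage(msg *nats.Msg, f func(msg *nats.Msg) bool) {
	if f(msg) {
		_ = msg.Ack()
	} else {
		_ = msg.Nak()
	}
}
```

One consequence of this pattern, visible in the handlers, is that "poison" messages are deliberately acknowledged so they cannot block the durable consumer forever.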
+ var receipt api.OutputReceiptEvent + if err := json.Unmarshal(msg.Data, &receipt); err != nil { + // Skip this msg but continue processing messages. + log.WithError(err).Errorf("eduserver output log: message parse failed (expected receipt)") + return true + } - // only send receipt events which originated from us - _, receiptServerName, err := gomatrixserverlib.SplitID('@', receipt.UserID) - if err != nil { - log.WithError(err).WithField("user_id", receipt.UserID).Error("Failed to extract domain from receipt sender") - return nil - } - if receiptServerName != t.ServerName { - return nil // don't log, very spammy as it logs for each remote receipt - } + // only send receipt events which originated from us + _, receiptServerName, err := gomatrixserverlib.SplitID('@', receipt.UserID) + if err != nil { + log.WithError(err).WithField("user_id", receipt.UserID).Error("failed to extract domain from receipt sender") + return true + } + if receiptServerName != t.ServerName { + return true + } - joined, err := t.db.GetJoinedHosts(context.TODO(), receipt.RoomID) - if err != nil { - return err - } + joined, err := t.db.GetJoinedHosts(t.ctx, receipt.RoomID) + if err != nil { + log.WithError(err).WithField("room_id", receipt.RoomID).Error("failed to get joined hosts for room") + return false + } - names := make([]gomatrixserverlib.ServerName, len(joined)) - for i := range joined { - names[i] = joined[i].ServerName - } + names := make([]gomatrixserverlib.ServerName, len(joined)) + for i := range joined { + names[i] = joined[i].ServerName + } - content := map[string]api.FederationReceiptMRead{} - content[receipt.RoomID] = api.FederationReceiptMRead{ - User: map[string]api.FederationReceiptData{ - receipt.UserID: { - Data: api.ReceiptTS{ - TS: receipt.Timestamp, + content := map[string]api.FederationReceiptMRead{} + content[receipt.RoomID] = api.FederationReceiptMRead{ + User: map[string]api.FederationReceiptData{ + receipt.UserID: { + Data: api.ReceiptTS{ + TS: receipt.Timestamp, + }, + EventIDs: []string{receipt.EventID}, }, - EventIDs: []string{receipt.EventID}, }, - }, - } + } - edu := &gomatrixserverlib.EDU{ - Type: gomatrixserverlib.MReceipt, - Origin: string(t.ServerName), - } - if edu.Content, err = json.Marshal(content); err != nil { - return err - } + edu := &gomatrixserverlib.EDU{ + Type: gomatrixserverlib.MReceipt, + Origin: string(t.ServerName), + } + if edu.Content, err = json.Marshal(content); err != nil { + log.WithError(err).Error("failed to marshal EDU JSON") + return true + } - return t.queues.SendEDU(edu, t.ServerName, names) + if err := t.queues.SendEDU(edu, t.ServerName, names); err != nil { + log.WithError(err).Error("failed to send EDU") + return false + } + + return true + }) } diff --git a/federationapi/consumers/keychange.go b/federationapi/consumers/keychange.go index a8ae0894a..6a737d0ad 100644 --- a/federationapi/consumers/keychange.go +++ b/federationapi/consumers/keychange.go @@ -27,6 +27,7 @@ import ( "github.com/matrix-org/dendrite/keyserver/api" roomserverAPI "github.com/matrix-org/dendrite/roomserver/api" "github.com/matrix-org/dendrite/setup/config" + "github.com/matrix-org/dendrite/setup/jetstream" "github.com/matrix-org/dendrite/setup/process" "github.com/matrix-org/gomatrixserverlib" "github.com/sirupsen/logrus" @@ -34,6 +35,7 @@ import ( // KeyChangeConsumer consumes events that originate in key server. 
type KeyChangeConsumer struct { + ctx context.Context consumer *internal.ContinualConsumer db storage.Database queues *queue.OutgoingQueues @@ -51,10 +53,11 @@ func NewKeyChangeConsumer( rsAPI roomserverAPI.RoomserverInternalAPI, ) *KeyChangeConsumer { c := &KeyChangeConsumer{ + ctx: process.Context(), consumer: &internal.ContinualConsumer{ Process: process, ComponentName: "federationapi/keychange", - Topic: string(cfg.Matrix.Kafka.TopicFor(config.TopicOutputKeyChangeEvent)), + Topic: string(cfg.Matrix.JetStream.TopicFor(jetstream.OutputKeyChangeEvent)), Consumer: kafkaConsumer, PartitionStore: store, }, @@ -100,6 +103,9 @@ func (t *KeyChangeConsumer) onMessage(msg *sarama.ConsumerMessage) error { } func (t *KeyChangeConsumer) onDeviceKeyMessage(m api.DeviceMessage) error { + if m.DeviceKeys == nil { + return nil + } logger := logrus.WithField("user_id", m.UserID) // only send key change events which originated from us @@ -113,7 +119,7 @@ func (t *KeyChangeConsumer) onDeviceKeyMessage(m api.DeviceMessage) error { } var queryRes roomserverAPI.QueryRoomsForUserResponse - err = t.rsAPI.QueryRoomsForUser(context.Background(), &roomserverAPI.QueryRoomsForUserRequest{ + err = t.rsAPI.QueryRoomsForUser(t.ctx, &roomserverAPI.QueryRoomsForUserRequest{ UserID: m.UserID, WantMembership: "join", }, &queryRes) @@ -122,7 +128,7 @@ func (t *KeyChangeConsumer) onDeviceKeyMessage(m api.DeviceMessage) error { return nil } // send this key change to all servers who share rooms with this user. - destinations, err := t.db.GetJoinedHostsForRooms(context.Background(), queryRes.RoomIDs) + destinations, err := t.db.GetJoinedHostsForRooms(t.ctx, queryRes.RoomIDs, true) if err != nil { logger.WithError(err).Error("failed to calculate joined hosts for rooms user is in") return nil @@ -165,7 +171,7 @@ func (t *KeyChangeConsumer) onCrossSigningMessage(m api.DeviceMessage) error { logger := logrus.WithField("user_id", output.UserID) var queryRes roomserverAPI.QueryRoomsForUserResponse - err = t.rsAPI.QueryRoomsForUser(context.Background(), &roomserverAPI.QueryRoomsForUserRequest{ + err = t.rsAPI.QueryRoomsForUser(t.ctx, &roomserverAPI.QueryRoomsForUserRequest{ UserID: output.UserID, WantMembership: "join", }, &queryRes) @@ -174,7 +180,7 @@ func (t *KeyChangeConsumer) onCrossSigningMessage(m api.DeviceMessage) error { return nil } // send this key change to all servers who share rooms with this user. 
- destinations, err := t.db.GetJoinedHostsForRooms(context.Background(), queryRes.RoomIDs) + destinations, err := t.db.GetJoinedHostsForRooms(t.ctx, queryRes.RoomIDs, true) if err != nil { logger.WithError(err).Error("fedsender key change consumer: failed to calculate joined hosts for rooms user is in") return nil diff --git a/federationapi/consumers/roomserver.go b/federationapi/consumers/roomserver.go index 20b1bacbc..25ea78274 100644 --- a/federationapi/consumers/roomserver.go +++ b/federationapi/consumers/roomserver.go @@ -19,117 +19,121 @@ import ( "encoding/json" "fmt" - "github.com/Shopify/sarama" "github.com/matrix-org/dendrite/federationapi/queue" "github.com/matrix-org/dendrite/federationapi/storage" "github.com/matrix-org/dendrite/federationapi/types" - "github.com/matrix-org/dendrite/internal" "github.com/matrix-org/dendrite/roomserver/api" "github.com/matrix-org/dendrite/setup/config" + "github.com/matrix-org/dendrite/setup/jetstream" "github.com/matrix-org/dendrite/setup/process" "github.com/matrix-org/gomatrixserverlib" + "github.com/nats-io/nats.go" log "github.com/sirupsen/logrus" ) // OutputRoomEventConsumer consumes events that originated in the room server. type OutputRoomEventConsumer struct { - cfg *config.FederationAPI - rsAPI api.RoomserverInternalAPI - rsConsumer *internal.ContinualConsumer - db storage.Database - queues *queue.OutgoingQueues + ctx context.Context + cfg *config.FederationAPI + rsAPI api.RoomserverInternalAPI + jetstream nats.JetStreamContext + durable nats.SubOpt + db storage.Database + queues *queue.OutgoingQueues + topic string } // NewOutputRoomEventConsumer creates a new OutputRoomEventConsumer. Call Start() to begin consuming from room servers. func NewOutputRoomEventConsumer( process *process.ProcessContext, cfg *config.FederationAPI, - kafkaConsumer sarama.Consumer, + js nats.JetStreamContext, queues *queue.OutgoingQueues, store storage.Database, rsAPI api.RoomserverInternalAPI, ) *OutputRoomEventConsumer { - consumer := internal.ContinualConsumer{ - Process: process, - ComponentName: "federationapi/roomserver", - Topic: string(cfg.Matrix.Kafka.TopicFor(config.TopicOutputRoomEvent)), - Consumer: kafkaConsumer, - PartitionStore: store, + return &OutputRoomEventConsumer{ + ctx: process.Context(), + cfg: cfg, + jetstream: js, + db: store, + queues: queues, + rsAPI: rsAPI, + durable: cfg.Matrix.JetStream.Durable("FederationAPIRoomServerConsumer"), + topic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputRoomEvent), } - s := &OutputRoomEventConsumer{ - cfg: cfg, - rsConsumer: &consumer, - db: store, - queues: queues, - rsAPI: rsAPI, - } - consumer.ProcessMessage = s.onMessage - - return s } // Start consuming from room servers func (s *OutputRoomEventConsumer) Start() error { - return s.rsConsumer.Start() + _, err := s.jetstream.Subscribe( + s.topic, s.onMessage, s.durable, + nats.DeliverAll(), + nats.ManualAck(), + ) + return err } // onMessage is called when the federation server receives a new event from the room server output log. // It is unsafe to call this with messages for the same room in multiple gorountines // because updates it will likely fail with a types.EventIDMismatchError when it // realises that it cannot update the room state using the deltas. 
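The comment above warns that onMessage must not run concurrently for messages in the same room, because state deltas would otherwise be applied against a stale snapshot. How that is enforced is outside this hunk; purely as an illustration of the constraint (and not Dendrite's actual mechanism), a per-room lock map is one common way to serialise same-room work while leaving different rooms parallel:

```go
package main

import "sync"

// roomLocks hands out one mutex per room ID so that work for the same room
// is serialised while different rooms can still proceed in parallel. This is
// a generic illustration of the constraint described above, not the
// mechanism Dendrite itself uses.
type roomLocks struct {
	mu sync.Map // room ID -> *sync.Mutex
}

func (r *roomLocks) lock(roomID string) func() {
	v, _ := r.mu.LoadOrStore(roomID, &sync.Mutex{})
	m := v.(*sync.Mutex)
	m.Lock()
	return m.Unlock
}

func main() {
	locks := &roomLocks{}
	unlock := locks.lock("!room:example.org")
	// ... process one room server output event for this room ...
	unlock()
}
```

Note also that a single push subscription in `nats.go` invokes its callback serially, which on its own avoids same-room races within one process.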
-func (s *OutputRoomEventConsumer) onMessage(msg *sarama.ConsumerMessage) error { - // Parse out the event JSON - var output api.OutputEvent - if err := json.Unmarshal(msg.Value, &output); err != nil { - // If the message was invalid, log it and move on to the next message in the stream - log.WithError(err).Errorf("roomserver output log: message parse failure") - return nil - } - - switch output.Type { - case api.OutputTypeNewRoomEvent: - ev := output.NewRoomEvent.Event - - if output.NewRoomEvent.RewritesState { - if err := s.db.PurgeRoomState(context.TODO(), ev.RoomID()); err != nil { - return fmt.Errorf("s.db.PurgeRoom: %w", err) - } +func (s *OutputRoomEventConsumer) onMessage(msg *nats.Msg) { + jetstream.WithJetStreamMessage(msg, func(msg *nats.Msg) bool { + // Parse out the event JSON + var output api.OutputEvent + if err := json.Unmarshal(msg.Data, &output); err != nil { + // If the message was invalid, log it and move on to the next message in the stream + log.WithError(err).Errorf("roomserver output log: message parse failure") + return true } - if err := s.processMessage(*output.NewRoomEvent); err != nil { - switch err.(type) { - case *queue.ErrorFederationDisabled: - log.WithField("error", output.Type).Info( - err.Error(), - ) - default: - // panic rather than continue with an inconsistent database + switch output.Type { + case api.OutputTypeNewRoomEvent: + ev := output.NewRoomEvent.Event + + if output.NewRoomEvent.RewritesState { + if err := s.db.PurgeRoomState(s.ctx, ev.RoomID()); err != nil { + log.WithError(err).Errorf("roomserver output log: purge room state failure") + return false + } + } + + if err := s.processMessage(*output.NewRoomEvent); err != nil { + switch err.(type) { + case *queue.ErrorFederationDisabled: + log.WithField("error", output.Type).Info( + err.Error(), + ) + default: + // panic rather than continue with an inconsistent database + log.WithFields(log.Fields{ + "event_id": ev.EventID(), + "event": string(ev.JSON()), + "add": output.NewRoomEvent.AddsStateEventIDs, + "del": output.NewRoomEvent.RemovesStateEventIDs, + log.ErrorKey: err, + }).Panicf("roomserver output log: write room event failure") + } + } + + case api.OutputTypeNewInboundPeek: + if err := s.processInboundPeek(*output.NewInboundPeek); err != nil { log.WithFields(log.Fields{ - "event_id": ev.EventID(), - "event": string(ev.JSON()), - "add": output.NewRoomEvent.AddsStateEventIDs, - "del": output.NewRoomEvent.RemovesStateEventIDs, + "event": output.NewInboundPeek, log.ErrorKey: err, - }).Panicf("roomserver output log: write room event failure") + }).Panicf("roomserver output log: remote peek event failure") + return false } - return nil - } - case api.OutputTypeNewInboundPeek: - if err := s.processInboundPeek(*output.NewInboundPeek); err != nil { - log.WithFields(log.Fields{ - "event": output.NewInboundPeek, - log.ErrorKey: err, - }).Panicf("roomserver output log: remote peek event failure") - return nil - } - default: - log.WithField("type", output.Type).Debug( - "roomserver output log: ignoring unknown output type", - ) - return nil - } - return nil + default: + log.WithField("type", output.Type).Debug( + "roomserver output log: ignoring unknown output type", + ) + } + + return true + }) } // processInboundPeek starts tracking a new federated inbound peek (replacing the existing one if any) @@ -146,7 +150,7 @@ func (s *OutputRoomEventConsumer) processInboundPeek(orp api.OutputNewInboundPee // // This is making the tests flakey. 
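A quieter change running through these consumers is the move from `context.TODO()` and `context.Background()` to a context taken from the process (`s.ctx`, `t.ctx`), presumably so that in-flight database work such as the `AddInboundPeek` call below is cancelled when Dendrite shuts down. As a generic illustration of that pattern (not Dendrite's `process.ProcessContext` type), a shutdown-scoped context can be threaded into handlers like this:

```go
package main

import (
	"context"
	"fmt"
	"os"
	"os/signal"
	"time"
)

type consumer struct {
	ctx context.Context // process-scoped, cancelled on shutdown
}

func (c *consumer) handle() error {
	// Stand-in for a storage call such as db.AddInboundPeek(c.ctx, ...):
	// it stops waiting as soon as the process context is cancelled.
	select {
	case <-time.After(2 * time.Second):
		return nil
	case <-c.ctx.Done():
		return c.ctx.Err()
	}
}

func main() {
	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
	defer stop()

	c := &consumer{ctx: ctx}
	fmt.Println(c.handle())
}
```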
- return s.db.AddInboundPeek(context.TODO(), orp.ServerName, orp.RoomID, orp.PeekID, orp.RenewalInterval) + return s.db.AddInboundPeek(s.ctx, orp.ServerName, orp.RoomID, orp.PeekID, orp.RenewalInterval) } // processMessage updates the list of currently joined hosts in the room @@ -162,7 +166,7 @@ func (s *OutputRoomEventConsumer) processMessage(ore api.OutputNewRoomEvent) err // TODO(#290): handle EventIDMismatchError and recover the current state by // talking to the roomserver oldJoinedHosts, err := s.db.UpdateRoom( - context.TODO(), + s.ctx, ore.Event.RoomID(), ore.LastSentEventID, ore.Event.EventID(), @@ -255,7 +259,7 @@ func (s *OutputRoomEventConsumer) joinedHostsAtEvent( } // handle peeking hosts - inboundPeeks, err := s.db.GetInboundPeeks(context.TODO(), ore.Event.Event.RoomID()) + inboundPeeks, err := s.db.GetInboundPeeks(s.ctx, ore.Event.Event.RoomID()) if err != nil { return nil, err } @@ -373,7 +377,7 @@ func (s *OutputRoomEventConsumer) lookupStateEvents( // from the roomserver using the query API. eventReq := api.QueryEventsByIDRequest{EventIDs: missing} var eventResp api.QueryEventsByIDResponse - if err := s.rsAPI.QueryEventsByID(context.TODO(), &eventReq, &eventResp); err != nil { + if err := s.rsAPI.QueryEventsByID(s.ctx, &eventReq, &eventResp); err != nil { return nil, err } diff --git a/federationapi/federationapi.go b/federationapi/federationapi.go index 02c4cfdb4..63387b9d8 100644 --- a/federationapi/federationapi.go +++ b/federationapi/federationapi.go @@ -30,7 +30,7 @@ import ( roomserverAPI "github.com/matrix-org/dendrite/roomserver/api" "github.com/matrix-org/dendrite/setup/base" "github.com/matrix-org/dendrite/setup/config" - "github.com/matrix-org/dendrite/setup/kafka" + "github.com/matrix-org/dendrite/setup/jetstream" userapi "github.com/matrix-org/dendrite/userapi/api" "github.com/sirupsen/logrus" @@ -78,7 +78,7 @@ func NewInternalAPI( ) api.FederationInternalAPI { cfg := &base.Cfg.FederationAPI - federationDB, err := storage.NewDatabase(&cfg.Database, base.Caches) + federationDB, err := storage.NewDatabase(&cfg.Database, base.Caches, base.Cfg.Global.ServerName) if err != nil { logrus.WithError(err).Panic("failed to connect to federation sender db") } @@ -92,7 +92,7 @@ func NewInternalAPI( FailuresUntilBlacklist: cfg.FederationMaxRetries, } - consumer, _ := kafka.SetupConsumerProducer(&cfg.Matrix.Kafka) + js, consumer, _ := jetstream.Prepare(&cfg.Matrix.JetStream) queues := queue.NewOutgoingQueues( federationDB, base.ProcessContext, @@ -106,7 +106,7 @@ func NewInternalAPI( ) rsConsumer := consumers.NewOutputRoomEventConsumer( - base.ProcessContext, cfg, consumer, queues, + base.ProcessContext, cfg, js, queues, federationDB, rsAPI, ) if err = rsConsumer.Start(); err != nil { @@ -114,7 +114,7 @@ func NewInternalAPI( } tsConsumer := consumers.NewOutputEDUConsumer( - base.ProcessContext, cfg, consumer, queues, federationDB, + base.ProcessContext, cfg, js, queues, federationDB, ) if err := tsConsumer.Start(); err != nil { logrus.WithError(err).Panic("failed to start typing server consumer") diff --git a/federationapi/federationapi_keys_test.go b/federationapi/federationapi_keys_test.go index 9e6c47cda..4774c8820 100644 --- a/federationapi/federationapi_keys_test.go +++ b/federationapi/federationapi_keys_test.go @@ -68,13 +68,22 @@ func TestMain(m *testing.M) { panic("can't create cache: " + err.Error()) } + // Create a temporary directory for JetStream. 
+ d, err := ioutil.TempDir("./", "jetstream*") + if err != nil { + panic(err) + } + defer os.RemoveAll(d) + // Draw up just enough Dendrite config for the server key // API to work. cfg := &config.Dendrite{} cfg.Defaults(true) cfg.Global.ServerName = gomatrixserverlib.ServerName(s.name) cfg.Global.PrivateKey = testPriv - cfg.Global.Kafka.UseNaffka = true + cfg.Global.JetStream.InMemory = true + cfg.Global.JetStream.TopicPrefix = string(s.name[:1]) + cfg.Global.JetStream.StoragePath = config.Path(d) cfg.Global.KeyID = serverKeyID cfg.Global.KeyValidityPeriod = s.validity cfg.FederationAPI.Database.ConnectionString = config.DataSource("file::memory:") diff --git a/federationapi/federationapi_test.go b/federationapi/federationapi_test.go index 8b5bdd034..c660f12e0 100644 --- a/federationapi/federationapi_test.go +++ b/federationapi/federationapi_test.go @@ -23,10 +23,9 @@ func TestRoomsV3URLEscapeDoNot404(t *testing.T) { cfg.Global.KeyID = gomatrixserverlib.KeyID("ed25519:auto") cfg.Global.ServerName = gomatrixserverlib.ServerName("localhost") cfg.Global.PrivateKey = privKey - cfg.Global.Kafka.UseNaffka = true - cfg.Global.Kafka.Database.ConnectionString = config.DataSource("file::memory:") + cfg.Global.JetStream.InMemory = true cfg.FederationAPI.Database.ConnectionString = config.DataSource("file::memory:") - base := base.NewBaseDendrite(cfg, "Monolith", base.NoCacheMetrics) + base := base.NewBaseDendrite(cfg, "Monolith") keyRing := &test.NopJSONVerifier{} fsAPI := base.FederationAPIHTTPClient() // TODO: This is pretty fragile, as if anything calls anything on these nils this test will break. diff --git a/federationapi/internal/api.go b/federationapi/internal/api.go index 1f31b07cc..4e9fa8410 100644 --- a/federationapi/internal/api.go +++ b/federationapi/internal/api.go @@ -1,9 +1,9 @@ package internal import ( - "context" "crypto/ed25519" "encoding/base64" + "fmt" "sync" "time" @@ -142,7 +142,7 @@ func failBlacklistableError(err error, stats *statistics.ServerStatistics) (unti return } -func (a *FederationInternalAPI) doRequest( +func (a *FederationInternalAPI) doRequestIfNotBackingOffOrBlacklisted( s gomatrixserverlib.ServerName, request func() (interface{}, error), ) (interface{}, error) { stats, err := a.isBlacklistedOrBackingOff(s) @@ -167,141 +167,15 @@ func (a *FederationInternalAPI) doRequest( return res, nil } -func (a *FederationInternalAPI) GetUserDevices( - ctx context.Context, s gomatrixserverlib.ServerName, userID string, -) (gomatrixserverlib.RespUserDevices, error) { - ctx, cancel := context.WithTimeout(ctx, time.Second*30) - defer cancel() - ires, err := a.doRequest(s, func() (interface{}, error) { - return a.federation.GetUserDevices(ctx, s, userID) - }) - if err != nil { - return gomatrixserverlib.RespUserDevices{}, err +func (a *FederationInternalAPI) doRequestIfNotBlacklisted( + s gomatrixserverlib.ServerName, request func() (interface{}, error), +) (interface{}, error) { + stats := a.statistics.ForServer(s) + if _, blacklisted := stats.BackoffInfo(); blacklisted { + return stats, &api.FederationClientError{ + Err: fmt.Sprintf("server %q is blacklisted", s), + Blacklisted: true, + } } - return ires.(gomatrixserverlib.RespUserDevices), nil -} - -func (a *FederationInternalAPI) ClaimKeys( - ctx context.Context, s gomatrixserverlib.ServerName, oneTimeKeys map[string]map[string]string, -) (gomatrixserverlib.RespClaimKeys, error) { - ctx, cancel := context.WithTimeout(ctx, time.Second*30) - defer cancel() - ires, err := a.doRequest(s, func() (interface{}, error) { - return 
a.federation.ClaimKeys(ctx, s, oneTimeKeys) - }) - if err != nil { - return gomatrixserverlib.RespClaimKeys{}, err - } - return ires.(gomatrixserverlib.RespClaimKeys), nil -} - -func (a *FederationInternalAPI) QueryKeys( - ctx context.Context, s gomatrixserverlib.ServerName, keys map[string][]string, -) (gomatrixserverlib.RespQueryKeys, error) { - ires, err := a.doRequest(s, func() (interface{}, error) { - return a.federation.QueryKeys(ctx, s, keys) - }) - if err != nil { - return gomatrixserverlib.RespQueryKeys{}, err - } - return ires.(gomatrixserverlib.RespQueryKeys), nil -} - -func (a *FederationInternalAPI) Backfill( - ctx context.Context, s gomatrixserverlib.ServerName, roomID string, limit int, eventIDs []string, -) (res gomatrixserverlib.Transaction, err error) { - ctx, cancel := context.WithTimeout(ctx, time.Second*30) - defer cancel() - ires, err := a.doRequest(s, func() (interface{}, error) { - return a.federation.Backfill(ctx, s, roomID, limit, eventIDs) - }) - if err != nil { - return gomatrixserverlib.Transaction{}, err - } - return ires.(gomatrixserverlib.Transaction), nil -} - -func (a *FederationInternalAPI) LookupState( - ctx context.Context, s gomatrixserverlib.ServerName, roomID, eventID string, roomVersion gomatrixserverlib.RoomVersion, -) (res gomatrixserverlib.RespState, err error) { - ctx, cancel := context.WithTimeout(ctx, time.Second*30) - defer cancel() - ires, err := a.doRequest(s, func() (interface{}, error) { - return a.federation.LookupState(ctx, s, roomID, eventID, roomVersion) - }) - if err != nil { - return gomatrixserverlib.RespState{}, err - } - return ires.(gomatrixserverlib.RespState), nil -} - -func (a *FederationInternalAPI) LookupStateIDs( - ctx context.Context, s gomatrixserverlib.ServerName, roomID, eventID string, -) (res gomatrixserverlib.RespStateIDs, err error) { - ctx, cancel := context.WithTimeout(ctx, time.Second*30) - defer cancel() - ires, err := a.doRequest(s, func() (interface{}, error) { - return a.federation.LookupStateIDs(ctx, s, roomID, eventID) - }) - if err != nil { - return gomatrixserverlib.RespStateIDs{}, err - } - return ires.(gomatrixserverlib.RespStateIDs), nil -} - -func (a *FederationInternalAPI) GetEvent( - ctx context.Context, s gomatrixserverlib.ServerName, eventID string, -) (res gomatrixserverlib.Transaction, err error) { - ctx, cancel := context.WithTimeout(ctx, time.Second*30) - defer cancel() - ires, err := a.doRequest(s, func() (interface{}, error) { - return a.federation.GetEvent(ctx, s, eventID) - }) - if err != nil { - return gomatrixserverlib.Transaction{}, err - } - return ires.(gomatrixserverlib.Transaction), nil -} - -func (a *FederationInternalAPI) LookupServerKeys( - ctx context.Context, s gomatrixserverlib.ServerName, keyRequests map[gomatrixserverlib.PublicKeyLookupRequest]gomatrixserverlib.Timestamp, -) ([]gomatrixserverlib.ServerKeys, error) { - ctx, cancel := context.WithTimeout(ctx, time.Minute) - defer cancel() - ires, err := a.doRequest(s, func() (interface{}, error) { - return a.federation.LookupServerKeys(ctx, s, keyRequests) - }) - if err != nil { - return []gomatrixserverlib.ServerKeys{}, err - } - return ires.([]gomatrixserverlib.ServerKeys), nil -} - -func (a *FederationInternalAPI) MSC2836EventRelationships( - ctx context.Context, s gomatrixserverlib.ServerName, r gomatrixserverlib.MSC2836EventRelationshipsRequest, - roomVersion gomatrixserverlib.RoomVersion, -) (res gomatrixserverlib.MSC2836EventRelationshipsResponse, err error) { - ctx, cancel := context.WithTimeout(ctx, time.Minute) - 
defer cancel() - ires, err := a.doRequest(s, func() (interface{}, error) { - return a.federation.MSC2836EventRelationships(ctx, s, r, roomVersion) - }) - if err != nil { - return res, err - } - return ires.(gomatrixserverlib.MSC2836EventRelationshipsResponse), nil -} - -func (a *FederationInternalAPI) MSC2946Spaces( - ctx context.Context, s gomatrixserverlib.ServerName, roomID string, r gomatrixserverlib.MSC2946SpacesRequest, -) (res gomatrixserverlib.MSC2946SpacesResponse, err error) { - ctx, cancel := context.WithTimeout(ctx, time.Minute) - defer cancel() - ires, err := a.doRequest(s, func() (interface{}, error) { - return a.federation.MSC2946Spaces(ctx, s, roomID, r) - }) - if err != nil { - return res, err - } - return ires.(gomatrixserverlib.MSC2946SpacesResponse), nil + return request() } diff --git a/federationapi/internal/federationclient.go b/federationapi/internal/federationclient.go new file mode 100644 index 000000000..b31db466c --- /dev/null +++ b/federationapi/internal/federationclient.go @@ -0,0 +1,180 @@ +package internal + +import ( + "context" + "time" + + "github.com/matrix-org/gomatrixserverlib" +) + +// Functions here are "proxying" calls to the gomatrixserverlib federation +// client. + +func (a *FederationInternalAPI) GetEventAuth( + ctx context.Context, s gomatrixserverlib.ServerName, + roomVersion gomatrixserverlib.RoomVersion, roomID, eventID string, +) (res gomatrixserverlib.RespEventAuth, err error) { + ctx, cancel := context.WithTimeout(ctx, time.Second*30) + defer cancel() + ires, err := a.doRequestIfNotBlacklisted(s, func() (interface{}, error) { + return a.federation.GetEventAuth(ctx, s, roomVersion, roomID, eventID) + }) + if err != nil { + return gomatrixserverlib.RespEventAuth{}, err + } + return ires.(gomatrixserverlib.RespEventAuth), nil +} + +func (a *FederationInternalAPI) GetUserDevices( + ctx context.Context, s gomatrixserverlib.ServerName, userID string, +) (gomatrixserverlib.RespUserDevices, error) { + ctx, cancel := context.WithTimeout(ctx, time.Second*30) + defer cancel() + ires, err := a.doRequestIfNotBlacklisted(s, func() (interface{}, error) { + return a.federation.GetUserDevices(ctx, s, userID) + }) + if err != nil { + return gomatrixserverlib.RespUserDevices{}, err + } + return ires.(gomatrixserverlib.RespUserDevices), nil +} + +func (a *FederationInternalAPI) ClaimKeys( + ctx context.Context, s gomatrixserverlib.ServerName, oneTimeKeys map[string]map[string]string, +) (gomatrixserverlib.RespClaimKeys, error) { + ctx, cancel := context.WithTimeout(ctx, time.Second*30) + defer cancel() + ires, err := a.doRequestIfNotBackingOffOrBlacklisted(s, func() (interface{}, error) { + return a.federation.ClaimKeys(ctx, s, oneTimeKeys) + }) + if err != nil { + return gomatrixserverlib.RespClaimKeys{}, err + } + return ires.(gomatrixserverlib.RespClaimKeys), nil +} + +func (a *FederationInternalAPI) QueryKeys( + ctx context.Context, s gomatrixserverlib.ServerName, keys map[string][]string, +) (gomatrixserverlib.RespQueryKeys, error) { + ires, err := a.doRequestIfNotBackingOffOrBlacklisted(s, func() (interface{}, error) { + return a.federation.QueryKeys(ctx, s, keys) + }) + if err != nil { + return gomatrixserverlib.RespQueryKeys{}, err + } + return ires.(gomatrixserverlib.RespQueryKeys), nil +} + +func (a *FederationInternalAPI) Backfill( + ctx context.Context, s gomatrixserverlib.ServerName, roomID string, limit int, eventIDs []string, +) (res gomatrixserverlib.Transaction, err error) { + ctx, cancel := context.WithTimeout(ctx, time.Second*30) + defer 
cancel() + ires, err := a.doRequestIfNotBlacklisted(s, func() (interface{}, error) { + return a.federation.Backfill(ctx, s, roomID, limit, eventIDs) + }) + if err != nil { + return gomatrixserverlib.Transaction{}, err + } + return ires.(gomatrixserverlib.Transaction), nil +} + +func (a *FederationInternalAPI) LookupState( + ctx context.Context, s gomatrixserverlib.ServerName, roomID, eventID string, roomVersion gomatrixserverlib.RoomVersion, +) (res gomatrixserverlib.RespState, err error) { + ctx, cancel := context.WithTimeout(ctx, time.Second*30) + defer cancel() + ires, err := a.doRequestIfNotBlacklisted(s, func() (interface{}, error) { + return a.federation.LookupState(ctx, s, roomID, eventID, roomVersion) + }) + if err != nil { + return gomatrixserverlib.RespState{}, err + } + return ires.(gomatrixserverlib.RespState), nil +} + +func (a *FederationInternalAPI) LookupStateIDs( + ctx context.Context, s gomatrixserverlib.ServerName, roomID, eventID string, +) (res gomatrixserverlib.RespStateIDs, err error) { + ctx, cancel := context.WithTimeout(ctx, time.Second*30) + defer cancel() + ires, err := a.doRequestIfNotBlacklisted(s, func() (interface{}, error) { + return a.federation.LookupStateIDs(ctx, s, roomID, eventID) + }) + if err != nil { + return gomatrixserverlib.RespStateIDs{}, err + } + return ires.(gomatrixserverlib.RespStateIDs), nil +} + +func (a *FederationInternalAPI) LookupMissingEvents( + ctx context.Context, s gomatrixserverlib.ServerName, roomID string, + missing gomatrixserverlib.MissingEvents, roomVersion gomatrixserverlib.RoomVersion, +) (res gomatrixserverlib.RespMissingEvents, err error) { + ctx, cancel := context.WithTimeout(ctx, time.Second*30) + defer cancel() + ires, err := a.doRequestIfNotBlacklisted(s, func() (interface{}, error) { + return a.federation.LookupMissingEvents(ctx, s, roomID, missing, roomVersion) + }) + if err != nil { + return gomatrixserverlib.RespMissingEvents{}, err + } + return ires.(gomatrixserverlib.RespMissingEvents), nil +} + +func (a *FederationInternalAPI) GetEvent( + ctx context.Context, s gomatrixserverlib.ServerName, eventID string, +) (res gomatrixserverlib.Transaction, err error) { + ctx, cancel := context.WithTimeout(ctx, time.Second*30) + defer cancel() + ires, err := a.doRequestIfNotBlacklisted(s, func() (interface{}, error) { + return a.federation.GetEvent(ctx, s, eventID) + }) + if err != nil { + return gomatrixserverlib.Transaction{}, err + } + return ires.(gomatrixserverlib.Transaction), nil +} + +func (a *FederationInternalAPI) LookupServerKeys( + ctx context.Context, s gomatrixserverlib.ServerName, keyRequests map[gomatrixserverlib.PublicKeyLookupRequest]gomatrixserverlib.Timestamp, +) ([]gomatrixserverlib.ServerKeys, error) { + ctx, cancel := context.WithTimeout(ctx, time.Minute) + defer cancel() + ires, err := a.doRequestIfNotBlacklisted(s, func() (interface{}, error) { + return a.federation.LookupServerKeys(ctx, s, keyRequests) + }) + if err != nil { + return []gomatrixserverlib.ServerKeys{}, err + } + return ires.([]gomatrixserverlib.ServerKeys), nil +} + +func (a *FederationInternalAPI) MSC2836EventRelationships( + ctx context.Context, s gomatrixserverlib.ServerName, r gomatrixserverlib.MSC2836EventRelationshipsRequest, + roomVersion gomatrixserverlib.RoomVersion, +) (res gomatrixserverlib.MSC2836EventRelationshipsResponse, err error) { + ctx, cancel := context.WithTimeout(ctx, time.Minute) + defer cancel() + ires, err := a.doRequestIfNotBlacklisted(s, func() (interface{}, error) { + return 
a.federation.MSC2836EventRelationships(ctx, s, r, roomVersion) + }) + if err != nil { + return res, err + } + return ires.(gomatrixserverlib.MSC2836EventRelationshipsResponse), nil +} + +func (a *FederationInternalAPI) MSC2946Spaces( + ctx context.Context, s gomatrixserverlib.ServerName, roomID string, r gomatrixserverlib.MSC2946SpacesRequest, +) (res gomatrixserverlib.MSC2946SpacesResponse, err error) { + ctx, cancel := context.WithTimeout(ctx, time.Minute) + defer cancel() + ires, err := a.doRequestIfNotBlacklisted(s, func() (interface{}, error) { + return a.federation.MSC2946Spaces(ctx, s, roomID, r) + }) + if err != nil { + return res, err + } + return ires.(gomatrixserverlib.MSC2946SpacesResponse), nil +} diff --git a/federationapi/internal/perform.go b/federationapi/internal/perform.go index 82d04c21e..4dd53c11b 100644 --- a/federationapi/internal/perform.go +++ b/federationapi/internal/perform.go @@ -249,7 +249,9 @@ func (r *FederationInternalAPI) performJoinUsingServer( roomserverAPI.KindNew, respState, event.Headered(respMakeJoin.RoomVersion), + serverName, nil, + false, ); err != nil { logrus.WithFields(logrus.Fields{ "room_id": roomID, @@ -430,7 +432,9 @@ func (r *FederationInternalAPI) performOutboundPeekUsingServer( roomserverAPI.KindNew, &respState, respPeek.LatestEvent.Headered(respPeek.RoomVersion), + serverName, nil, + false, ); err != nil { return fmt.Errorf("r.producer.SendEventWithState: %w", err) } diff --git a/federationapi/internal/query.go b/federationapi/internal/query.go index bac813331..b0a76eeb7 100644 --- a/federationapi/internal/query.go +++ b/federationapi/internal/query.go @@ -16,7 +16,7 @@ func (f *FederationInternalAPI) QueryJoinedHostServerNamesInRoom( request *api.QueryJoinedHostServerNamesInRoomRequest, response *api.QueryJoinedHostServerNamesInRoomResponse, ) (err error) { - joinedHosts, err := f.db.GetJoinedHostsForRooms(ctx, []string{request.RoomID}) + joinedHosts, err := f.db.GetJoinedHostsForRooms(ctx, []string{request.RoomID}, request.ExcludeSelf) if err != nil { return } @@ -28,7 +28,7 @@ func (f *FederationInternalAPI) QueryJoinedHostServerNamesInRoom( func (a *FederationInternalAPI) fetchServerKeysDirectly(ctx context.Context, serverName gomatrixserverlib.ServerName) (*gomatrixserverlib.ServerKeys, error) { ctx, cancel := context.WithTimeout(ctx, time.Second*30) defer cancel() - ires, err := a.doRequest(serverName, func() (interface{}, error) { + ires, err := a.doRequestIfNotBackingOffOrBlacklisted(serverName, func() (interface{}, error) { return a.federation.GetServerKeys(ctx, serverName) }) if err != nil { diff --git a/federationapi/inthttp/client.go b/federationapi/inthttp/client.go index af6b801b3..a65df906f 100644 --- a/federationapi/inthttp/client.go +++ b/federationapi/inthttp/client.go @@ -26,16 +26,18 @@ const ( FederationAPIPerformServersAlivePath = "/federationapi/performServersAlive" FederationAPIPerformBroadcastEDUPath = "/federationapi/performBroadcastEDU" - FederationAPIGetUserDevicesPath = "/federationapi/client/getUserDevices" - FederationAPIClaimKeysPath = "/federationapi/client/claimKeys" - FederationAPIQueryKeysPath = "/federationapi/client/queryKeys" - FederationAPIBackfillPath = "/federationapi/client/backfill" - FederationAPILookupStatePath = "/federationapi/client/lookupState" - FederationAPILookupStateIDsPath = "/federationapi/client/lookupStateIDs" - FederationAPIGetEventPath = "/federationapi/client/getEvent" - FederationAPILookupServerKeysPath = "/federationapi/client/lookupServerKeys" - 
FederationAPIEventRelationshipsPath = "/federationapi/client/msc2836eventRelationships" - FederationAPISpacesSummaryPath = "/federationapi/client/msc2946spacesSummary" + FederationAPIGetUserDevicesPath = "/federationapi/client/getUserDevices" + FederationAPIClaimKeysPath = "/federationapi/client/claimKeys" + FederationAPIQueryKeysPath = "/federationapi/client/queryKeys" + FederationAPIBackfillPath = "/federationapi/client/backfill" + FederationAPILookupStatePath = "/federationapi/client/lookupState" + FederationAPILookupStateIDsPath = "/federationapi/client/lookupStateIDs" + FederationAPILookupMissingEventsPath = "/federationapi/client/lookupMissingEvents" + FederationAPIGetEventPath = "/federationapi/client/getEvent" + FederationAPILookupServerKeysPath = "/federationapi/client/lookupServerKeys" + FederationAPIEventRelationshipsPath = "/federationapi/client/msc2836eventRelationships" + FederationAPISpacesSummaryPath = "/federationapi/client/msc2946spacesSummary" + FederationAPIGetEventAuthPath = "/federationapi/client/getEventAuth" FederationAPIInputPublicKeyPath = "/federationapi/inputPublicKey" FederationAPIQueryPublicKeyPath = "/federationapi/queryPublicKey" @@ -353,6 +355,49 @@ func (h *httpFederationInternalAPI) LookupStateIDs( return *response.Res, nil } +type lookupMissingEvents struct { + S gomatrixserverlib.ServerName + RoomID string + Missing gomatrixserverlib.MissingEvents + RoomVersion gomatrixserverlib.RoomVersion + Res struct { + Events []gomatrixserverlib.RawJSON `json:"events"` + } + Err *api.FederationClientError +} + +func (h *httpFederationInternalAPI) LookupMissingEvents( + ctx context.Context, s gomatrixserverlib.ServerName, roomID string, + missing gomatrixserverlib.MissingEvents, roomVersion gomatrixserverlib.RoomVersion, +) (res gomatrixserverlib.RespMissingEvents, err error) { + span, ctx := opentracing.StartSpanFromContext(ctx, "LookupMissingEvents") + defer span.Finish() + + request := lookupMissingEvents{ + S: s, + RoomID: roomID, + Missing: missing, + RoomVersion: roomVersion, + } + apiURL := h.federationAPIURL + FederationAPILookupMissingEventsPath + err = httputil.PostJSON(ctx, span, h.httpClient, apiURL, &request, &request) + if err != nil { + return res, err + } + if request.Err != nil { + return res, request.Err + } + res.Events = make([]*gomatrixserverlib.Event, 0, len(request.Res.Events)) + for _, js := range request.Res.Events { + ev, err := gomatrixserverlib.NewEventFromUntrustedJSON(js, roomVersion) + if err != nil { + return res, err + } + res.Events = append(res.Events, ev) + } + return res, nil +} + type getEvent struct { S gomatrixserverlib.ServerName EventID string @@ -382,6 +427,40 @@ func (h *httpFederationInternalAPI) GetEvent( return *response.Res, nil } +type getEventAuth struct { + S gomatrixserverlib.ServerName + RoomVersion gomatrixserverlib.RoomVersion + RoomID string + EventID string + Res *gomatrixserverlib.RespEventAuth + Err *api.FederationClientError +} + +func (h *httpFederationInternalAPI) GetEventAuth( + ctx context.Context, s gomatrixserverlib.ServerName, + roomVersion gomatrixserverlib.RoomVersion, roomID, eventID string, +) (gomatrixserverlib.RespEventAuth, error) { + span, ctx := opentracing.StartSpanFromContext(ctx, "GetEventAuth") + defer span.Finish() + + request := getEventAuth{ + S: s, + RoomVersion: roomVersion, + RoomID: roomID, + EventID: eventID, + } + var response getEventAuth + apiURL := h.federationAPIURL + FederationAPIGetEventAuthPath + err := httputil.PostJSON(ctx, span, h.httpClient, apiURL, &request, 
&response) + if err != nil { + return gomatrixserverlib.RespEventAuth{}, err + } + if response.Err != nil { + return gomatrixserverlib.RespEventAuth{}, response.Err + } + return *response.Res, nil +} + func (h *httpFederationInternalAPI) QueryServerKeys( ctx context.Context, req *api.QueryServerKeysRequest, res *api.QueryServerKeysResponse, ) error { diff --git a/federationapi/inthttp/server.go b/federationapi/inthttp/server.go index 7133eddd0..8d193d9c9 100644 --- a/federationapi/inthttp/server.go +++ b/federationapi/inthttp/server.go @@ -241,6 +241,34 @@ func AddRoutes(intAPI api.FederationInternalAPI, internalAPIMux *mux.Router) { return util.JSONResponse{Code: http.StatusOK, JSON: request} }), ) + internalAPIMux.Handle( + FederationAPILookupMissingEventsPath, + httputil.MakeInternalAPI("LookupMissingEvents", func(req *http.Request) util.JSONResponse { + var request lookupMissingEvents + if err := json.NewDecoder(req.Body).Decode(&request); err != nil { + return util.MessageResponse(http.StatusBadRequest, err.Error()) + } + res, err := intAPI.LookupMissingEvents(req.Context(), request.S, request.RoomID, request.Missing, request.RoomVersion) + if err != nil { + ferr, ok := err.(*api.FederationClientError) + if ok { + request.Err = ferr + } else { + request.Err = &api.FederationClientError{ + Err: err.Error(), + } + } + } + for _, event := range res.Events { + js, err := json.Marshal(event) + if err != nil { + return util.MessageResponse(http.StatusInternalServerError, err.Error()) + } + request.Res.Events = append(request.Res.Events, js) + } + return util.JSONResponse{Code: http.StatusOK, JSON: request} + }), + ) internalAPIMux.Handle( FederationAPIGetEventPath, httputil.MakeInternalAPI("GetEvent", func(req *http.Request) util.JSONResponse { @@ -263,6 +291,28 @@ func AddRoutes(intAPI api.FederationInternalAPI, internalAPIMux *mux.Router) { return util.JSONResponse{Code: http.StatusOK, JSON: request} }), ) + internalAPIMux.Handle( + FederationAPIGetEventAuthPath, + httputil.MakeInternalAPI("GetEventAuth", func(req *http.Request) util.JSONResponse { + var request getEventAuth + if err := json.NewDecoder(req.Body).Decode(&request); err != nil { + return util.MessageResponse(http.StatusBadRequest, err.Error()) + } + res, err := intAPI.GetEventAuth(req.Context(), request.S, request.RoomVersion, request.RoomID, request.EventID) + if err != nil { + ferr, ok := err.(*api.FederationClientError) + if ok { + request.Err = ferr + } else { + request.Err = &api.FederationClientError{ + Err: err.Error(), + } + } + } + request.Res = &res + return util.JSONResponse{Code: http.StatusOK, JSON: request} + }), + ) internalAPIMux.Handle( FederationAPIQueryServerKeysPath, httputil.MakeInternalAPI("QueryServerKeys", func(req *http.Request) util.JSONResponse { diff --git a/federationapi/routing/eventauth.go b/federationapi/routing/eventauth.go index 34eaad1c5..d92b66f4b 100644 --- a/federationapi/routing/eventauth.go +++ b/federationapi/routing/eventauth.go @@ -16,6 +16,7 @@ import ( "context" "net/http" + "github.com/matrix-org/dendrite/clientapi/jsonerror" "github.com/matrix-org/dendrite/roomserver/api" "github.com/matrix-org/gomatrixserverlib" "github.com/matrix-org/util" @@ -29,15 +30,42 @@ func GetEventAuth( roomID string, eventID string, ) util.JSONResponse { - // TODO: Optimisation: we shouldn't be querying all the room state - // that is in state.StateEvents - we just ignore it. 
- state, err := getState(ctx, request, rsAPI, roomID, eventID) + event, resErr := fetchEvent(ctx, rsAPI, eventID) + if resErr != nil { + return *resErr + } + + if event.RoomID() != roomID { + return util.JSONResponse{Code: http.StatusNotFound, JSON: jsonerror.NotFound("event does not belong to this room")} + } + resErr = allowedToSeeEvent(ctx, request.Origin(), rsAPI, eventID) + if resErr != nil { + return *resErr + } + + var response api.QueryStateAndAuthChainResponse + err := rsAPI.QueryStateAndAuthChain( + ctx, + &api.QueryStateAndAuthChainRequest{ + RoomID: roomID, + PrevEventIDs: []string{eventID}, + AuthEventIDs: event.AuthEventIDs(), + OnlyFetchAuthChain: true, + }, + &response, + ) if err != nil { - return *err + return util.ErrorResponse(err) + } + + if !response.RoomExists { + return util.JSONResponse{Code: http.StatusNotFound, JSON: nil} } return util.JSONResponse{ Code: http.StatusOK, - JSON: gomatrixserverlib.RespEventAuth{AuthEvents: state.AuthEvents}, + JSON: gomatrixserverlib.RespEventAuth{ + AuthEvents: gomatrixserverlib.UnwrapEventHeaders(response.AuthChainEvents), + }, } } diff --git a/federationapi/routing/join.go b/federationapi/routing/join.go index f0e1ae0d6..7f8d31505 100644 --- a/federationapi/routing/join.go +++ b/federationapi/routing/join.go @@ -194,6 +194,12 @@ func SendJoin( JSON: jsonerror.BadJSON("No state key was provided in the join event."), } } + if !event.StateKeyEquals(event.Sender()) { + return util.JSONResponse{ + Code: http.StatusBadRequest, + JSON: jsonerror.BadJSON("Event state key must match the event sender."), + } + } // Check that the room ID is correct. if event.RoomID() != roomID { @@ -318,7 +324,6 @@ func SendJoin( { Kind: api.KindNew, Event: event.Headered(stateAndAuthChainResponse.RoomVersion), - AuthEventIDs: event.AuthEventIDs(), SendAsServer: string(cfg.Matrix.ServerName), TransactionID: nil, }, diff --git a/federationapi/routing/leave.go b/federationapi/routing/leave.go index 38f4ca76f..0b83f04ae 100644 --- a/federationapi/routing/leave.go +++ b/federationapi/routing/leave.go @@ -175,10 +175,16 @@ func SendLeave( } } - if event.StateKey() == nil { + if event.StateKey() == nil || event.StateKeyEquals("") { return util.JSONResponse{ Code: http.StatusBadRequest, - JSON: jsonerror.InvalidArgumentValue("missing state_key"), + JSON: jsonerror.BadJSON("No state key was provided in the leave event."), + } + } + if !event.StateKeyEquals(event.Sender()) { + return util.JSONResponse{ + Code: http.StatusBadRequest, + JSON: jsonerror.BadJSON("Event state key must match the event sender."), } } @@ -269,7 +275,6 @@ func SendLeave( { Kind: api.KindNew, Event: event.Headered(verRes.RoomVersion), - AuthEventIDs: event.AuthEventIDs(), SendAsServer: string(cfg.Matrix.ServerName), TransactionID: nil, }, diff --git a/federationapi/routing/publicrooms.go b/federationapi/routing/publicrooms.go index 5b9be8807..a253f86eb 100644 --- a/federationapi/routing/publicrooms.go +++ b/federationapi/routing/publicrooms.go @@ -133,8 +133,6 @@ func fillInRooms(ctx context.Context, roomIDs []string, rsAPI roomserverAPI.Room util.GetLogger(ctx).WithError(err).Error("QueryBulkStateContent failed") return nil, err } - util.GetLogger(ctx).Infof("room IDs: %+v", roomIDs) - util.GetLogger(ctx).Infof("State res: %+v", stateRes.Rooms) chunk := make([]gomatrixserverlib.PublicRoom, len(roomIDs)) i := 0 for roomID, data := range stateRes.Rooms { diff --git a/federationapi/routing/send.go b/federationapi/routing/send.go index 4b5f0d660..dbfd3ff92 100644 --- 
a/federationapi/routing/send.go +++ b/federationapi/routing/send.go @@ -17,7 +17,6 @@ package routing import ( "context" "encoding/json" - "errors" "fmt" "net/http" "sync" @@ -34,7 +33,6 @@ import ( "github.com/matrix-org/util" "github.com/prometheus/client_golang/prometheus" "github.com/sirupsen/logrus" - "go.uber.org/atomic" ) const ( @@ -72,84 +70,15 @@ var ( Help: "Number of incoming EDUs from remote servers", }, ) - processEventSummary = prometheus.NewSummaryVec( - prometheus.SummaryOpts{ - Namespace: "dendrite", - Subsystem: "federationapi", - Name: "process_event", - Help: "How long it takes to process an incoming event and what work had to be done for it", - }, - []string{"work", "outcome"}, - ) ) func init() { prometheus.MustRegister( - pduCountTotal, eduCountTotal, processEventSummary, + pduCountTotal, eduCountTotal, ) } -type sendFIFOQueue struct { - tasks []*inputTask - count int - mutex sync.Mutex - notifs chan struct{} -} - -func newSendFIFOQueue() *sendFIFOQueue { - q := &sendFIFOQueue{ - notifs: make(chan struct{}, 1), - } - return q -} - -func (q *sendFIFOQueue) push(frame *inputTask) { - q.mutex.Lock() - defer q.mutex.Unlock() - q.tasks = append(q.tasks, frame) - q.count++ - select { - case q.notifs <- struct{}{}: - default: - } -} - -// pop returns the first item of the queue, if there is one. -// The second return value will indicate if a task was returned. -func (q *sendFIFOQueue) pop() (*inputTask, bool) { - q.mutex.Lock() - defer q.mutex.Unlock() - if q.count == 0 { - return nil, false - } - frame := q.tasks[0] - q.tasks[0] = nil - q.tasks = q.tasks[1:] - q.count-- - if q.count == 0 { - // Force a GC of the underlying array, since it might have - // grown significantly if the queue was hammered for some reason - q.tasks = nil - } - return frame, true -} - -type inputTask struct { - ctx context.Context - t *txnReq - event *gomatrixserverlib.Event - wg *sync.WaitGroup - err error // written back by worker, only safe to read when all tasks are done - duration time.Duration // written back by worker, only safe to read when all tasks are done -} - -type inputWorker struct { - running atomic.Bool - input *sendFIFOQueue -} - var inFlightTxnsPerOrigin sync.Map // transaction ID -> chan util.JSONResponse -var inputWorkers sync.Map // room ID -> *inputWorker // Send implements /_matrix/federation/v1/send/{txnID} func Send( @@ -201,8 +130,6 @@ func Send( eduAPI: eduAPI, keys: keys, federation: federation, - hadEvents: make(map[string]bool), - haveEvents: make(map[string]*gomatrixserverlib.HeaderedEvent), servers: servers, keyAPI: keyAPI, roomsMu: mu, @@ -237,7 +164,7 @@ func Send( util.GetLogger(httpReq.Context()).Infof("Received transaction %q from %q containing %d PDUs, %d EDUs", txnID, request.Origin(), len(t.PDUs), len(t.EDUs)) - resp, jsonErr := t.processTransaction(context.Background()) + resp, jsonErr := t.processTransaction(httpReq.Context()) if jsonErr != nil { util.GetLogger(httpReq.Context()).WithField("jsonErr", jsonErr).Error("t.processTransaction failed") return *jsonErr @@ -263,22 +190,7 @@ type txnReq struct { keys gomatrixserverlib.JSONVerifier federation txnFederationClient roomsMu *internal.MutexByRoom - // something that can tell us about which servers are in a room right now - servers federationAPI.ServersInRoomProvider - // a list of events from the auth and prev events which we already had - hadEvents map[string]bool - hadEventsMutex sync.Mutex - // local cache of events for auth checks, etc - this may include events - // which the roomserver is unaware 
of. - haveEvents map[string]*gomatrixserverlib.HeaderedEvent - haveEventsMutex sync.Mutex - work string // metrics -} - -func (t *txnReq) hadEvent(eventID string, had bool) { - t.hadEventsMutex.Lock() - defer t.hadEventsMutex.Unlock() - t.hadEvents[eventID] = had + servers federationAPI.ServersInRoomProvider } // A subset of FederationClient functionality that txn requires. Useful for testing. @@ -293,9 +205,28 @@ type txnFederationClient interface { } func (t *txnReq) processTransaction(ctx context.Context) (*gomatrixserverlib.RespSend, *util.JSONResponse) { - results := make(map[string]gomatrixserverlib.PDUResult) var wg sync.WaitGroup - var tasks []*inputTask + wg.Add(1) + go func() { + defer wg.Done() + t.processEDUs(ctx) + }() + + results := make(map[string]gomatrixserverlib.PDUResult) + roomVersions := make(map[string]gomatrixserverlib.RoomVersion) + getRoomVersion := func(roomID string) gomatrixserverlib.RoomVersion { + if v, ok := roomVersions[roomID]; ok { + return v + } + verReq := api.QueryRoomVersionForRoomRequest{RoomID: roomID} + verRes := api.QueryRoomVersionForRoomResponse{} + if err := t.rsAPI.QueryRoomVersionForRoom(ctx, &verReq, &verRes); err != nil { + util.GetLogger(ctx).WithError(err).Warn("Transaction: Failed to query room version for room", verReq.RoomID) + return "" + } + roomVersions[roomID] = verRes.RoomVersion + return verRes.RoomVersion + } for _, pdu := range t.PDUs { pduCountTotal.WithLabelValues("total").Inc() @@ -308,15 +239,8 @@ func (t *txnReq) processTransaction(ctx context.Context) (*gomatrixserverlib.Res // failure in the PDU results continue } - verReq := api.QueryRoomVersionForRoomRequest{RoomID: header.RoomID} - verRes := api.QueryRoomVersionForRoomResponse{} - if err := t.rsAPI.QueryRoomVersionForRoom(ctx, &verReq, &verRes); err != nil { - util.GetLogger(ctx).WithError(err).Warn("Transaction: Failed to query room version for room", verReq.RoomID) - // We don't know the event ID at this point so we can't return the - // failure in the PDU results - continue - } - event, err := gomatrixserverlib.NewEventFromUntrustedJSON(pdu, verRes.RoomVersion) + roomVersion := getRoomVersion(header.RoomID) + event, err := gomatrixserverlib.NewEventFromUntrustedJSON(pdu, roomVersion) if err != nil { if _, ok := err.(gomatrixserverlib.BadJSONError); ok { // Room version 6 states that homeservers should strictly enforce canonical JSON @@ -347,114 +271,35 @@ func (t *txnReq) processTransaction(ctx context.Context) (*gomatrixserverlib.Res } continue } - v, _ := inputWorkers.LoadOrStore(event.RoomID(), &inputWorker{ - input: newSendFIFOQueue(), - }) - worker := v.(*inputWorker) - wg.Add(1) - task := &inputTask{ - ctx: ctx, - t: t, - event: event, - wg: &wg, - } - tasks = append(tasks, task) - worker.input.push(task) - if worker.running.CAS(false, true) { - go worker.run() - } - } - t.processEDUs(ctx) - wg.Wait() - - for _, task := range tasks { - if task.err != nil { - results[task.event.EventID()] = gomatrixserverlib.PDUResult{ - Error: task.err.Error(), + // pass the event to the roomserver which will do auth checks + // If the event fail auth checks, gmsl.NotAllowed error will be returned which we be silently + // discarded by the caller of this function + if err = api.SendEvents( + ctx, + t.rsAPI, + api.KindNew, + []*gomatrixserverlib.HeaderedEvent{ + event.Headered(roomVersion), + }, + t.Origin, + api.DoNotSendToOtherServers, + nil, + true, + ); err != nil { + util.GetLogger(ctx).WithError(err).Warnf("Transaction: Couldn't submit event %q to input queue: %s", 
event.EventID(), err) + results[event.EventID()] = gomatrixserverlib.PDUResult{ + Error: err.Error(), } - } else { - results[task.event.EventID()] = gomatrixserverlib.PDUResult{} - } - } - - if c := len(results); c > 0 { - util.GetLogger(ctx).Infof("Processed %d PDUs from %v in transaction %q", c, t.Origin, t.TransactionID) - } - return &gomatrixserverlib.RespSend{PDUs: results}, nil -} - -func (t *inputWorker) run() { - defer t.running.Store(false) - for { - task, ok := t.input.pop() - if !ok { - return - } - if task == nil { continue } - func() { - defer task.wg.Done() - select { - case <-task.ctx.Done(): - task.err = context.DeadlineExceeded - pduCountTotal.WithLabelValues("expired").Inc() - return - default: - evStart := time.Now() - // TODO: Is 5 minutes too long? - ctx, cancel := context.WithTimeout(context.Background(), time.Minute*5) - task.err = task.t.processEvent(ctx, task.event) - cancel() - task.duration = time.Since(evStart) - if err := task.err; err != nil { - switch err.(type) { - case *gomatrixserverlib.NotAllowed: - processEventSummary.WithLabelValues(task.t.work, MetricsOutcomeRejected).Observe( - float64(time.Since(evStart).Nanoseconds()) / 1000., - ) - util.GetLogger(task.ctx).WithError(err).WithField("event_id", task.event.EventID()).WithField("rejected", true).Warn( - "Failed to process incoming federation event, skipping", - ) - task.err = nil // make "rejected" failures silent - default: - processEventSummary.WithLabelValues(task.t.work, MetricsOutcomeFail).Observe( - float64(time.Since(evStart).Nanoseconds()) / 1000., - ) - util.GetLogger(task.ctx).WithError(err).WithField("event_id", task.event.EventID()).WithField("rejected", false).Warn( - "Failed to process incoming federation event, skipping", - ) - } - } else { - pduCountTotal.WithLabelValues("success").Inc() - processEventSummary.WithLabelValues(task.t.work, MetricsOutcomeOK).Observe( - float64(time.Since(evStart).Nanoseconds()) / 1000., - ) - } - } - }() + + results[event.EventID()] = gomatrixserverlib.PDUResult{} + pduCountTotal.WithLabelValues("success").Inc() } -} -type roomNotFoundError struct { - roomID string -} -type verifySigError struct { - eventID string - err error -} -type missingPrevEventsError struct { - eventID string - err error -} - -func (e roomNotFoundError) Error() string { return fmt.Sprintf("room %q not found", e.roomID) } -func (e verifySigError) Error() string { - return fmt.Sprintf("unable to verify signature of event %q: %s", e.eventID, e.err) -} -func (e missingPrevEventsError) Error() string { - return fmt.Sprintf("unable to get prev_events for event %q: %s", e.eventID, e.err) + wg.Wait() + return &gomatrixserverlib.RespSend{PDUs: results}, nil } func (t *txnReq) processEDUs(ctx context.Context) { @@ -598,803 +443,3 @@ func (t *txnReq) processDeviceListUpdate(ctx context.Context, e gomatrixserverli util.GetLogger(ctx).WithError(inputRes.Error).WithField("user_id", payload.UserID).Error("failed to InputDeviceListUpdate") } } - -func (t *txnReq) getServers(ctx context.Context, roomID string, event *gomatrixserverlib.Event) []gomatrixserverlib.ServerName { - // The server that sent us the event should be sufficient to tell us about missing - // prev and auth events. - servers := []gomatrixserverlib.ServerName{t.Origin} - // If the event origin is different to the transaction origin then we can use - // this as a last resort. The origin server that created the event would have - // had to know the auth and prev events. 
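The getRoomVersion closure added to processTransaction above memoises QueryRoomVersionForRoom, so a transaction carrying many PDUs for the same room only performs the lookup once. A stripped-down sketch of that memoisation, with a hypothetical memoize helper standing in for the roomserver query:

package main

import "fmt"

// memoize wraps a lookup function with a per-key cache so that repeated keys
// are only resolved once. Like the PDU loop above, this assumes the returned
// function is called from a single goroutine, so no locking is needed.
func memoize(lookup func(string) string) func(string) string {
	cache := make(map[string]string)
	return func(key string) string {
		if v, ok := cache[key]; ok {
			return v
		}
		v := lookup(key)
		cache[key] = v
		return v
	}
}

func main() {
	calls := 0
	roomVersion := memoize(func(roomID string) string {
		calls++ // stands in for the roomserver round trip
		return "9"
	})
	fmt.Println(roomVersion("!room:example.org"), roomVersion("!room:example.org"), calls) // 9 9 1
}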
- if event != nil { - if origin := event.Origin(); origin != t.Origin { - servers = append(servers, origin) - } - } - // If a specific room-to-server provider exists then use that. This will primarily - // be used for the P2P demos. - if t.servers != nil { - servers = append(servers, t.servers.GetServersForRoom(ctx, roomID, event)...) - } - return servers -} - -func (t *txnReq) processEvent(ctx context.Context, e *gomatrixserverlib.Event) error { - logger := util.GetLogger(ctx).WithField("event_id", e.EventID()).WithField("room_id", e.RoomID()) - t.work = "" // reset from previous event - - // Ask the roomserver if we know about the room and/or if we're joined - // to it. If we aren't then we won't bother processing the event. - joinedReq := api.QueryServerJoinedToRoomRequest{ - RoomID: e.RoomID(), - } - var joinedRes api.QueryServerJoinedToRoomResponse - if err := t.rsAPI.QueryServerJoinedToRoom(ctx, &joinedReq, &joinedRes); err != nil { - return fmt.Errorf("t.rsAPI.QueryServerJoinedToRoom: %w", err) - } - - if !joinedRes.RoomExists || !joinedRes.IsInRoom { - // We don't believe we're a member of this room, therefore there's - // no point in wasting work trying to figure out what to do with - // missing auth or prev events. Drop the event. - return roomNotFoundError{e.RoomID()} - } - - // Work out if the roomserver knows everything it needs to know to auth - // the event. This includes the prev_events and auth_events. - // NOTE! This is going to include prev_events that have an empty state - // snapshot. This is because we will need to re-request the event, and - // it's /state_ids, in order for it to exist in the roomserver correctly - // before the roomserver tries to work out - stateReq := api.QueryMissingAuthPrevEventsRequest{ - RoomID: e.RoomID(), - AuthEventIDs: e.AuthEventIDs(), - PrevEventIDs: e.PrevEventIDs(), - } - var stateResp api.QueryMissingAuthPrevEventsResponse - if err := t.rsAPI.QueryMissingAuthPrevEvents(ctx, &stateReq, &stateResp); err != nil { - return fmt.Errorf("t.rsAPI.QueryMissingAuthPrevEvents: %w", err) - } - - // Prepare a map of all the events we already had before this point, so - // that we don't send them to the roomserver again. - for _, eventID := range append(e.AuthEventIDs(), e.PrevEventIDs()...) { - t.hadEvent(eventID, true) - } - for _, eventID := range append(stateResp.MissingAuthEventIDs, stateResp.MissingPrevEventIDs...) 
{ - t.hadEvent(eventID, false) - } - - if len(stateResp.MissingAuthEventIDs) > 0 { - t.work = MetricsWorkMissingAuthEvents - logger.Infof("Event refers to %d unknown auth_events", len(stateResp.MissingAuthEventIDs)) - if err := t.retrieveMissingAuthEvents(ctx, e, &stateResp); err != nil { - return fmt.Errorf("t.retrieveMissingAuthEvents: %w", err) - } - } - - if len(stateResp.MissingPrevEventIDs) > 0 { - t.work = MetricsWorkMissingPrevEvents - logger.Infof("Event refers to %d unknown prev_events", len(stateResp.MissingPrevEventIDs)) - return t.processEventWithMissingState(ctx, e, stateResp.RoomVersion) - } - t.work = MetricsWorkDirect - - // pass the event to the roomserver which will do auth checks - // If the event fail auth checks, gmsl.NotAllowed error will be returned which we be silently - // discarded by the caller of this function - return api.SendEvents( - context.Background(), - t.rsAPI, - api.KindNew, - []*gomatrixserverlib.HeaderedEvent{ - e.Headered(stateResp.RoomVersion), - }, - api.DoNotSendToOtherServers, - nil, - ) -} - -func (t *txnReq) retrieveMissingAuthEvents( - ctx context.Context, e *gomatrixserverlib.Event, stateResp *api.QueryMissingAuthPrevEventsResponse, -) error { - logger := util.GetLogger(ctx).WithField("event_id", e.EventID()).WithField("room_id", e.RoomID()) - - missingAuthEvents := make(map[string]struct{}) - for _, missingAuthEventID := range stateResp.MissingAuthEventIDs { - missingAuthEvents[missingAuthEventID] = struct{}{} - } - -withNextEvent: - for missingAuthEventID := range missingAuthEvents { - withNextServer: - for _, server := range t.getServers(ctx, e.RoomID(), e) { - logger.Infof("Retrieving missing auth event %q from %q", missingAuthEventID, server) - tx, err := t.federation.GetEvent(ctx, server, missingAuthEventID) - if err != nil { - logger.WithError(err).Warnf("Failed to retrieve auth event %q", missingAuthEventID) - if errors.Is(err, context.DeadlineExceeded) { - return err - } - continue withNextServer - } - ev, err := gomatrixserverlib.NewEventFromUntrustedJSON(tx.PDUs[0], stateResp.RoomVersion) - if err != nil { - logger.WithError(err).Warnf("Failed to unmarshal auth event %q", missingAuthEventID) - continue withNextServer - } - if err = api.SendInputRoomEvents( - context.Background(), - t.rsAPI, - []api.InputRoomEvent{ - { - Kind: api.KindOutlier, - Event: ev.Headered(stateResp.RoomVersion), - AuthEventIDs: ev.AuthEventIDs(), - SendAsServer: api.DoNotSendToOtherServers, - }, - }, - ); err != nil { - return fmt.Errorf("api.SendEvents: %w", err) - } - t.hadEvent(ev.EventID(), true) // if the roomserver didn't know about the event before, it does now - t.cacheAndReturn(ev.Headered(stateResp.RoomVersion)) - delete(missingAuthEvents, missingAuthEventID) - continue withNextEvent - } - } - - if missing := len(missingAuthEvents); missing > 0 { - return fmt.Errorf("event refers to %d auth_events which we failed to fetch", missing) - } - return nil -} - -func checkAllowedByState(e *gomatrixserverlib.Event, stateEvents []*gomatrixserverlib.Event) error { - authUsingState := gomatrixserverlib.NewAuthEvents(nil) - for i := range stateEvents { - err := authUsingState.AddEvent(stateEvents[i]) - if err != nil { - return err - } - } - return gomatrixserverlib.Allowed(e, &authUsingState) -} - -func (t *txnReq) processEventWithMissingState( - ctx context.Context, e *gomatrixserverlib.Event, roomVersion gomatrixserverlib.RoomVersion, -) error { - // We are missing the previous events for this events. 
- // This means that there is a gap in our view of the history of the - // room. There two ways that we can handle such a gap: - // 1) We can fill in the gap using /get_missing_events - // 2) We can leave the gap and request the state of the room at - // this event from the remote server using either /state_ids - // or /state. - // Synapse will attempt to do 1 and if that fails or if the gap is - // too large then it will attempt 2. - // Synapse will use /state_ids if possible since usually the state - // is largely unchanged and it is more efficient to fetch a list of - // event ids and then use /event to fetch the individual events. - // However not all version of synapse support /state_ids so you may - // need to fallback to /state. - - // Attempt to fill in the gap using /get_missing_events - // This will either: - // - fill in the gap completely then process event `e` returning no backwards extremity - // - fail to fill in the gap and tell us to terminate the transaction err=not nil - // - fail to fill in the gap and tell us to fetch state at the new backwards extremity, and to not terminate the transaction - newEvents, err := t.getMissingEvents(ctx, e, roomVersion) - if err != nil { - return err - } - if len(newEvents) == 0 { - return nil - } - - backwardsExtremity := newEvents[0] - newEvents = newEvents[1:] - - type respState struct { - // A snapshot is considered trustworthy if it came from our own roomserver. - // That's because the state will have been through state resolution once - // already in QueryStateAfterEvent. - trustworthy bool - *gomatrixserverlib.RespState - } - - // at this point we know we're going to have a gap: we need to work out the room state at the new backwards extremity. - // Therefore, we cannot just query /state_ids with this event to get the state before. Instead, we need to query - // the state AFTER all the prev_events for this event, then apply state resolution to that to get the state before the event. - var states []*respState - for _, prevEventID := range backwardsExtremity.PrevEventIDs() { - // Look up what the state is after the backward extremity. This will either - // come from the roomserver, if we know all the required events, or it will - // come from a remote server via /state_ids if not. - prevState, trustworthy, lerr := t.lookupStateAfterEvent(ctx, roomVersion, backwardsExtremity.RoomID(), prevEventID) - if lerr != nil { - util.GetLogger(ctx).WithError(lerr).Errorf("Failed to lookup state after prev_event: %s", prevEventID) - return lerr - } - // Append the state onto the collected state. We'll run this through the - // state resolution next. - states = append(states, &respState{trustworthy, prevState}) - } - - // Now that we have collected all of the state from the prev_events, we'll - // run the state through the appropriate state resolution algorithm for the - // room if needed. This does a couple of things: - // 1. Ensures that the state is deduplicated fully for each state-key tuple - // 2. Ensures that we pick the latest events from both sets, in the case that - // one of the prev_events is quite a bit older than the others - resolvedState := &gomatrixserverlib.RespState{} - switch len(states) { - case 0: - extremityIsCreate := backwardsExtremity.Type() == gomatrixserverlib.MRoomCreate && backwardsExtremity.StateKeyEquals("") - if !extremityIsCreate { - // There are no previous states and this isn't the beginning of the - // room - this is an error condition! 
- util.GetLogger(ctx).Errorf("Failed to lookup any state after prev_events") - return fmt.Errorf("expected %d states but got %d", len(backwardsExtremity.PrevEventIDs()), len(states)) - } - case 1: - // There's only one previous state - if it's trustworthy (came from a - // local state snapshot which will already have been through state res), - // use it as-is. There's no point in resolving it again. - if states[0].trustworthy { - resolvedState = states[0].RespState - break - } - // Otherwise, if it isn't trustworthy (came from federation), run it through - // state resolution anyway for safety, in case there are duplicates. - fallthrough - default: - respStates := make([]*gomatrixserverlib.RespState, len(states)) - for i := range states { - respStates[i] = states[i].RespState - } - // There's more than one previous state - run them all through state res - t.roomsMu.Lock(e.RoomID()) - resolvedState, err = t.resolveStatesAndCheck(ctx, roomVersion, respStates, backwardsExtremity) - t.roomsMu.Unlock(e.RoomID()) - if err != nil { - util.GetLogger(ctx).WithError(err).Errorf("Failed to resolve state conflicts for event %s", backwardsExtremity.EventID()) - return err - } - } - - // First of all, send the backward extremity into the roomserver with the - // newly resolved state. This marks the "oldest" point in the backfill and - // sets the baseline state for any new events after this. We'll make a - // copy of the hadEvents map so that it can be taken downstream without - // worrying about concurrent map reads/writes, since t.hadEvents is meant - // to be protected by a mutex. - hadEvents := map[string]bool{} - t.hadEventsMutex.Lock() - for k, v := range t.hadEvents { - hadEvents[k] = v - } - t.hadEventsMutex.Unlock() - err = api.SendEventWithState( - context.Background(), - t.rsAPI, - api.KindOld, - resolvedState, - backwardsExtremity.Headered(roomVersion), - hadEvents, - ) - if err != nil { - return fmt.Errorf("api.SendEventWithState: %w", err) - } - - // Then send all of the newer backfilled events, of which will all be newer - // than the backward extremity, into the roomserver without state. This way - // they will automatically fast-forward based on the room state at the - // extremity in the last step. - headeredNewEvents := make([]*gomatrixserverlib.HeaderedEvent, len(newEvents)) - for i, newEvent := range newEvents { - headeredNewEvents[i] = newEvent.Headered(roomVersion) - } - if err = api.SendEvents( - context.Background(), - t.rsAPI, - api.KindOld, - append(headeredNewEvents, e.Headered(roomVersion)), - api.DoNotSendToOtherServers, - nil, - ); err != nil { - return fmt.Errorf("api.SendEvents: %w", err) - } - - return nil -} - -// lookupStateAfterEvent returns the room state after `eventID`, which is the state before eventID with the state of `eventID` (if it's a state event) -// added into the mix. 
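The doc comment above describes the job of the removed lookupStateAfterEvent: take the state before an event and fold the event itself in if it is a state event, replacing any existing entry with the same (type, state_key) pair. A toy illustration of that bookkeeping, using a simplified stateEvent type rather than gomatrixserverlib events:

package main

import "fmt"

// stateEvent is a minimal stand-in for a Matrix state event.
type stateEvent struct {
	Type     string
	StateKey string
	Content  string
}

// applyToSnapshot folds a state event into a state snapshot: replace any
// entry with the same (type, state_key) pair, otherwise append it.
func applyToSnapshot(snapshot []stateEvent, ev stateEvent) []stateEvent {
	for i := range snapshot {
		if snapshot[i].Type == ev.Type && snapshot[i].StateKey == ev.StateKey {
			snapshot[i] = ev
			return snapshot
		}
	}
	return append(snapshot, ev)
}

func main() {
	snapshot := []stateEvent{{Type: "m.room.name", StateKey: "", Content: "Old name"}}
	snapshot = applyToSnapshot(snapshot, stateEvent{Type: "m.room.name", StateKey: "", Content: "New name"})
	snapshot = applyToSnapshot(snapshot, stateEvent{Type: "m.room.topic", StateKey: "", Content: "A topic"})
	fmt.Println(len(snapshot), snapshot[0].Content) // 2 New name
}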
-func (t *txnReq) lookupStateAfterEvent(ctx context.Context, roomVersion gomatrixserverlib.RoomVersion, roomID, eventID string) (*gomatrixserverlib.RespState, bool, error) { - // try doing all this locally before we resort to querying federation - respState := t.lookupStateAfterEventLocally(ctx, roomID, eventID) - if respState != nil { - return respState, true, nil - } - - respState, err := t.lookupStateBeforeEvent(ctx, roomVersion, roomID, eventID) - if err != nil { - return nil, false, fmt.Errorf("t.lookupStateBeforeEvent: %w", err) - } - - // fetch the event we're missing and add it to the pile - h, err := t.lookupEvent(ctx, roomVersion, roomID, eventID, false) - switch err.(type) { - case verifySigError: - return respState, false, nil - case nil: - // do nothing - default: - return nil, false, fmt.Errorf("t.lookupEvent: %w", err) - } - h = t.cacheAndReturn(h) - if h.StateKey() != nil { - addedToState := false - for i := range respState.StateEvents { - se := respState.StateEvents[i] - if se.Type() == h.Type() && se.StateKeyEquals(*h.StateKey()) { - respState.StateEvents[i] = h.Unwrap() - addedToState = true - break - } - } - if !addedToState { - respState.StateEvents = append(respState.StateEvents, h.Unwrap()) - } - } - - return respState, false, nil -} - -func (t *txnReq) cacheAndReturn(ev *gomatrixserverlib.HeaderedEvent) *gomatrixserverlib.HeaderedEvent { - t.haveEventsMutex.Lock() - defer t.haveEventsMutex.Unlock() - if cached, exists := t.haveEvents[ev.EventID()]; exists { - return cached - } - t.haveEvents[ev.EventID()] = ev - return ev -} - -func (t *txnReq) lookupStateAfterEventLocally(ctx context.Context, roomID, eventID string) *gomatrixserverlib.RespState { - var res api.QueryStateAfterEventsResponse - err := t.rsAPI.QueryStateAfterEvents(ctx, &api.QueryStateAfterEventsRequest{ - RoomID: roomID, - PrevEventIDs: []string{eventID}, - }, &res) - if err != nil || !res.PrevEventsExist { - util.GetLogger(ctx).WithField("room_id", roomID).WithError(err).Warnf("failed to query state after %s locally, prev exists=%v", eventID, res.PrevEventsExist) - return nil - } - stateEvents := make([]*gomatrixserverlib.HeaderedEvent, len(res.StateEvents)) - for i, ev := range res.StateEvents { - // set the event from the haveEvents cache - this means we will share pointers with other prev_event branches for this - // processEvent request, which is better for memory. - stateEvents[i] = t.cacheAndReturn(ev) - t.hadEvent(ev.EventID(), true) - } - // we should never access res.StateEvents again so we delete it here to make GC faster - res.StateEvents = nil - - var authEvents []*gomatrixserverlib.Event - missingAuthEvents := map[string]bool{} - for _, ev := range stateEvents { - t.haveEventsMutex.Lock() - for _, ae := range ev.AuthEventIDs() { - if aev, ok := t.haveEvents[ae]; ok { - authEvents = append(authEvents, aev.Unwrap()) - } else { - missingAuthEvents[ae] = true - } - } - t.haveEventsMutex.Unlock() - } - // QueryStateAfterEvents does not return the auth events, so fetch them now. We know the roomserver has them else it wouldn't - // have stored the event. 
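The removed cacheAndReturn helper above deduplicates events by ID behind a mutex so that different prev_event branches share a single pointer per event. The same shape in isolation, with a hypothetical eventCache type in place of the txnReq fields:

package main

import (
	"fmt"
	"sync"
)

type event struct{ id string }

// eventCache stores one pointer per event ID behind a mutex and hands back
// the cached pointer when a duplicate arrives.
type eventCache struct {
	mu     sync.Mutex
	events map[string]*event
}

func (c *eventCache) cacheAndReturn(ev *event) *event {
	c.mu.Lock()
	defer c.mu.Unlock()
	if cached, ok := c.events[ev.id]; ok {
		return cached
	}
	c.events[ev.id] = ev
	return ev
}

func main() {
	cache := &eventCache{events: map[string]*event{}}
	a := cache.cacheAndReturn(&event{id: "$abc"})
	b := cache.cacheAndReturn(&event{id: "$abc"}) // the second copy is discarded
	fmt.Println(a == b)                           // true: both callers share one pointer
}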
- if len(missingAuthEvents) > 0 { - var missingEventList []string - for evID := range missingAuthEvents { - missingEventList = append(missingEventList, evID) - } - queryReq := api.QueryEventsByIDRequest{ - EventIDs: missingEventList, - } - util.GetLogger(ctx).WithField("count", len(missingEventList)).Infof("Fetching missing auth events") - var queryRes api.QueryEventsByIDResponse - if err = t.rsAPI.QueryEventsByID(ctx, &queryReq, &queryRes); err != nil { - return nil - } - for i, ev := range queryRes.Events { - authEvents = append(authEvents, t.cacheAndReturn(queryRes.Events[i]).Unwrap()) - t.hadEvent(ev.EventID(), true) - } - queryRes.Events = nil - } - - return &gomatrixserverlib.RespState{ - StateEvents: gomatrixserverlib.UnwrapEventHeaders(stateEvents), - AuthEvents: authEvents, - } -} - -// lookuptStateBeforeEvent returns the room state before the event e, which is just /state_ids and/or /state depending on what -// the server supports. -func (t *txnReq) lookupStateBeforeEvent(ctx context.Context, roomVersion gomatrixserverlib.RoomVersion, roomID, eventID string) ( - *gomatrixserverlib.RespState, error) { - - // Attempt to fetch the missing state using /state_ids and /events - return t.lookupMissingStateViaStateIDs(ctx, roomID, eventID, roomVersion) -} - -func (t *txnReq) resolveStatesAndCheck(ctx context.Context, roomVersion gomatrixserverlib.RoomVersion, states []*gomatrixserverlib.RespState, backwardsExtremity *gomatrixserverlib.Event) (*gomatrixserverlib.RespState, error) { - var authEventList []*gomatrixserverlib.Event - var stateEventList []*gomatrixserverlib.Event - for _, state := range states { - authEventList = append(authEventList, state.AuthEvents...) - stateEventList = append(stateEventList, state.StateEvents...) - } - resolvedStateEvents, err := gomatrixserverlib.ResolveConflicts(roomVersion, stateEventList, authEventList) - if err != nil { - return nil, err - } - // apply the current event -retryAllowedState: - if err = checkAllowedByState(backwardsExtremity, resolvedStateEvents); err != nil { - switch missing := err.(type) { - case gomatrixserverlib.MissingAuthEventError: - h, err2 := t.lookupEvent(ctx, roomVersion, backwardsExtremity.RoomID(), missing.AuthEventID, true) - switch err2.(type) { - case verifySigError: - return &gomatrixserverlib.RespState{ - AuthEvents: authEventList, - StateEvents: resolvedStateEvents, - }, nil - case nil: - // do nothing - default: - return nil, fmt.Errorf("missing auth event %s and failed to look it up: %w", missing.AuthEventID, err2) - } - util.GetLogger(ctx).Infof("fetched event %s", missing.AuthEventID) - resolvedStateEvents = append(resolvedStateEvents, h.Unwrap()) - goto retryAllowedState - default: - } - return nil, err - } - return &gomatrixserverlib.RespState{ - AuthEvents: authEventList, - StateEvents: resolvedStateEvents, - }, nil -} - -func (t *txnReq) getMissingEvents(ctx context.Context, e *gomatrixserverlib.Event, roomVersion gomatrixserverlib.RoomVersion) (newEvents []*gomatrixserverlib.Event, err error) { - logger := util.GetLogger(ctx).WithField("event_id", e.EventID()).WithField("room_id", e.RoomID()) - needed := gomatrixserverlib.StateNeededForAuth([]*gomatrixserverlib.Event{e}) - // query latest events (our trusted forward extremities) - req := api.QueryLatestEventsAndStateRequest{ - RoomID: e.RoomID(), - StateToFetch: needed.Tuples(), - } - var res api.QueryLatestEventsAndStateResponse - if err = t.rsAPI.QueryLatestEventsAndState(ctx, &req, &res); err != nil { - logger.WithError(err).Warn("Failed to query latest 
events") - return nil, err - } - latestEvents := make([]string, len(res.LatestEvents)) - for i, ev := range res.LatestEvents { - latestEvents[i] = res.LatestEvents[i].EventID - t.hadEvent(ev.EventID, true) - } - - var missingResp *gomatrixserverlib.RespMissingEvents - servers := t.getServers(ctx, e.RoomID(), e) - for _, server := range servers { - var m gomatrixserverlib.RespMissingEvents - if m, err = t.federation.LookupMissingEvents(ctx, server, e.RoomID(), gomatrixserverlib.MissingEvents{ - Limit: 20, - // The latest event IDs that the sender already has. These are skipped when retrieving the previous events of latest_events. - EarliestEvents: latestEvents, - // The event IDs to retrieve the previous events for. - LatestEvents: []string{e.EventID()}, - }, roomVersion); err == nil { - missingResp = &m - break - } else { - logger.WithError(err).Errorf("%s pushed us an event but %q did not respond to /get_missing_events", t.Origin, server) - if errors.Is(err, context.DeadlineExceeded) { - break - } - } - } - - if missingResp == nil { - logger.WithError(err).Errorf( - "%s pushed us an event but %d server(s) couldn't give us details about prev_events via /get_missing_events - dropping this event until it can", - t.Origin, len(servers), - ) - return nil, missingPrevEventsError{ - eventID: e.EventID(), - err: err, - } - } - - // security: how we handle failures depends on whether or not this event will become the new forward extremity for the room. - // There's 2 scenarios to consider: - // - Case A: We got pushed an event and are now fetching missing prev_events. (isInboundTxn=true) - // - Case B: We are fetching missing prev_events already and now fetching some more (isInboundTxn=false) - // In Case B, we know for sure that the event we are currently processing will not become the new forward extremity for the room, - // as it was called in response to an inbound txn which had it as a prev_event. - // In Case A, the event is a forward extremity, and could eventually become the _only_ forward extremity in the room. This is bad - // because it means we would trust the state at that event to be the state for the entire room, and allows rooms to be hijacked. - // https://github.com/matrix-org/synapse/pull/3456 - // https://github.com/matrix-org/synapse/blob/229eb81498b0fe1da81e9b5b333a0285acde9446/synapse/handlers/federation.py#L335 - // For now, we do not allow Case B, so reject the event. - logger.Infof("get_missing_events returned %d events", len(missingResp.Events)) - - // Make sure events from the missingResp are using the cache - missing events - // will be added and duplicates will be removed. 
- for i, ev := range missingResp.Events { - missingResp.Events[i] = t.cacheAndReturn(ev.Headered(roomVersion)).Unwrap() - } - - // topologically sort and sanity check that we are making forward progress - newEvents = gomatrixserverlib.ReverseTopologicalOrdering(missingResp.Events, gomatrixserverlib.TopologicalOrderByPrevEvents) - shouldHaveSomeEventIDs := e.PrevEventIDs() - hasPrevEvent := false -Event: - for _, pe := range shouldHaveSomeEventIDs { - for _, ev := range newEvents { - if ev.EventID() == pe { - hasPrevEvent = true - break Event - } - } - } - if !hasPrevEvent { - err = fmt.Errorf("called /get_missing_events but server %s didn't return any prev_events with IDs %v", t.Origin, shouldHaveSomeEventIDs) - logger.WithError(err).Errorf( - "%s pushed us an event but couldn't give us details about prev_events via /get_missing_events - dropping this event until it can", - t.Origin, - ) - return nil, missingPrevEventsError{ - eventID: e.EventID(), - err: err, - } - } - - return newEvents, nil -} - -func (t *txnReq) lookupMissingStateViaState(ctx context.Context, roomID, eventID string, roomVersion gomatrixserverlib.RoomVersion) ( - respState *gomatrixserverlib.RespState, err error) { - state, err := t.federation.LookupState(ctx, t.Origin, roomID, eventID, roomVersion) - if err != nil { - return nil, err - } - // Check that the returned state is valid. - if err := state.Check(ctx, t.keys, nil); err != nil { - return nil, err - } - // Cache the results of this state lookup and deduplicate anything we already - // have in the cache, freeing up memory. - for i, ev := range state.AuthEvents { - state.AuthEvents[i] = t.cacheAndReturn(ev.Headered(roomVersion)).Unwrap() - } - for i, ev := range state.StateEvents { - state.StateEvents[i] = t.cacheAndReturn(ev.Headered(roomVersion)).Unwrap() - } - return &state, nil -} - -func (t *txnReq) lookupMissingStateViaStateIDs(ctx context.Context, roomID, eventID string, roomVersion gomatrixserverlib.RoomVersion) ( - *gomatrixserverlib.RespState, error) { - util.GetLogger(ctx).WithField("room_id", roomID).Infof("lookupMissingStateViaStateIDs %s", eventID) - // fetch the state event IDs at the time of the event - stateIDs, err := t.federation.LookupStateIDs(ctx, t.Origin, roomID, eventID) - if err != nil { - return nil, err - } - // work out which auth/state IDs are missing - wantIDs := append(stateIDs.StateEventIDs, stateIDs.AuthEventIDs...) 
- missing := make(map[string]bool) - var missingEventList []string - t.haveEventsMutex.Lock() - for _, sid := range wantIDs { - if _, ok := t.haveEvents[sid]; !ok { - if !missing[sid] { - missing[sid] = true - missingEventList = append(missingEventList, sid) - } - } - } - t.haveEventsMutex.Unlock() - - // fetch as many as we can from the roomserver - queryReq := api.QueryEventsByIDRequest{ - EventIDs: missingEventList, - } - var queryRes api.QueryEventsByIDResponse - if err = t.rsAPI.QueryEventsByID(ctx, &queryReq, &queryRes); err != nil { - return nil, err - } - for i, ev := range queryRes.Events { - queryRes.Events[i] = t.cacheAndReturn(queryRes.Events[i]) - t.hadEvent(ev.EventID(), true) - evID := queryRes.Events[i].EventID() - if missing[evID] { - delete(missing, evID) - } - } - queryRes.Events = nil // allow it to be GCed - - concurrentRequests := 8 - missingCount := len(missing) - util.GetLogger(ctx).WithField("room_id", roomID).WithField("event_id", eventID).Infof("lookupMissingStateViaStateIDs missing %d/%d events", missingCount, len(wantIDs)) - - // If over 50% of the auth/state events from /state_ids are missing - // then we'll just call /state instead, otherwise we'll just end up - // hammering the remote side with /event requests unnecessarily. - if missingCount > concurrentRequests && missingCount > len(wantIDs)/2 { - util.GetLogger(ctx).WithFields(logrus.Fields{ - "missing": missingCount, - "event_id": eventID, - "room_id": roomID, - "total_state": len(stateIDs.StateEventIDs), - "total_auth_events": len(stateIDs.AuthEventIDs), - }).Info("Fetching all state at event") - return t.lookupMissingStateViaState(ctx, roomID, eventID, roomVersion) - } - - if missingCount > 0 { - util.GetLogger(ctx).WithFields(logrus.Fields{ - "missing": missingCount, - "event_id": eventID, - "room_id": roomID, - "total_state": len(stateIDs.StateEventIDs), - "total_auth_events": len(stateIDs.AuthEventIDs), - "concurrent_requests": concurrentRequests, - }).Info("Fetching missing state at event") - - // Create a queue containing all of the missing event IDs that we want - // to retrieve. - pending := make(chan string, missingCount) - for missingEventID := range missing { - pending <- missingEventID - } - close(pending) - - // Define how many workers we should start to do this. - if missingCount < concurrentRequests { - concurrentRequests = missingCount - } - - // Create the wait group. - var fetchgroup sync.WaitGroup - fetchgroup.Add(concurrentRequests) - - // This is the only place where we'll write to t.haveEvents from - // multiple goroutines, and everywhere else is blocked on this - // synchronous function anyway. - var haveEventsMutex sync.Mutex - - // Define what we'll do in order to fetch the missing event ID. - fetch := func(missingEventID string) { - var h *gomatrixserverlib.HeaderedEvent - h, err = t.lookupEvent(ctx, roomVersion, roomID, missingEventID, false) - switch err.(type) { - case verifySigError: - return - case nil: - break - default: - util.GetLogger(ctx).WithFields(logrus.Fields{ - "event_id": missingEventID, - "room_id": roomID, - }).Info("Failed to fetch missing event") - return - } - haveEventsMutex.Lock() - t.cacheAndReturn(h) - haveEventsMutex.Unlock() - } - - // Create the worker. - worker := func(ch <-chan string) { - defer fetchgroup.Done() - for missingEventID := range ch { - fetch(missingEventID) - } - } - - // Start the workers. - for i := 0; i < concurrentRequests; i++ { - go worker(pending) - } - - // Wait for the workers to finish. 
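The removed fetch path above spells out a bounded worker pool: queue every missing event ID on a buffered channel, close it, cap the number of workers at the queue size, and wait on a WaitGroup. A self-contained sketch of that pattern, with a hypothetical fetchAll helper standing in for the per-event /event request:

package main

import (
	"fmt"
	"sync"
)

// fetchAll fans the IDs out to a bounded pool of workers over a closed
// channel, waiting for all of them with a WaitGroup.
func fetchAll(ids []string, concurrency int, fetch func(id string)) {
	if len(ids) < concurrency {
		concurrency = len(ids)
	}

	// Queue every ID up front and close the channel so workers exit
	// once the queue drains.
	pending := make(chan string, len(ids))
	for _, id := range ids {
		pending <- id
	}
	close(pending)

	var wg sync.WaitGroup
	wg.Add(concurrency)
	for i := 0; i < concurrency; i++ {
		go func() {
			defer wg.Done()
			for id := range pending {
				fetch(id)
			}
		}()
	}
	wg.Wait()
}

func main() {
	var mu sync.Mutex
	fetched := map[string]bool{}
	fetchAll([]string{"$a", "$b", "$c", "$d"}, 8, func(id string) {
		mu.Lock()
		fetched[id] = true
		mu.Unlock()
	})
	fmt.Println(len(fetched)) // 4
}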
- fetchgroup.Wait() - } - - resp, err := t.createRespStateFromStateIDs(stateIDs) - return resp, err -} - -func (t *txnReq) createRespStateFromStateIDs(stateIDs gomatrixserverlib.RespStateIDs) ( - *gomatrixserverlib.RespState, error) { // nolint:unparam - t.haveEventsMutex.Lock() - defer t.haveEventsMutex.Unlock() - - // create a RespState response using the response to /state_ids as a guide - respState := gomatrixserverlib.RespState{} - - for i := range stateIDs.StateEventIDs { - ev, ok := t.haveEvents[stateIDs.StateEventIDs[i]] - if !ok { - logrus.Warnf("Missing state event in createRespStateFromStateIDs: %s", stateIDs.StateEventIDs[i]) - continue - } - respState.StateEvents = append(respState.StateEvents, ev.Unwrap()) - } - for i := range stateIDs.AuthEventIDs { - ev, ok := t.haveEvents[stateIDs.AuthEventIDs[i]] - if !ok { - logrus.Warnf("Missing auth event in createRespStateFromStateIDs: %s", stateIDs.AuthEventIDs[i]) - continue - } - respState.AuthEvents = append(respState.AuthEvents, ev.Unwrap()) - } - // We purposefully do not do auth checks on the returned events, as they will still - // be processed in the exact same way, just as a 'rejected' event - // TODO: Add a field to HeaderedEvent to indicate if the event is rejected. - return &respState, nil -} - -func (t *txnReq) lookupEvent(ctx context.Context, roomVersion gomatrixserverlib.RoomVersion, roomID, missingEventID string, localFirst bool) (*gomatrixserverlib.HeaderedEvent, error) { - if localFirst { - // fetch from the roomserver - queryReq := api.QueryEventsByIDRequest{ - EventIDs: []string{missingEventID}, - } - var queryRes api.QueryEventsByIDResponse - if err := t.rsAPI.QueryEventsByID(ctx, &queryReq, &queryRes); err != nil { - util.GetLogger(ctx).Warnf("Failed to query roomserver for missing event %s: %s - falling back to remote", missingEventID, err) - } else if len(queryRes.Events) == 1 { - return queryRes.Events[0], nil - } - } - var event *gomatrixserverlib.Event - found := false - servers := t.getServers(ctx, roomID, nil) - for _, serverName := range servers { - txn, err := t.federation.GetEvent(ctx, serverName, missingEventID) - if err != nil || len(txn.PDUs) == 0 { - util.GetLogger(ctx).WithError(err).WithField("event_id", missingEventID).Warn("Failed to get missing /event for event ID") - if errors.Is(err, context.DeadlineExceeded) { - break - } - continue - } - event, err = gomatrixserverlib.NewEventFromUntrustedJSON(txn.PDUs[0], roomVersion) - if err != nil { - util.GetLogger(ctx).WithError(err).WithField("event_id", missingEventID).Warnf("Transaction: Failed to parse event JSON of event") - continue - } - found = true - break - } - if !found { - util.GetLogger(ctx).WithField("event_id", missingEventID).Warnf("Failed to get missing /event for event ID from %d server(s)", len(servers)) - return nil, fmt.Errorf("wasn't able to find event via %d server(s)", len(servers)) - } - if err := event.VerifyEventSignatures(ctx, t.keys); err != nil { - util.GetLogger(ctx).WithError(err).Warnf("Transaction: Couldn't validate signature of event %q", event.EventID()) - return nil, verifySigError{event.EventID(), err} - } - return t.cacheAndReturn(event.Headered(roomVersion)), nil -} diff --git a/federationapi/routing/send_test.go b/federationapi/routing/send_test.go index 702884613..f1f6169d9 100644 --- a/federationapi/routing/send_test.go +++ b/federationapi/routing/send_test.go @@ -4,7 +4,6 @@ import ( "context" "encoding/json" "fmt" - "reflect" "testing" "time" @@ -244,8 +243,6 @@ func mustCreateTransaction(rsAPI 
api.RoomserverInternalAPI, fedClient txnFederat eduAPI: &testEDUProducer{}, keys: &test.NopJSONVerifier{}, federation: fedClient, - haveEvents: make(map[string]*gomatrixserverlib.HeaderedEvent), - hadEvents: make(map[string]bool), roomsMu: internal.NewMutexByRoom(), } t.PDUs = pdus @@ -279,6 +276,7 @@ NextPDU: } } +/* func fromStateTuples(tuples []gomatrixserverlib.StateKeyTuple, omitTuples []gomatrixserverlib.StateKeyTuple) (result []*gomatrixserverlib.HeaderedEvent) { NextTuple: for _, t := range tuples { @@ -294,6 +292,7 @@ NextTuple: } return } +*/ func assertInputRoomEvents(t *testing.T, got []api.InputRoomEvent, want []*gomatrixserverlib.HeaderedEvent) { for _, g := range got { @@ -355,6 +354,7 @@ func TestTransactionFailAuthChecks(t *testing.T) { // we request them from /get_missing_events. It works by setting PrevEventsExist=false in the roomserver query response, // resulting in a call to /get_missing_events which returns the missing prev event. Both events should be processed in // topological order and sent to the roomserver. +/* func TestTransactionFetchMissingPrevEvents(t *testing.T) { haveEvent := testEvents[len(testEvents)-3] prevEvent := testEvents[len(testEvents)-2] @@ -619,3 +619,4 @@ func TestTransactionFetchMissingStateByStateIDs(t *testing.T) { mustProcessTransaction(t, txn, nil) assertInputRoomEvents(t, rsAPI.inputRoomEvents, []*gomatrixserverlib.HeaderedEvent{eventB, eventC, eventD}) } +*/ diff --git a/federationapi/routing/threepid.go b/federationapi/routing/threepid.go index 5ba28881c..b16c68d25 100644 --- a/federationapi/routing/threepid.go +++ b/federationapi/routing/threepid.go @@ -89,7 +89,7 @@ func CreateInvitesFrom3PIDInvites( } // Send all the events - if err := api.SendEvents(req.Context(), rsAPI, api.KindNew, evs, cfg.Matrix.ServerName, nil); err != nil { + if err := api.SendEvents(req.Context(), rsAPI, api.KindNew, evs, "TODO", cfg.Matrix.ServerName, nil, false); err != nil { util.GetLogger(req.Context()).WithError(err).Error("SendEvents failed") return jsonerror.InternalServerError() } @@ -178,8 +178,10 @@ func ExchangeThirdPartyInvite( []*gomatrixserverlib.HeaderedEvent{ signedEvent.Event.Headered(verRes.RoomVersion), }, + request.Origin(), cfg.Matrix.ServerName, nil, + false, ); err != nil { util.GetLogger(httpReq.Context()).WithError(err).Error("SendEvents failed") return jsonerror.InternalServerError() diff --git a/federationapi/storage/interface.go b/federationapi/storage/interface.go index a36f51528..21a919f6a 100644 --- a/federationapi/storage/interface.go +++ b/federationapi/storage/interface.go @@ -32,7 +32,7 @@ type Database interface { GetJoinedHosts(ctx context.Context, roomID string) ([]types.JoinedHost, error) GetAllJoinedHosts(ctx context.Context) ([]gomatrixserverlib.ServerName, error) // GetJoinedHostsForRooms returns the complete set of servers in the rooms given. 
- GetJoinedHostsForRooms(ctx context.Context, roomIDs []string) ([]gomatrixserverlib.ServerName, error) + GetJoinedHostsForRooms(ctx context.Context, roomIDs []string, excludeSelf bool) ([]gomatrixserverlib.ServerName, error) PurgeRoomState(ctx context.Context, roomID string) error StoreJSON(ctx context.Context, js string) (*shared.Receipt, error) diff --git a/federationapi/storage/postgres/storage.go b/federationapi/storage/postgres/storage.go index 1f6afe37c..2e2c08911 100644 --- a/federationapi/storage/postgres/storage.go +++ b/federationapi/storage/postgres/storage.go @@ -24,6 +24,7 @@ import ( "github.com/matrix-org/dendrite/internal/caching" "github.com/matrix-org/dendrite/internal/sqlutil" "github.com/matrix-org/dendrite/setup/config" + "github.com/matrix-org/gomatrixserverlib" ) // Database stores information needed by the federation sender @@ -35,7 +36,7 @@ type Database struct { } // NewDatabase opens a new database -func NewDatabase(dbProperties *config.DatabaseOptions, cache caching.FederationCache) (*Database, error) { +func NewDatabase(dbProperties *config.DatabaseOptions, cache caching.FederationCache, serverName gomatrixserverlib.ServerName) (*Database, error) { var d Database var err error if d.db, err = sqlutil.Open(dbProperties); err != nil { @@ -89,6 +90,7 @@ func NewDatabase(dbProperties *config.DatabaseOptions, cache caching.FederationC } d.Database = shared.Database{ DB: d.db, + ServerName: serverName, Cache: cache, Writer: d.writer, FederationJoinedHosts: joinedHosts, diff --git a/federationapi/storage/shared/storage.go b/federationapi/storage/shared/storage.go index ddd770e2e..160c7f6fa 100644 --- a/federationapi/storage/shared/storage.go +++ b/federationapi/storage/shared/storage.go @@ -29,6 +29,7 @@ import ( type Database struct { DB *sql.DB + ServerName gomatrixserverlib.ServerName Cache caching.FederationCache Writer sqlutil.Writer FederationQueuePDUs tables.FederationQueuePDUs @@ -102,8 +103,19 @@ func (d *Database) GetAllJoinedHosts(ctx context.Context) ([]gomatrixserverlib.S return d.FederationJoinedHosts.SelectAllJoinedHosts(ctx) } -func (d *Database) GetJoinedHostsForRooms(ctx context.Context, roomIDs []string) ([]gomatrixserverlib.ServerName, error) { - return d.FederationJoinedHosts.SelectJoinedHostsForRooms(ctx, roomIDs) +func (d *Database) GetJoinedHostsForRooms(ctx context.Context, roomIDs []string, excludeSelf bool) ([]gomatrixserverlib.ServerName, error) { + servers, err := d.FederationJoinedHosts.SelectJoinedHostsForRooms(ctx, roomIDs) + if err != nil { + return nil, err + } + if excludeSelf { + for i, server := range servers { + if server == d.ServerName { + servers = append(servers[:i], servers[i+1:]...) 
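The exclusion loop above splices d.ServerName out of the slice while ranging over it. An alternative way to express the same step, shown here with plain strings instead of gomatrixserverlib.ServerName and a hypothetical excludeServer helper, is the filter-in-place idiom, which reuses the backing array and also copes with the name appearing more than once:

package main

import "fmt"

// excludeServer drops every occurrence of self from the list by filtering
// in place over the existing backing array.
func excludeServer(servers []string, self string) []string {
	filtered := servers[:0]
	for _, server := range servers {
		if server != self {
			filtered = append(filtered, server)
		}
	}
	return filtered
}

func main() {
	servers := []string{"a.example.org", "localhost", "b.example.org", "localhost"}
	fmt.Println(excludeServer(servers, "localhost")) // [a.example.org b.example.org]
}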
+ } + } + } + return servers, nil } // StoreJSON adds a JSON blob into the queue JSON table and returns diff --git a/federationapi/storage/sqlite3/storage.go b/federationapi/storage/sqlite3/storage.go index 0fe6df5da..978dd7136 100644 --- a/federationapi/storage/sqlite3/storage.go +++ b/federationapi/storage/sqlite3/storage.go @@ -23,6 +23,7 @@ import ( "github.com/matrix-org/dendrite/internal/caching" "github.com/matrix-org/dendrite/internal/sqlutil" "github.com/matrix-org/dendrite/setup/config" + "github.com/matrix-org/gomatrixserverlib" ) // Database stores information needed by the federation sender @@ -34,7 +35,7 @@ type Database struct { } // NewDatabase opens a new database -func NewDatabase(dbProperties *config.DatabaseOptions, cache caching.FederationCache) (*Database, error) { +func NewDatabase(dbProperties *config.DatabaseOptions, cache caching.FederationCache, serverName gomatrixserverlib.ServerName) (*Database, error) { var d Database var err error if d.db, err = sqlutil.Open(dbProperties); err != nil { @@ -88,6 +89,7 @@ func NewDatabase(dbProperties *config.DatabaseOptions, cache caching.FederationC } d.Database = shared.Database{ DB: d.db, + ServerName: serverName, Cache: cache, Writer: d.writer, FederationJoinedHosts: joinedHosts, diff --git a/federationapi/storage/storage.go b/federationapi/storage/storage.go index 083f0b302..4b52ca206 100644 --- a/federationapi/storage/storage.go +++ b/federationapi/storage/storage.go @@ -24,15 +24,16 @@ import ( "github.com/matrix-org/dendrite/federationapi/storage/sqlite3" "github.com/matrix-org/dendrite/internal/caching" "github.com/matrix-org/dendrite/setup/config" + "github.com/matrix-org/gomatrixserverlib" ) // NewDatabase opens a new database -func NewDatabase(dbProperties *config.DatabaseOptions, cache caching.FederationCache) (Database, error) { +func NewDatabase(dbProperties *config.DatabaseOptions, cache caching.FederationCache, serverName gomatrixserverlib.ServerName) (Database, error) { switch { case dbProperties.ConnectionString.IsSQLite(): - return sqlite3.NewDatabase(dbProperties, cache) + return sqlite3.NewDatabase(dbProperties, cache, serverName) case dbProperties.ConnectionString.IsPostgres(): - return postgres.NewDatabase(dbProperties, cache) + return postgres.NewDatabase(dbProperties, cache, serverName) default: return nil, fmt.Errorf("unexpected database type") } diff --git a/federationapi/storage/storage_wasm.go b/federationapi/storage/storage_wasm.go index 455464e7c..09abed63e 100644 --- a/federationapi/storage/storage_wasm.go +++ b/federationapi/storage/storage_wasm.go @@ -20,13 +20,14 @@ import ( "github.com/matrix-org/dendrite/federationapi/storage/sqlite3" "github.com/matrix-org/dendrite/internal/caching" "github.com/matrix-org/dendrite/setup/config" + "github.com/matrix-org/gomatrixserverlib" ) // NewDatabase opens a new database -func NewDatabase(dbProperties *config.DatabaseOptions, cache caching.FederationCache) (Database, error) { +func NewDatabase(dbProperties *config.DatabaseOptions, cache caching.FederationCache, serverName gomatrixserverlib.ServerName) (Database, error) { switch { case dbProperties.ConnectionString.IsSQLite(): - return sqlite3.NewDatabase(dbProperties, cache) + return sqlite3.NewDatabase(dbProperties, cache, serverName) case dbProperties.ConnectionString.IsPostgres(): return nil, fmt.Errorf("can't use Postgres implementation") default: diff --git a/go.mod b/go.mod index b19a5adc6..d3e05d0ec 100644 --- a/go.mod +++ b/go.mod @@ -1,25 +1,30 @@ module github.com/matrix-org/dendrite 
+replace github.com/nats-io/nats-server/v2 => github.com/neilalexander/nats-server/v2 v2.3.3-0.20220104162330-c76d5fd70423 + +replace github.com/nats-io/nats.go => github.com/neilalexander/nats.go v1.11.1-0.20220104162523-f4ddebe1061c + require ( github.com/Arceliar/ironwood v0.0.0-20211125050254-8951369625d0 + github.com/Arceliar/phony v0.0.0-20210209235338-dde1a8dca979 github.com/DATA-DOG/go-sqlmock v1.5.0 github.com/HdrHistogram/hdrhistogram-go v1.1.2 // indirect github.com/MFAshby/stdemuxerhook v1.0.0 github.com/Masterminds/semver/v3 v3.1.1 - github.com/Shopify/sarama v1.29.1 + github.com/S7evinK/saramajetstream v0.0.0-20210709110708-de6efc8c4a32 + github.com/Shopify/sarama v1.31.0 github.com/codeclysm/extract v2.2.0+incompatible - github.com/containerd/containerd v1.5.8 // indirect + github.com/containerd/containerd v1.5.9 // indirect github.com/docker/docker v20.10.12+incompatible github.com/docker/go-connections v0.4.0 - github.com/getsentry/sentry-go v0.11.0 - github.com/golang/snappy v0.0.4 // indirect + github.com/getsentry/sentry-go v0.12.0 github.com/gologme/log v1.3.0 github.com/gorilla/mux v1.8.0 github.com/gorilla/websocket v1.4.2 github.com/h2non/filetype v1.1.3 // indirect github.com/hashicorp/golang-lru v0.5.4 github.com/juju/testing v0.0.0-20211215003918-77eb13d6cad2 // indirect - github.com/klauspost/compress v1.13.6 // indirect + github.com/klauspost/compress v1.14.2 // indirect github.com/lib/pq v1.10.4 github.com/libp2p/go-libp2p v0.13.0 github.com/libp2p/go-libp2p-circuit v0.4.0 @@ -35,34 +40,33 @@ require ( github.com/matrix-org/go-http-js-libp2p v0.0.0-20200518170932-783164aeeda4 github.com/matrix-org/go-sqlite3-js v0.0.0-20210709140738-b0d1ba599a6d github.com/matrix-org/gomatrix v0.0.0-20210324163249-be2af5ef2e16 - github.com/matrix-org/gomatrixserverlib v0.0.0-20211115192839-15a64d244aa2 - github.com/matrix-org/naffka v0.0.0-20210623111924-14ff508b58e0 - github.com/matrix-org/pinecone v0.0.0-20211216094739-095c5ea64d02 + github.com/matrix-org/gomatrixserverlib v0.0.0-20220128100033-8d79e0c35e32 + github.com/matrix-org/pinecone v0.0.0-20220121094951-351265543ddf github.com/matrix-org/util v0.0.0-20200807132607-55161520e1d4 - github.com/mattn/go-sqlite3 v1.14.9 + github.com/mattn/go-sqlite3 v1.14.10 github.com/morikuni/aec v1.0.0 // indirect + github.com/nats-io/nats-server/v2 v2.3.2 + github.com/nats-io/nats.go v1.13.1-0.20211122170419-d7c1d78a50fc github.com/neilalexander/utp v0.1.1-0.20210727203401-54ae7b1cd5f9 github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646 - github.com/ngrok/sqlmw v0.0.0-20211214174426-35873b76a395 + github.com/ngrok/sqlmw v0.0.0-20211220175533-9d16fdc47b31 github.com/opentracing/opentracing-go v1.2.0 github.com/patrickmn/go-cache v2.1.0+incompatible - github.com/pierrec/lz4 v2.6.1+incompatible // indirect github.com/pkg/errors v0.9.1 github.com/pressly/goose v2.7.0+incompatible - github.com/prometheus/client_golang v1.11.0 + github.com/prometheus/client_golang v1.12.0 github.com/sirupsen/logrus v1.8.1 - github.com/tidwall/gjson v1.9.3 - github.com/tidwall/sjson v1.2.2 + github.com/tidwall/gjson v1.13.0 + github.com/tidwall/sjson v1.2.4 github.com/uber/jaeger-client-go v2.30.0+incompatible github.com/uber/jaeger-lib v2.4.1+incompatible github.com/yggdrasil-network/yggdrasil-go v0.4.2 go.uber.org/atomic v1.9.0 - golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3 + golang.org/x/crypto v0.0.0-20220126234351-aa10faf2a1f8 golang.org/x/image v0.0.0-20211028202545-6944b10bf410 - golang.org/x/mobile v0.0.0-20211207041440-4e6c2922fdee - 
golang.org/x/net v0.0.0-20211216030914-fe4d6282115f + golang.org/x/mobile v0.0.0-20220112015953-858099ff7816 + golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 - golang.org/x/text v0.3.7 // indirect gopkg.in/h2non/bimg.v1 v1.1.5 gopkg.in/yaml.v2 v2.4.0 nhooyr.io/websocket v1.8.7 diff --git a/go.sum b/go.sum index 24198ed60..f04754295 100644 --- a/go.sum +++ b/go.sum @@ -12,17 +12,27 @@ cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6T cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= @@ -85,19 +95,22 @@ github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5 github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/PuerkitoBio/goquery v1.5.1/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc= 
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/RoaringBitmap/roaring v0.4.7/go.mod h1:8khRDP4HmeXns4xIj9oGrKSz7XTQiJx2zgh7AcNke4w= github.com/RyanCarrier/dijkstra v1.0.0/go.mod h1:5agGUBNEtUAGIANmbw09fuO3a2htPEkc1jNH01qxCWA= github.com/RyanCarrier/dijkstra-1 v0.0.0-20170512020943-0e5801a26345/go.mod h1:OK4EvWJ441LQqGzed5NGB6vKBAE34n3z7iayPcEwr30= +github.com/S7evinK/saramajetstream v0.0.0-20210709110708-de6efc8c4a32 h1:i3fOph9Hjleo6LbuqN9ODFxnwt7mOtYMpCGeC8qJN50= +github.com/S7evinK/saramajetstream v0.0.0-20210709110708-de6efc8c4a32/go.mod h1:ne+jkLlzafIzaE4Q0Ze81T27dNgXe1wxovVEoAtSHTc= github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0= github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= -github.com/Shopify/sarama v1.26.1/go.mod h1:NbSGBSSndYaIhRcBtY9V0U7AyH+x71bG668AuWys/yU= -github.com/Shopify/sarama v1.29.1 h1:wBAacXbYVLmWieEA/0X/JagDdCZ8NVFOfS6l6+2u5S0= -github.com/Shopify/sarama v1.29.1/go.mod h1:mdtqvCSg8JOxk8PmpTNGyo6wzd4BMm4QXSfDnTXmgkE= +github.com/Shopify/sarama v1.29.0/go.mod h1:2QpgD79wpdAESqNQMxNc0KYMkycd4slxGdV3TWSVqrU= +github.com/Shopify/sarama v1.31.0 h1:gObk7jCPutDxf+E6GA5G21noAZsi1SvP9ftCQYqpzus= +github.com/Shopify/sarama v1.31.0/go.mod h1:BeW3gXRc/CxgAsrSly2RE9nIXUfC9ezb7QHBPVhvzjI= github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/Shopify/toxiproxy/v2 v2.3.0 h1:62YkpiP4bzdhKMH+6uC5E95y608k3zDwdzuBMsnn3uQ= +github.com/Shopify/toxiproxy/v2 v2.3.0/go.mod h1:KvQTtB6RjCJY4zqNJn7C7JDFgsG5uoHYDirfUfpIm0c= github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA= github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= @@ -121,7 +134,6 @@ github.com/anacrolix/missinggo v1.2.1 h1:0IE3TqX5y5D0IxeMwTyIgqdDew4QrzcXaaEnJQy github.com/anacrolix/missinggo v1.2.1/go.mod h1:J5cMhif8jPmFoC3+Uvob3OXXNIhOUikzMt+uUjeM21Y= github.com/anacrolix/missinggo/perf v1.0.0/go.mod h1:ljAFWkBuzkO12MQclXzZrosP5urunoLS0Cbvb4V0uMQ= github.com/anacrolix/tagflag v0.0.0-20180109131632-2146c8d41bf0/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw= -github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= @@ -169,8 +181,9 @@ github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInq github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 
v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= github.com/cheekybits/genny v1.0.0 h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitfE= @@ -222,8 +235,8 @@ github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7 github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= -github.com/containerd/containerd v1.5.8 h1:NmkCC1/QxyZFBny8JogwLpOy2f+VEbO/f6bV2Mqtwuw= -github.com/containerd/containerd v1.5.8/go.mod h1:YdFSv5bTFLpG2HIYmfqDpSYYTDX+mc5qtSuYx1YUb/s= +github.com/containerd/containerd v1.5.9 h1:rs6Xg1gtIxaeyG+Smsb/0xaSDu1VgFhOCKBXxMxbsF4= +github.com/containerd/containerd v1.5.9/go.mod h1:fvQqCfadDGga5HZyn3j4+dx56qj2I9YwBrlSdalvJYQ= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= @@ -371,16 +384,17 @@ github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHqu github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= github.com/frankban/quicktest v1.0.0/go.mod h1:R98jIehRai+d1/3Hv2//jOVCTJhW1VBavT6B6CuGq2k= github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o= -github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/frankban/quicktest v1.14.0 h1:+cqqvzZV87b4adx/5ayVOaYZ2CrvM4ejQvUdBzPPUss= +github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= -github.com/getsentry/sentry-go v0.11.0 h1:qro8uttJGvNAMr5CLcFI9CHR0aDzXl0Vs3Pmw/oTPg8= -github.com/getsentry/sentry-go v0.11.0/go.mod h1:KBQIxiZAetw62Cj8Ri964vAEWVdgfaUCn30Q3bCvANo= +github.com/getsentry/sentry-go v0.12.0 h1:era7g0re5iY13bHSdN/xMkyV+5zZppjRVQhZrXCaEIk= +github.com/getsentry/sentry-go v0.12.0/go.mod h1:NSap0JBYWzHND8oMbyi0+XZhUalc1TBdRL1M71JZW2c= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod 
h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= @@ -446,6 +460,7 @@ github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -460,6 +475,7 @@ github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= @@ -482,7 +498,6 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -496,14 +511,15 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 
+github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -512,11 +528,14 @@ github.com/google/gopacket v1.1.17/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8v github.com/google/gopacket v1.1.18 h1:lum7VRA9kdlvBi7/v2p7/zcbkduHaCH/SVVyurs7OpY= github.com/google/gopacket v1.1.18/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -675,8 +694,9 @@ github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCV github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= @@ -727,17 +747,17 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress 
v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= -github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc= +github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.14.2 h1:S0OHlFk/Gbon/yauFJ4FfJJF5V0fc5HbBTJazi28pRw= +github.com/klauspost/compress v1.14.2/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d h1:68u9r4wEvL3gYg2jvAOgROwZ3H+Y3hIDk4tbbmIjcYQ= @@ -745,15 +765,16 @@ github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d/go.mod h1:5Ky9EC2xfo github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/labstack/echo/v4 v4.1.11/go.mod h1:i541M3Fj6f76NZtHSj7TXnyM8n2gaodfvfxNnFqi74g= +github.com/labstack/echo/v4 v4.5.0/go.mod h1:czIriw4a0C1dFun+ObrXp7ok03xON0N1awStJ6ArI7Y= github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= @@ -777,7 +798,6 @@ github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZ github.com/libp2p/go-flow-metrics v0.0.2/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs= github.com/libp2p/go-flow-metrics v0.0.3 
h1:8tAs/hSdNvUiLgtlSy3mxwxWP4I9y/jlkPFT7epKdeM= github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs= -github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs= github.com/libp2p/go-libp2p v0.6.1/go.mod h1:CTFnWXogryAHjXAKEbOf1OWY+VeAP3lDMZkfEI5sT54= github.com/libp2p/go-libp2p v0.7.0/go.mod h1:hZJf8txWeCduQRDC/WSqBGMxaTHCOYHt2xSU1ivxn0k= github.com/libp2p/go-libp2p v0.7.4/go.mod h1:oXsBlTLF1q7pxr+9w6lqzS1ILpyHsaBPniVO7zIHGMw= @@ -933,7 +953,6 @@ github.com/libp2p/go-netroute v0.1.3/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdm github.com/libp2p/go-openssl v0.0.2/go.mod h1:v8Zw2ijCSWBQi8Pq5GAixw6DbFfa9u6VIYDXnvOXkc0= github.com/libp2p/go-openssl v0.0.3/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= -github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= github.com/libp2p/go-openssl v0.0.5/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= github.com/libp2p/go-openssl v0.0.7 h1:eCAzdLejcNVBzP/iZM9vqHnQm+XyCEbSSIheIPRGNsw= github.com/libp2p/go-openssl v0.0.7/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= @@ -978,7 +997,6 @@ github.com/lxn/walk v0.0.0-20210112085537-c389da54e794/go.mod h1:E23UucZGqpuUANJ github.com/lxn/win v0.0.0-20210218163916-a377121e959e/go.mod h1:KxxjdtRkfNoYDCUP5ryK7XJJNTnpC8atvtmTheChOtk= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -1006,12 +1024,10 @@ github.com/matrix-org/go-sqlite3-js v0.0.0-20210709140738-b0d1ba599a6d/go.mod h1 github.com/matrix-org/gomatrix v0.0.0-20190528120928-7df988a63f26/go.mod h1:3fxX6gUjWyI/2Bt7J1OLhpCzOfO/bB3AiX0cJtEKud0= github.com/matrix-org/gomatrix v0.0.0-20210324163249-be2af5ef2e16 h1:ZtO5uywdd5dLDCud4r0r55eP4j9FuUNpl60Gmntcop4= github.com/matrix-org/gomatrix v0.0.0-20210324163249-be2af5ef2e16/go.mod h1:/gBX06Kw0exX1HrwmoBibFA98yBk/jxKpGVeyQbff+s= -github.com/matrix-org/gomatrixserverlib v0.0.0-20211115192839-15a64d244aa2 h1:RFsBN3509Ql6NJ7TDVkcKoN3bb/tmqUqzur5c0AwIHQ= -github.com/matrix-org/gomatrixserverlib v0.0.0-20211115192839-15a64d244aa2/go.mod h1:rB8tBUUUo1rzUqpzklRDSooxZ6YMhoaEPx4SO5fGeUc= -github.com/matrix-org/naffka v0.0.0-20210623111924-14ff508b58e0 h1:HZCzy4oVzz55e+cOMiX/JtSF2UOY1evBl2raaE7ACcU= -github.com/matrix-org/naffka v0.0.0-20210623111924-14ff508b58e0/go.mod h1:sjyPyRxKM5uw1nD2cJ6O2OxI6GOqyVBfNXqKjBZTBZE= -github.com/matrix-org/pinecone v0.0.0-20211216094739-095c5ea64d02 h1:tLn95Nqq3KPOZAjogGZTKMEkn4mMIzKu09biRTz/Ack= -github.com/matrix-org/pinecone v0.0.0-20211216094739-095c5ea64d02/go.mod h1:r6dsL+ylE0yXe/7zh8y/Bdh6aBYI1r+u4yZni9A4iyk= +github.com/matrix-org/gomatrixserverlib v0.0.0-20220128100033-8d79e0c35e32 h1:DiWPsGAYMlBQq/urm7TJkIeSf9FnfzegcaQUpgwIbUs= +github.com/matrix-org/gomatrixserverlib v0.0.0-20220128100033-8d79e0c35e32/go.mod 
h1:qFvhfbQ5orQxlH9vCiFnP4dW27xxnWHdNUBKyj/fbiY= +github.com/matrix-org/pinecone v0.0.0-20220121094951-351265543ddf h1:/nqfHUdQHr3WVdbZieaYFvHF1rin5pvDTa/NOZ/qCyE= +github.com/matrix-org/pinecone v0.0.0-20220121094951-351265543ddf/go.mod h1:r6dsL+ylE0yXe/7zh8y/Bdh6aBYI1r+u4yZni9A4iyk= github.com/matrix-org/util v0.0.0-20190711121626-527ce5ddefc7/go.mod h1:vVQlW/emklohkZnOPwD3LrZUBqdfsbiyO3p1lNV8F6U= github.com/matrix-org/util v0.0.0-20200807132607-55161520e1d4 h1:eCEHXWDv9Rm335MSuB49mFUK44bwZPFSDde3ORE3syk= github.com/matrix-org/util v0.0.0-20200807132607-55161520e1d4/go.mod h1:vVQlW/emklohkZnOPwD3LrZUBqdfsbiyO3p1lNV8F6U= @@ -1020,6 +1036,7 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-isatty v0.0.0-20160806122752-66b8e73f3f5c/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= @@ -1027,15 +1044,15 @@ github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.13 h1:qdl+GuBjcsKKDco5BsxPJlId98mSWNKqYA+Co0SC1yA= github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= -github.com/mattn/go-sqlite3 v1.14.2/go.mod h1:JIl7NbARA7phWnGvh0LKTyg7S9BA+6gx71ShQilpsus= -github.com/mattn/go-sqlite3 v1.14.9 h1:10HX2Td0ocZpYEjhilsuo6WWtUqttj2Kb0KtD86/KYA= -github.com/mattn/go-sqlite3 v1.14.9/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/mattn/go-sqlite3 v1.14.10 h1:MLn+5bFRlWMGoSRmJour3CL1w/qL96mvipqpwQW/Sfk= +github.com/mattn/go-sqlite3 v1.14.10/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= github.com/mattomatic/dijkstra v0.0.0-20130617153013-6f6d134eb237/go.mod h1:UOnLAUmVG5paym8pD3C4B9BQylUDC2vXFJJpT7JrlEA= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= @@ -1053,6 +1070,8 @@ github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7 github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 
h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/highwayhash v1.0.1 h1:dZ6IIu8Z14VlC0VpfKofAhCy74wu/Qb5gcn52yWoz/0= +github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= github.com/minio/sha256-simd v0.1.0/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= @@ -1074,8 +1093,9 @@ github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= @@ -1143,22 +1163,28 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= -github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= -github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/jwt/v2 v2.2.0 h1:Yg/4WFK6vsqMudRg91eBb7Dh6XeVcDMPHycDE8CfltE= +github.com/nats-io/jwt/v2 v2.2.0/go.mod h1:0tqz9Hlu6bCBFLWAASKhE5vUA4c24L9KPUUgvwumE/k= +github.com/nats-io/nkeys v0.3.0 h1:cgM5tL53EvYRU+2YLXIK0G2mJtK12Ft9oeooSZMA2G8= +github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4= +github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32 h1:W6apQkHrMkS0Muv8G/TipAy/FJl/rCYT0+EuS8+Z0z4= github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uYLpLIr5fm8diHn0JbqRycJi6w0Ms= github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= 
+github.com/neilalexander/nats-server/v2 v2.3.3-0.20220104162330-c76d5fd70423 h1:BLQVdjMH5XD4BYb0fa+c2Oh2Nr1vrO7GKvRnIJDxChc= +github.com/neilalexander/nats-server/v2 v2.3.3-0.20220104162330-c76d5fd70423/go.mod h1:9sdEkBhyZMQG1M9TevnlYUwMusRACn2vlgOeqoHKwVo= +github.com/neilalexander/nats.go v1.11.1-0.20220104162523-f4ddebe1061c h1:G2qsv7D0rY94HAu8pXmElMluuMHQ85waxIDQBhIzV2Q= +github.com/neilalexander/nats.go v1.11.1-0.20220104162523-f4ddebe1061c/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= github.com/neilalexander/utp v0.1.1-0.20210622132614-ee9a34a30488/go.mod h1:NPHGhPc0/wudcaCqL/H5AOddkRf8GPRhzOujuUKGQu8= github.com/neilalexander/utp v0.1.1-0.20210727203401-54ae7b1cd5f9 h1:lrVQzBtkeQEGGYUHwSX1XPe1E5GL6U3KYCNe2G4bncQ= github.com/neilalexander/utp v0.1.1-0.20210727203401-54ae7b1cd5f9/go.mod h1:NPHGhPc0/wudcaCqL/H5AOddkRf8GPRhzOujuUKGQu8= github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646 h1:zYyBkD/k9seD2A7fsi6Oo2LfFZAehjjQMERAvZLEDnQ= github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646/go.mod h1:jpp1/29i3P1S/RLdc7JQKbRpFeM1dOBd8T9ki5s+AY8= -github.com/ngrok/sqlmw v0.0.0-20211214174426-35873b76a395 h1:K2PaGJmUSJRywUw0T5tborClDpQDUwcpzayaWABjfQM= -github.com/ngrok/sqlmw v0.0.0-20211214174426-35873b76a395/go.mod h1:E26fwEtRNigBfFfHDWsklmo0T7Ixbg0XXgck+Hq4O9k= +github.com/ngrok/sqlmw v0.0.0-20211220175533-9d16fdc47b31 h1:FFHgfAIoAXCCL4xBoAugZVpekfGmZ/fBBueneUKBv7I= +github.com/ngrok/sqlmw v0.0.0-20211220175533-9d16fdc47b31/go.mod h1:E26fwEtRNigBfFfHDWsklmo0T7Ixbg0XXgck+Hq4O9k= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d/go.mod h1:YUTz3bUH2ZwIWBy3CJBeOBEugqcmXREj14T+iG/4k4U= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= @@ -1198,8 +1224,9 @@ github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go. 
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= +github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= @@ -1227,7 +1254,6 @@ github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/9 github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= -github.com/pierrec/lz4 v2.4.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.6.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= @@ -1250,8 +1276,9 @@ github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDf github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.0 h1:C+UIj/QWtmqY13Arb8kwMt5j34/0Z2iKamrJ+ryC0Gg= +github.com/prometheus/client_golang v1.12.0/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -1265,8 +1292,9 @@ github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.32.1 
h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -1278,16 +1306,18 @@ github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDa github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= @@ -1380,17 +1410,16 @@ github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= -github.com/tidwall/gjson v1.9.1/go.mod h1:jydLKE7s8J0+1/5jC4eXcuFlzKizGrCKvLmBVX/5oXc= -github.com/tidwall/gjson v1.9.3 h1:hqzS9wAHMO+KVBBkLxYdkEeeFHuqr95GfClRLKlgK0E= -github.com/tidwall/gjson v1.9.3/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= -github.com/tidwall/match v1.0.3/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/gjson v1.12.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.13.0 h1:3TFY9yxOQShrvmjdM76K+jc66zJeT6D3/VFFYCGQf7M= +github.com/tidwall/gjson v1.13.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= 
github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tidwall/sjson v1.0.3/go.mod h1:bURseu1nuBkFpIES5cz6zBtjmYeOQmEESshn7VpF15Y= -github.com/tidwall/sjson v1.2.2 h1:H1Llj/C9G+BoUN2DsybLHjWvr9dx4Uazavf0sXQ+rOs= -github.com/tidwall/sjson v1.2.2/go.mod h1:jmW2RZpbKuExPFUHeFSBMiovT9ZyOziEHDRkbsdp0B0= +github.com/tidwall/sjson v1.2.4 h1:cuiLzLnaMeBhRmEv00Lpk3tkYrcxpmbU81tAY4Dw0tc= +github.com/tidwall/sjson v1.2.4/go.mod h1:098SZ494YoMWPmMO6ct4dcFnqxwj9r/gF0Etp19pSNM= github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -1408,10 +1437,12 @@ github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w= github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= +github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= @@ -1437,9 +1468,10 @@ github.com/willf/bitset v1.1.9/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPyS github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= -github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= +github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= github.com/xdg/scram v1.0.3/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= -github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xdg/stringprep v1.0.3/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference 
v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= @@ -1453,7 +1485,9 @@ github.com/yggdrasil-network/yggdrasil-go v0.4.2/go.mod h1:/iMJjOrXRsjlFgqhWOPhe github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= @@ -1515,7 +1549,6 @@ golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -1524,14 +1557,15 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3 h1:0es+/5331RGQPcXlMfP+WrnIIS6dNnNRe0WB02W0F4M= -golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220126234351-aa10faf2a1f8 h1:kACShD3qhmr/3rLmg1yXyt+N4HcwutKyPRB93s54TIU= +golang.org/x/crypto v0.0.0-20220126234351-aa10faf2a1f8/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp 
v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1566,8 +1600,8 @@ golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7 golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mobile v0.0.0-20211207041440-4e6c2922fdee h1:/tShaw8UTf0XzI8DOZwQHzC7d6Vi3EtrBnftiZ4vAvU= -golang.org/x/mobile v0.0.0-20211207041440-4e6c2922fdee/go.mod h1:pe2sM7Uk+2Su1y7u/6Z8KJ24D7lepUjFZbhFOrmDfuQ= +golang.org/x/mobile v0.0.0-20220112015953-858099ff7816 h1:jhDgkcu3yQ4tasBZ+1YwDmK7eFmuVf1w1k+NGGGxfmE= +golang.org/x/mobile v0.0.0-20220112015953-858099ff7816/go.mod h1:pe2sM7Uk+2Su1y7u/6Z8KJ24D7lepUjFZbhFOrmDfuQ= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= @@ -1576,7 +1610,6 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180406214816-61147c48b25b/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1600,6 +1633,7 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1612,9 +1646,14 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net 
v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= @@ -1622,15 +1661,18 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210427231257-85d9c07bbe3a/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210927181540-4e4d966f7476/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211216030914-fe4d6282115f h1:hEYJvxw1lSnWIl8X9ofsYMklzaDs90JI2az5YMd4fPM= -golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220105145211-5b0dc2dfae98/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1638,6 +1680,7 @@ 
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1658,6 +1701,7 @@ golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1708,12 +1752,18 @@ golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1737,9 +1787,13 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210611083646-a4fc73990273/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6 h1:foEbQz/B0Oz6YIqu/69kfXPYeFQAuuMYFkjaqXzl5Wo= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= @@ -1750,6 +1804,7 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7-0.20210503195748-5c7c50ebbd4f/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= @@ -1758,8 +1813,10 @@ golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 h1:Hir2P/De0WpUhtrKGGjvSb2YxUgyZ7EFOSLIcSSpiwE= +golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1806,8 +1863,18 @@ golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= @@ -1841,7 +1908,13 @@ google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsb google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1849,6 +1922,7 @@ google.golang.org/appengine 
v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= @@ -1873,8 +1947,18 @@ google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a h1:pOwg4OoaRYScjmR4LlLgdtnyoHYTSAVhhqe5uPdpII8= google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= @@ -1892,8 +1976,11 @@ google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQ google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.28.1/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod 
h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.2 h1:EQyQC3sa8M+p6Ulc8yy9SWSS2GVwyRc83gAbG8lrl4o= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= @@ -1918,7 +2005,6 @@ gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20160105164936-4f90aeace3a2/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= @@ -1937,11 +2023,6 @@ gopkg.in/h2non/gock.v1 v1.0.14/go.mod h1:sX4zAkdYX1TRGJ2JY156cFspQn4yRWn6p9EMdOD gopkg.in/httprequest.v1 v1.1.1/go.mod h1:/CkavNL+g3qLOrpFHVrEx4NKepeqR4XTZWNj4sGGjz0= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.51.1/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= -gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q= -gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4= -gopkg.in/jcmturner/gokrb5.v7 v7.5.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= -gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= gopkg.in/macaroon.v2 v2.1.0 h1:HZcsjBCzq9t0eBPMKqTN/uSN6JOm78ZJ2INbqcBQOUI= gopkg.in/macaroon.v2 v2.1.0/go.mod h1:OUb+TQP/OP0WOerC2Jp/3CwhIKyIa9kQjuc7H24e6/o= gopkg.in/mgo.v2 v2.0.0-20160818015218-f2b6f6c918c4/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= @@ -1961,6 +2042,7 @@ gopkg.in/yaml.v2 v2.0.0-20170712054546-1be3d31502d6/go.mod h1:JAlM8MvJe8wmxCU4Bl gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -1984,8 +2066,9 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3 
h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= diff --git a/internal/test/config.go b/internal/test/config.go index 826d2ce00..bb2f8a4c6 100644 --- a/internal/test/config.go +++ b/internal/test/config.go @@ -81,7 +81,7 @@ func MakeConfig(configDir, kafkaURI, database, host string, startPort int) (*con cfg.MediaAPI.BasePath = config.Path(mediaBasePath) - cfg.Global.Kafka.Addresses = []string{kafkaURI} + cfg.Global.JetStream.Addresses = []string{kafkaURI} // TODO: Use different databases for the different schemas. // Using the same database for every schema currently works because diff --git a/keyserver/api/api.go b/keyserver/api/api.go index 5a109cc65..0eea2f0fa 100644 --- a/keyserver/api/api.go +++ b/keyserver/api/api.go @@ -69,7 +69,8 @@ type DeviceMessage struct { *DeviceKeys `json:"DeviceKeys,omitempty"` *eduapi.OutputCrossSigningKeyUpdate `json:"CrossSigningKeyUpdate,omitempty"` // A monotonically increasing number which represents device changes for this user. - StreamID int + StreamID int + DeviceChangeID int64 } // DeviceKeys represents a set of device keys for a single device @@ -224,8 +225,6 @@ type QueryKeysResponse struct { } type QueryKeyChangesRequest struct { - // The partition which had key events sent to - Partition int32 // The offset of the last received key event, or sarama.OffsetOldest if this is from the beginning Offset int64 // The inclusive offset where to track key changes up to. Messages with this offset are included in the response. @@ -236,8 +235,6 @@ type QueryKeyChangesRequest struct { type QueryKeyChangesResponse struct { // The set of users who have had their keys change. UserIDs []string - // The partition being served - useful if the partition is unknown at request time - Partition int32 // The latest offset represented in this response. Offset int64 // Set if there was a problem handling the request. 
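
The QueryKeyChanges API above no longer carries a Kafka partition: callers track a single monotonically increasing change ID instead of a (partition, offset) pair. Below is a minimal caller-side sketch of the new shape, not part of this changeset; it assumes a keyserver `api.KeyInternalAPI` value (here called `keyAPI`) that exposes `QueryKeyChanges` as implemented in keyserver/internal/internal.go, and it reuses `sarama.OffsetNewest` as the "no upper limit" sentinel, as the storage layer still does. The package and function names are illustrative only.

```go
// Package example is a hypothetical caller of the refactored key change API.
package example

import (
	"context"
	"fmt"

	"github.com/Shopify/sarama"
	"github.com/matrix-org/dendrite/keyserver/api"
)

// usersWithKeyChanges returns the users whose keys changed after fromChangeID,
// plus the latest change ID seen, suitable for storing in the caller's next
// sync position. This is a sketch under the assumptions stated above.
func usersWithKeyChanges(ctx context.Context, keyAPI api.KeyInternalAPI, fromChangeID int64) ([]string, int64, error) {
	var res api.QueryKeyChangesResponse
	keyAPI.QueryKeyChanges(ctx, &api.QueryKeyChangesRequest{
		Offset:   fromChangeID,        // exclusive lower bound (previously a per-partition Kafka offset)
		ToOffset: sarama.OffsetNewest, // inclusive upper bound; OffsetNewest means "no upper limit"
	}, &res)
	if res.Error != nil {
		return nil, 0, fmt.Errorf("QueryKeyChanges: %s", res.Error.Err)
	}
	return res.UserIDs, res.Offset, nil
}
```

Because change IDs now come from a single database sequence, one (fromOffset, toOffset] window covers all users; there is no longer a need to query every partition (or every keyserver) to assemble the full set of key changes between two sync tokens.
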
diff --git a/keyserver/consumers/cross_signing.go b/keyserver/consumers/cross_signing.go index 2ba627976..4b2bd4a9a 100644 --- a/keyserver/consumers/cross_signing.go +++ b/keyserver/consumers/cross_signing.go @@ -22,6 +22,7 @@ import ( "github.com/matrix-org/dendrite/keyserver/api" "github.com/matrix-org/dendrite/keyserver/storage" "github.com/matrix-org/dendrite/setup/config" + "github.com/matrix-org/dendrite/setup/jetstream" "github.com/matrix-org/dendrite/setup/process" "github.com/matrix-org/gomatrixserverlib" "github.com/sirupsen/logrus" @@ -50,7 +51,7 @@ func NewOutputCrossSigningKeyUpdateConsumer( consumer := internal.ContinualConsumer{ Process: process, ComponentName: "keyserver/keyserver", - Topic: cfg.Global.Kafka.TopicFor(config.TopicOutputKeyChangeEvent), + Topic: cfg.Global.JetStream.TopicFor(jetstream.OutputKeyChangeEvent), Consumer: kafkaConsumer, PartitionStore: keyDB, } diff --git a/keyserver/internal/internal.go b/keyserver/internal/internal.go index 3e91962ed..259249217 100644 --- a/keyserver/internal/internal.go +++ b/keyserver/internal/internal.go @@ -59,17 +59,13 @@ func (a *KeyInternalAPI) InputDeviceListUpdate( } func (a *KeyInternalAPI) QueryKeyChanges(ctx context.Context, req *api.QueryKeyChangesRequest, res *api.QueryKeyChangesResponse) { - if req.Partition < 0 { - req.Partition = a.Producer.DefaultPartition() - } - userIDs, latest, err := a.DB.KeyChanges(ctx, req.Partition, req.Offset, req.ToOffset) + userIDs, latest, err := a.DB.KeyChanges(ctx, req.Offset, req.ToOffset) if err != nil { res.Error = &api.KeyError{ Err: err.Error(), } } res.Offset = latest - res.Partition = req.Partition res.UserIDs = userIDs } diff --git a/keyserver/keyserver.go b/keyserver/keyserver.go index 477efafd6..8cc50ea0d 100644 --- a/keyserver/keyserver.go +++ b/keyserver/keyserver.go @@ -25,7 +25,7 @@ import ( "github.com/matrix-org/dendrite/keyserver/storage" "github.com/matrix-org/dendrite/setup/base" "github.com/matrix-org/dendrite/setup/config" - "github.com/matrix-org/dendrite/setup/kafka" + "github.com/matrix-org/dendrite/setup/jetstream" "github.com/sirupsen/logrus" ) @@ -40,16 +40,16 @@ func AddInternalRoutes(router *mux.Router, intAPI api.KeyInternalAPI) { func NewInternalAPI( base *base.BaseDendrite, cfg *config.KeyServer, fedClient fedsenderapi.FederationClient, ) api.KeyInternalAPI { - consumer, producer := kafka.SetupConsumerProducer(&cfg.Matrix.Kafka) + js, consumer, _ := jetstream.Prepare(&cfg.Matrix.JetStream) db, err := storage.NewDatabase(&cfg.Database) if err != nil { logrus.WithError(err).Panicf("failed to connect to key server database") } keyChangeProducer := &producers.KeyChange{ - Topic: string(cfg.Matrix.Kafka.TopicFor(config.TopicOutputKeyChangeEvent)), - Producer: producer, - DB: db, + Topic: string(cfg.Matrix.JetStream.TopicFor(jetstream.OutputKeyChangeEvent)), + JetStream: js, + DB: db, } ap := &internal.KeyInternalAPI{ DB: db, diff --git a/keyserver/producers/keychange.go b/keyserver/producers/keychange.go index 782675c2a..fd143c6cf 100644 --- a/keyserver/producers/keychange.go +++ b/keyserver/producers/keychange.go @@ -18,52 +18,47 @@ import ( "context" "encoding/json" - "github.com/Shopify/sarama" eduapi "github.com/matrix-org/dendrite/eduserver/api" "github.com/matrix-org/dendrite/keyserver/api" "github.com/matrix-org/dendrite/keyserver/storage" + "github.com/matrix-org/dendrite/setup/jetstream" + "github.com/nats-io/nats.go" "github.com/sirupsen/logrus" ) // KeyChange produces key change events for the sync API and federation sender to consume type 
KeyChange struct { - Topic string - Producer sarama.SyncProducer - DB storage.Database -} - -// DefaultPartition returns the default partition this process is sending key changes to. -// NB: A keyserver MUST send key changes to only 1 partition or else query operations will -// become inconsistent. Partitions can be sharded (e.g by hash of user ID of key change) but -// then all keyservers must be queried to calculate the entire set of key changes between -// two sync tokens. -func (p *KeyChange) DefaultPartition() int32 { - return 0 + Topic string + JetStream nats.JetStreamContext + DB storage.Database } // ProduceKeyChanges creates new change events for each key func (p *KeyChange) ProduceKeyChanges(keys []api.DeviceMessage) error { userToDeviceCount := make(map[string]int) for _, key := range keys { - var m sarama.ProducerMessage - + id, err := p.DB.StoreKeyChange(context.Background(), key.UserID) + if err != nil { + return err + } + key.DeviceChangeID = id value, err := json.Marshal(key) if err != nil { return err } - m.Topic = string(p.Topic) - m.Key = sarama.StringEncoder(key.UserID) - m.Value = sarama.ByteEncoder(value) + m := &nats.Msg{ + Subject: p.Topic, + Header: nats.Header{}, + } + m.Header.Set(jetstream.UserID, key.UserID) + m.Data = value - partition, offset, err := p.Producer.SendMessage(&m) - if err != nil { - return err - } - err = p.DB.StoreKeyChange(context.Background(), partition, offset, key.UserID) + _, err = p.JetStream.PublishMsg(m) if err != nil { return err } + userToDeviceCount[key.UserID]++ } for userID, count := range userToDeviceCount { @@ -76,7 +71,6 @@ func (p *KeyChange) ProduceKeyChanges(keys []api.DeviceMessage) error { } func (p *KeyChange) ProduceSigningKeyUpdate(key eduapi.CrossSigningKeyUpdate) error { - var m sarama.ProducerMessage output := &api.DeviceMessage{ Type: api.TypeCrossSigningUpdate, OutputCrossSigningKeyUpdate: &eduapi.OutputCrossSigningKeyUpdate{ @@ -84,20 +78,25 @@ func (p *KeyChange) ProduceSigningKeyUpdate(key eduapi.CrossSigningKeyUpdate) er }, } + id, err := p.DB.StoreKeyChange(context.Background(), key.UserID) + if err != nil { + return err + } + output.DeviceChangeID = id + value, err := json.Marshal(output) if err != nil { return err } - m.Topic = string(p.Topic) - m.Key = sarama.StringEncoder(key.UserID) - m.Value = sarama.ByteEncoder(value) - - partition, offset, err := p.Producer.SendMessage(&m) - if err != nil { - return err + m := &nats.Msg{ + Subject: p.Topic, + Header: nats.Header{}, } - err = p.DB.StoreKeyChange(context.Background(), partition, offset, key.UserID) + m.Header.Set(jetstream.UserID, key.UserID) + m.Data = value + + _, err = p.JetStream.PublishMsg(m) if err != nil { return err } diff --git a/keyserver/storage/interface.go b/keyserver/storage/interface.go index 99842bc58..87feae47d 100644 --- a/keyserver/storage/interface.go +++ b/keyserver/storage/interface.go @@ -66,14 +66,14 @@ type Database interface { // cannot be claimed or if none exist for this (user, device, algorithm), instead it is omitted from the returned slice. ClaimKeys(ctx context.Context, userToDeviceToAlgorithm map[string]map[string]string) ([]api.OneTimeKeys, error) - // StoreKeyChange stores key change metadata after the change has been sent to Kafka. `userID` is the the user who has changed - // their keys in some way. 
- StoreKeyChange(ctx context.Context, partition int32, offset int64, userID string) error + // StoreKeyChange stores key change metadata and returns the device change ID which represents the position in the /sync stream for this device change. + // `userID` is the user who has changed their keys in some way. + StoreKeyChange(ctx context.Context, userID string) (int64, error) // KeyChanges returns a list of user IDs who have modified their keys from the offset given (exclusive) to the offset given (inclusive). // A to offset of sarama.OffsetNewest means no upper limit. // Returns the offset of the latest key change. - KeyChanges(ctx context.Context, partition int32, fromOffset, toOffset int64) (userIDs []string, latestOffset int64, err error) + KeyChanges(ctx context.Context, fromOffset, toOffset int64) (userIDs []string, latestOffset int64, err error) // StaleDeviceLists returns a list of user IDs ending with the domains provided who have stale device lists. // If no domains are given, all user IDs with stale device lists are returned. diff --git a/keyserver/storage/postgres/deltas/2022012016470000_key_changes.go b/keyserver/storage/postgres/deltas/2022012016470000_key_changes.go new file mode 100644 index 000000000..e5bcf08d1 --- /dev/null +++ b/keyserver/storage/postgres/deltas/2022012016470000_key_changes.go @@ -0,0 +1,79 @@ +// Copyright 2022 The Matrix.org Foundation C.I.C. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package deltas + +import ( + "database/sql" + "fmt" + + "github.com/matrix-org/dendrite/internal/sqlutil" + "github.com/pressly/goose" +) + +func LoadFromGoose() { + goose.AddMigration(UpRefactorKeyChanges, DownRefactorKeyChanges) +} + +func LoadRefactorKeyChanges(m *sqlutil.Migrations) { + m.AddMigration(UpRefactorKeyChanges, DownRefactorKeyChanges) +} + +func UpRefactorKeyChanges(tx *sql.Tx) error { + // start counting from the last max offset, else 0. We need to do a count(*) first to see if there + // even are entries in this table to know if we can query for log_offset. Without the count then + // the query to SELECT the max log offset fails on new Dendrite instances as log_offset doesn't + // exist on that table.
Even though we discard the error, the txn is tainted and gets aborted :/ + var count int + _ = tx.QueryRow(`SELECT count(*) FROM keyserver_key_changes`).Scan(&count) + if count > 0 { + var maxOffset int64 + _ = tx.QueryRow(`SELECT coalesce(MAX(log_offset), 0) AS offset FROM keyserver_key_changes`).Scan(&maxOffset) + if _, err := tx.Exec(fmt.Sprintf(`CREATE SEQUENCE IF NOT EXISTS keyserver_key_changes_seq START %d`, maxOffset)); err != nil { + return fmt.Errorf("failed to CREATE SEQUENCE for key changes, starting at %d: %s", maxOffset, err) + } + } + + _, err := tx.Exec(` + -- make the new table + DROP TABLE IF EXISTS keyserver_key_changes; + CREATE TABLE IF NOT EXISTS keyserver_key_changes ( + change_id BIGINT PRIMARY KEY DEFAULT nextval('keyserver_key_changes_seq'), + user_id TEXT NOT NULL, + CONSTRAINT keyserver_key_changes_unique_per_user UNIQUE (user_id) + ); + `) + if err != nil { + return fmt.Errorf("failed to execute upgrade: %w", err) + } + return nil +} + +func DownRefactorKeyChanges(tx *sql.Tx) error { + _, err := tx.Exec(` + -- Drop all data and revert back, we can't keep the data as Kafka offsets determine the numbers + DROP SEQUENCE IF EXISTS keyserver_key_changes_seq; + DROP TABLE IF EXISTS keyserver_key_changes; + CREATE TABLE IF NOT EXISTS keyserver_key_changes ( + partition BIGINT NOT NULL, + log_offset BIGINT NOT NULL, + user_id TEXT NOT NULL, + CONSTRAINT keyserver_key_changes_unique UNIQUE (partition, log_offset) + ); + `) + if err != nil { + return fmt.Errorf("failed to execute downgrade: %w", err) + } + return nil +} diff --git a/keyserver/storage/postgres/key_changes_table.go b/keyserver/storage/postgres/key_changes_table.go index df4b47e79..20d227c24 100644 --- a/keyserver/storage/postgres/key_changes_table.go +++ b/keyserver/storage/postgres/key_changes_table.go @@ -26,27 +26,25 @@ import ( var keyChangesSchema = ` -- Stores key change information about users. Used to determine when to send updated device lists to clients. +CREATE SEQUENCE IF NOT EXISTS keyserver_key_changes_seq; CREATE TABLE IF NOT EXISTS keyserver_key_changes ( - partition BIGINT NOT NULL, - log_offset BIGINT NOT NULL, + change_id BIGINT PRIMARY KEY DEFAULT nextval('keyserver_key_changes_seq'), user_id TEXT NOT NULL, - CONSTRAINT keyserver_key_changes_unique UNIQUE (partition, log_offset) + CONSTRAINT keyserver_key_changes_unique_per_user UNIQUE (user_id) ); ` -// Replace based on partition|offset - we should never insert duplicates unless the kafka logs are wiped. -// Rather than falling over, just overwrite (though this will mean clients with an existing sync token will -// miss out on updates). TODO: Ideally we would detect when kafka logs are purged then purge this table too. +// Replace based on user ID. We don't care how many times the user's keys have changed, only that they +// have changed, hence we can just keep bumping the change ID for this user. const upsertKeyChangeSQL = "" + - "INSERT INTO keyserver_key_changes (partition, log_offset, user_id)" + - " VALUES ($1, $2, $3)" + - " ON CONFLICT ON CONSTRAINT keyserver_key_changes_unique" + - " DO UPDATE SET user_id = $3" + "INSERT INTO keyserver_key_changes (user_id)" + + " VALUES ($1)" + + " ON CONFLICT ON CONSTRAINT keyserver_key_changes_unique_per_user" + + " DO UPDATE SET change_id = nextval('keyserver_key_changes_seq')" + + " RETURNING change_id" -// select the highest offset for each user in the range. The grouping by user gives distinct entries and then we just -// take the max offset value as the latest offset. 
const selectKeyChangesSQL = "" + - "SELECT user_id, MAX(log_offset) FROM keyserver_key_changes WHERE partition = $1 AND log_offset > $2 AND log_offset <= $3 GROUP BY user_id" + "SELECT user_id, change_id FROM keyserver_key_changes WHERE change_id > $1 AND change_id <= $2" type keyChangesStatements struct { db *sql.DB @@ -59,31 +57,32 @@ func NewPostgresKeyChangesTable(db *sql.DB) (tables.KeyChanges, error) { db: db, } _, err := db.Exec(keyChangesSchema) - if err != nil { - return nil, err - } - if s.upsertKeyChangeStmt, err = db.Prepare(upsertKeyChangeSQL); err != nil { - return nil, err - } - if s.selectKeyChangesStmt, err = db.Prepare(selectKeyChangesSQL); err != nil { - return nil, err - } - return s, nil + return s, err } -func (s *keyChangesStatements) InsertKeyChange(ctx context.Context, partition int32, offset int64, userID string) error { - _, err := s.upsertKeyChangeStmt.ExecContext(ctx, partition, offset, userID) - return err +func (s *keyChangesStatements) Prepare() (err error) { + if s.upsertKeyChangeStmt, err = s.db.Prepare(upsertKeyChangeSQL); err != nil { + return err + } + if s.selectKeyChangesStmt, err = s.db.Prepare(selectKeyChangesSQL); err != nil { + return err + } + return nil +} + +func (s *keyChangesStatements) InsertKeyChange(ctx context.Context, userID string) (changeID int64, err error) { + err = s.upsertKeyChangeStmt.QueryRowContext(ctx, userID).Scan(&changeID) + return } func (s *keyChangesStatements) SelectKeyChanges( - ctx context.Context, partition int32, fromOffset, toOffset int64, + ctx context.Context, fromOffset, toOffset int64, ) (userIDs []string, latestOffset int64, err error) { if toOffset == sarama.OffsetNewest { toOffset = math.MaxInt64 } latestOffset = fromOffset - rows, err := s.selectKeyChangesStmt.QueryContext(ctx, partition, fromOffset, toOffset) + rows, err := s.selectKeyChangesStmt.QueryContext(ctx, fromOffset, toOffset) if err != nil { return nil, 0, err } diff --git a/keyserver/storage/postgres/storage.go b/keyserver/storage/postgres/storage.go index 52f3a7f6b..b71cc1a7a 100644 --- a/keyserver/storage/postgres/storage.go +++ b/keyserver/storage/postgres/storage.go @@ -16,6 +16,7 @@ package postgres import ( "github.com/matrix-org/dendrite/internal/sqlutil" + "github.com/matrix-org/dendrite/keyserver/storage/postgres/deltas" "github.com/matrix-org/dendrite/keyserver/storage/shared" "github.com/matrix-org/dendrite/setup/config" ) @@ -51,6 +52,14 @@ func NewDatabase(dbProperties *config.DatabaseOptions) (*shared.Database, error) if err != nil { return nil, err } + m := sqlutil.NewMigrations() + deltas.LoadRefactorKeyChanges(m) + if err = m.RunDeltas(db, dbProperties); err != nil { + return nil, err + } + if err = kc.Prepare(); err != nil { + return nil, err + } d := &shared.Database{ DB: db, Writer: sqlutil.NewDummyWriter(), diff --git a/keyserver/storage/shared/storage.go b/keyserver/storage/shared/storage.go index 5bd8be368..5914d28e1 100644 --- a/keyserver/storage/shared/storage.go +++ b/keyserver/storage/shared/storage.go @@ -135,14 +135,16 @@ func (d *Database) ClaimKeys(ctx context.Context, userToDeviceToAlgorithm map[st return result, err } -func (d *Database) StoreKeyChange(ctx context.Context, partition int32, offset int64, userID string) error { - return d.Writer.Do(nil, nil, func(_ *sql.Tx) error { - return d.KeyChangesTable.InsertKeyChange(ctx, partition, offset, userID) +func (d *Database) StoreKeyChange(ctx context.Context, userID string) (id int64, err error) { + err = d.Writer.Do(nil, nil, func(_ *sql.Tx) error { + id, err = 
d.KeyChangesTable.InsertKeyChange(ctx, userID) + return err }) + return } -func (d *Database) KeyChanges(ctx context.Context, partition int32, fromOffset, toOffset int64) (userIDs []string, latestOffset int64, err error) { - return d.KeyChangesTable.SelectKeyChanges(ctx, partition, fromOffset, toOffset) +func (d *Database) KeyChanges(ctx context.Context, fromOffset, toOffset int64) (userIDs []string, latestOffset int64, err error) { + return d.KeyChangesTable.SelectKeyChanges(ctx, fromOffset, toOffset) } // StaleDeviceLists returns a list of user IDs ending with the domains provided who have stale device lists. diff --git a/keyserver/storage/sqlite3/deltas/2022012016470000_key_changes.go b/keyserver/storage/sqlite3/deltas/2022012016470000_key_changes.go new file mode 100644 index 000000000..fbc548c38 --- /dev/null +++ b/keyserver/storage/sqlite3/deltas/2022012016470000_key_changes.go @@ -0,0 +1,76 @@ +// Copyright 2022 The Matrix.org Foundation C.I.C. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package deltas + +import ( + "database/sql" + "fmt" + + "github.com/matrix-org/dendrite/internal/sqlutil" + "github.com/pressly/goose" +) + +func LoadFromGoose() { + goose.AddMigration(UpRefactorKeyChanges, DownRefactorKeyChanges) +} + +func LoadRefactorKeyChanges(m *sqlutil.Migrations) { + m.AddMigration(UpRefactorKeyChanges, DownRefactorKeyChanges) +} + +func UpRefactorKeyChanges(tx *sql.Tx) error { + // start counting from the last max offset, else 0. 
+ var maxOffset int64 + var userID string + _ = tx.QueryRow(`SELECT user_id, MAX(log_offset) FROM keyserver_key_changes GROUP BY user_id`).Scan(&userID, &maxOffset) + + _, err := tx.Exec(` + -- make the new table + DROP TABLE IF EXISTS keyserver_key_changes; + CREATE TABLE IF NOT EXISTS keyserver_key_changes ( + change_id INTEGER PRIMARY KEY AUTOINCREMENT, + -- The key owner + user_id TEXT NOT NULL, + UNIQUE (user_id) + ); + `) + if err != nil { + return fmt.Errorf("failed to execute upgrade: %w", err) + } + // to start counting from maxOffset, insert a row with that value + if userID != "" { + _, err = tx.Exec(`INSERT INTO keyserver_key_changes(change_id, user_id) VALUES($1, $2)`, maxOffset, userID) + return err + } + return nil +} + +func DownRefactorKeyChanges(tx *sql.Tx) error { + _, err := tx.Exec(` + -- Drop all data and revert back, we can't keep the data as Kafka offsets determine the numbers + DROP TABLE IF EXISTS keyserver_key_changes; + CREATE TABLE IF NOT EXISTS keyserver_key_changes ( + partition BIGINT NOT NULL, + offset BIGINT NOT NULL, + -- The key owner + user_id TEXT NOT NULL, + UNIQUE (partition, offset) + ); + `) + if err != nil { + return fmt.Errorf("failed to execute downgrade: %w", err) + } + return nil +} diff --git a/keyserver/storage/sqlite3/key_changes_table.go b/keyserver/storage/sqlite3/key_changes_table.go index b4753ccc5..d43c15ca9 100644 --- a/keyserver/storage/sqlite3/key_changes_table.go +++ b/keyserver/storage/sqlite3/key_changes_table.go @@ -27,27 +27,22 @@ import ( var keyChangesSchema = ` -- Stores key change information about users. Used to determine when to send updated device lists to clients. CREATE TABLE IF NOT EXISTS keyserver_key_changes ( - partition BIGINT NOT NULL, - offset BIGINT NOT NULL, + change_id INTEGER PRIMARY KEY AUTOINCREMENT, -- The key owner user_id TEXT NOT NULL, - UNIQUE (partition, offset) + UNIQUE (user_id) ); ` -// Replace based on partition|offset - we should never insert duplicates unless the kafka logs are wiped. -// Rather than falling over, just overwrite (though this will mean clients with an existing sync token will -// miss out on updates). TODO: Ideally we would detect when kafka logs are purged then purge this table too. +// Replace based on user ID. We don't care how many times the user's keys have changed, only that they +// have changed, hence we can just keep bumping the change ID for this user. const upsertKeyChangeSQL = "" + - "INSERT INTO keyserver_key_changes (partition, offset, user_id)" + - " VALUES ($1, $2, $3)" + - " ON CONFLICT (partition, offset)" + - " DO UPDATE SET user_id = $3" + "INSERT OR REPLACE INTO keyserver_key_changes (user_id)" + + " VALUES ($1)" + + " RETURNING change_id" -// select the highest offset for each user in the range. The grouping by user gives distinct entries and then we just -// take the max offset value as the latest offset. 
const selectKeyChangesSQL = "" + - "SELECT user_id, MAX(offset) FROM keyserver_key_changes WHERE partition = $1 AND offset > $2 AND offset <= $3 GROUP BY user_id" + "SELECT user_id, change_id FROM keyserver_key_changes WHERE change_id > $1 AND change_id <= $2" type keyChangesStatements struct { db *sql.DB @@ -60,31 +55,32 @@ func NewSqliteKeyChangesTable(db *sql.DB) (tables.KeyChanges, error) { db: db, } _, err := db.Exec(keyChangesSchema) - if err != nil { - return nil, err - } - if s.upsertKeyChangeStmt, err = db.Prepare(upsertKeyChangeSQL); err != nil { - return nil, err - } - if s.selectKeyChangesStmt, err = db.Prepare(selectKeyChangesSQL); err != nil { - return nil, err - } - return s, nil + return s, err } -func (s *keyChangesStatements) InsertKeyChange(ctx context.Context, partition int32, offset int64, userID string) error { - _, err := s.upsertKeyChangeStmt.ExecContext(ctx, partition, offset, userID) - return err +func (s *keyChangesStatements) Prepare() (err error) { + if s.upsertKeyChangeStmt, err = s.db.Prepare(upsertKeyChangeSQL); err != nil { + return err + } + if s.selectKeyChangesStmt, err = s.db.Prepare(selectKeyChangesSQL); err != nil { + return err + } + return nil +} + +func (s *keyChangesStatements) InsertKeyChange(ctx context.Context, userID string) (changeID int64, err error) { + err = s.upsertKeyChangeStmt.QueryRowContext(ctx, userID).Scan(&changeID) + return } func (s *keyChangesStatements) SelectKeyChanges( - ctx context.Context, partition int32, fromOffset, toOffset int64, + ctx context.Context, fromOffset, toOffset int64, ) (userIDs []string, latestOffset int64, err error) { if toOffset == sarama.OffsetNewest { toOffset = math.MaxInt64 } latestOffset = fromOffset - rows, err := s.selectKeyChangesStmt.QueryContext(ctx, partition, fromOffset, toOffset) + rows, err := s.selectKeyChangesStmt.QueryContext(ctx, fromOffset, toOffset) if err != nil { return nil, 0, err } diff --git a/keyserver/storage/sqlite3/storage.go b/keyserver/storage/sqlite3/storage.go index ee1746cd6..50ce00d05 100644 --- a/keyserver/storage/sqlite3/storage.go +++ b/keyserver/storage/sqlite3/storage.go @@ -17,6 +17,7 @@ package sqlite3 import ( "github.com/matrix-org/dendrite/internal/sqlutil" "github.com/matrix-org/dendrite/keyserver/storage/shared" + "github.com/matrix-org/dendrite/keyserver/storage/sqlite3/deltas" "github.com/matrix-org/dendrite/setup/config" ) @@ -49,6 +50,15 @@ func NewDatabase(dbProperties *config.DatabaseOptions) (*shared.Database, error) if err != nil { return nil, err } + + m := sqlutil.NewMigrations() + deltas.LoadRefactorKeyChanges(m) + if err = m.RunDeltas(db, dbProperties); err != nil { + return nil, err + } + if err = kc.Prepare(); err != nil { + return nil, err + } d := &shared.Database{ DB: db, Writer: sqlutil.NewExclusiveWriter(), diff --git a/keyserver/storage/storage_test.go b/keyserver/storage/storage_test.go index 4e0a8af1d..2f8cf809b 100644 --- a/keyserver/storage/storage_test.go +++ b/keyserver/storage/storage_test.go @@ -44,15 +44,18 @@ func MustNotError(t *testing.T, err error) { func TestKeyChanges(t *testing.T) { db, clean := MustCreateDatabase(t) defer clean() - MustNotError(t, db.StoreKeyChange(ctx, 0, 0, "@alice:localhost")) - MustNotError(t, db.StoreKeyChange(ctx, 0, 1, "@bob:localhost")) - MustNotError(t, db.StoreKeyChange(ctx, 0, 2, "@charlie:localhost")) - userIDs, latest, err := db.KeyChanges(ctx, 0, 1, sarama.OffsetNewest) + _, err := db.StoreKeyChange(ctx, "@alice:localhost") + MustNotError(t, err) + deviceChangeIDB, err := 
db.StoreKeyChange(ctx, "@bob:localhost") + MustNotError(t, err) + deviceChangeIDC, err := db.StoreKeyChange(ctx, "@charlie:localhost") + MustNotError(t, err) + userIDs, latest, err := db.KeyChanges(ctx, deviceChangeIDB, sarama.OffsetNewest) if err != nil { t.Fatalf("Failed to KeyChanges: %s", err) } - if latest != 2 { - t.Fatalf("KeyChanges: got latest=%d want 2", latest) + if latest != deviceChangeIDC { + t.Fatalf("KeyChanges: got latest=%d want %d", latest, deviceChangeIDC) } if !reflect.DeepEqual(userIDs, []string{"@charlie:localhost"}) { t.Fatalf("KeyChanges: wrong user_ids: %v", userIDs) @@ -62,15 +65,21 @@ func TestKeyChanges(t *testing.T) { func TestKeyChangesNoDupes(t *testing.T) { db, clean := MustCreateDatabase(t) defer clean() - MustNotError(t, db.StoreKeyChange(ctx, 0, 0, "@alice:localhost")) - MustNotError(t, db.StoreKeyChange(ctx, 0, 1, "@alice:localhost")) - MustNotError(t, db.StoreKeyChange(ctx, 0, 2, "@alice:localhost")) - userIDs, latest, err := db.KeyChanges(ctx, 0, 0, sarama.OffsetNewest) + deviceChangeIDA, err := db.StoreKeyChange(ctx, "@alice:localhost") + MustNotError(t, err) + deviceChangeIDB, err := db.StoreKeyChange(ctx, "@alice:localhost") + MustNotError(t, err) + if deviceChangeIDA == deviceChangeIDB { + t.Fatalf("Expected change ID to be different even when inserting key change for the same user, got %d for both changes", deviceChangeIDA) + } + deviceChangeID, err := db.StoreKeyChange(ctx, "@alice:localhost") + MustNotError(t, err) + userIDs, latest, err := db.KeyChanges(ctx, 0, sarama.OffsetNewest) if err != nil { t.Fatalf("Failed to KeyChanges: %s", err) } - if latest != 2 { - t.Fatalf("KeyChanges: got latest=%d want 2", latest) + if latest != deviceChangeID { + t.Fatalf("KeyChanges: got latest=%d want %d", latest, deviceChangeID) } if !reflect.DeepEqual(userIDs, []string{"@alice:localhost"}) { t.Fatalf("KeyChanges: wrong user_ids: %v", userIDs) @@ -80,15 +89,18 @@ func TestKeyChangesNoDupes(t *testing.T) { func TestKeyChangesUpperLimit(t *testing.T) { db, clean := MustCreateDatabase(t) defer clean() - MustNotError(t, db.StoreKeyChange(ctx, 0, 0, "@alice:localhost")) - MustNotError(t, db.StoreKeyChange(ctx, 0, 1, "@bob:localhost")) - MustNotError(t, db.StoreKeyChange(ctx, 0, 2, "@charlie:localhost")) - userIDs, latest, err := db.KeyChanges(ctx, 0, 0, 1) + deviceChangeIDA, err := db.StoreKeyChange(ctx, "@alice:localhost") + MustNotError(t, err) + deviceChangeIDB, err := db.StoreKeyChange(ctx, "@bob:localhost") + MustNotError(t, err) + _, err = db.StoreKeyChange(ctx, "@charlie:localhost") + MustNotError(t, err) + userIDs, latest, err := db.KeyChanges(ctx, deviceChangeIDA, deviceChangeIDB) if err != nil { t.Fatalf("Failed to KeyChanges: %s", err) } - if latest != 1 { - t.Fatalf("KeyChanges: got latest=%d want 1", latest) + if latest != deviceChangeIDB { + t.Fatalf("KeyChanges: got latest=%d want %d", latest, deviceChangeIDB) } if !reflect.DeepEqual(userIDs, []string{"@bob:localhost"}) { t.Fatalf("KeyChanges: wrong user_ids: %v", userIDs) diff --git a/keyserver/storage/tables/interface.go b/keyserver/storage/tables/interface.go index 612eeb867..0d94c94cc 100644 --- a/keyserver/storage/tables/interface.go +++ b/keyserver/storage/tables/interface.go @@ -44,10 +44,12 @@ type DeviceKeys interface { } type KeyChanges interface { - InsertKeyChange(ctx context.Context, partition int32, offset int64, userID string) error + InsertKeyChange(ctx context.Context, userID string) (int64, error) // SelectKeyChanges returns the set (de-duplicated) of users who have changed their 
keys between the two offsets. // Results are exclusive of fromOffset and inclusive of toOffset. A toOffset of sarama.OffsetNewest means no upper offset. - SelectKeyChanges(ctx context.Context, partition int32, fromOffset, toOffset int64) (userIDs []string, latestOffset int64, err error) + SelectKeyChanges(ctx context.Context, fromOffset, toOffset int64) (userIDs []string, latestOffset int64, err error) + + Prepare() error } type StaleDeviceLists interface { diff --git a/roomserver/api/input.go b/roomserver/api/input.go index 8e6e4ac7b..4b0704b9f 100644 --- a/roomserver/api/input.go +++ b/roomserver/api/input.go @@ -54,12 +54,8 @@ type InputRoomEvent struct { Kind Kind `json:"kind"` // The event JSON for the event to add. Event *gomatrixserverlib.HeaderedEvent `json:"event"` - // List of state event IDs that authenticate this event. - // These are likely derived from the "auth_events" JSON key of the event. - // But can be different because the "auth_events" key can be incomplete or wrong. - // For example many matrix events forget to reference the m.room.create event even though it is needed for auth. - // (since synapse allows this to happen we have to allow it as well.) - AuthEventIDs []string `json:"auth_event_ids"` + // Which server told us about this event. + Origin gomatrixserverlib.ServerName `json:"origin"` // Whether the state is supplied as a list of event IDs or whether it // should be derived from the state at the previous events. HasState bool `json:"has_state"` @@ -86,6 +82,7 @@ type TransactionID struct { // InputRoomEventsRequest is a request to InputRoomEvents type InputRoomEventsRequest struct { InputRoomEvents []InputRoomEvent `json:"input_room_events"` + Asynchronous bool `json:"async"` } // InputRoomEventsResponse is a response to InputRoomEvents diff --git a/roomserver/api/query.go b/roomserver/api/query.go index 599156bb1..283217157 100644 --- a/roomserver/api/query.go +++ b/roomserver/api/query.go @@ -226,6 +226,10 @@ type QueryStateAndAuthChainRequest struct { PrevEventIDs []string `json:"prev_event_ids"` // The list of auth events for the event. Used to calculate the auth chain AuthEventIDs []string `json:"auth_event_ids"` + // If true, the auth chain events for the auth event IDs given will be fetched only. Prev event IDs are ignored. + // If false, state and auth chain events for the prev event IDs and entire current state will be included. + // TODO: not a great API shape. It serves 2 main uses: false=>response for send_join, true=>response for /event_auth + OnlyFetchAuthChain bool `json:"only_fetch_auth_chain"` // Should state resolution be ran on the result events? 
// TODO: check call sites and remove if we always want to do state res ResolveState bool `json:"resolve_state"` diff --git a/roomserver/api/wrapper.go b/roomserver/api/wrapper.go index de66df803..e9b94e48c 100644 --- a/roomserver/api/wrapper.go +++ b/roomserver/api/wrapper.go @@ -26,19 +26,21 @@ import ( func SendEvents( ctx context.Context, rsAPI RoomserverInternalAPI, kind Kind, events []*gomatrixserverlib.HeaderedEvent, + origin gomatrixserverlib.ServerName, sendAsServer gomatrixserverlib.ServerName, txnID *TransactionID, + async bool, ) error { ires := make([]InputRoomEvent, len(events)) for i, event := range events { ires[i] = InputRoomEvent{ Kind: kind, Event: event, - AuthEventIDs: event.AuthEventIDs(), + Origin: origin, SendAsServer: string(sendAsServer), TransactionID: txnID, } } - return SendInputRoomEvents(ctx, rsAPI, ires) + return SendInputRoomEvents(ctx, rsAPI, ires, async) } // SendEventWithState writes an event with the specified kind to the roomserver @@ -47,7 +49,7 @@ func SendEvents( func SendEventWithState( ctx context.Context, rsAPI RoomserverInternalAPI, kind Kind, state *gomatrixserverlib.RespState, event *gomatrixserverlib.HeaderedEvent, - haveEventIDs map[string]bool, + origin gomatrixserverlib.ServerName, haveEventIDs map[string]bool, async bool, ) error { outliers, err := state.Events() if err != nil { @@ -60,9 +62,9 @@ func SendEventWithState( continue } ires = append(ires, InputRoomEvent{ - Kind: KindOutlier, - Event: outlier.Headered(event.RoomVersion), - AuthEventIDs: outlier.AuthEventIDs(), + Kind: KindOutlier, + Event: outlier.Headered(event.RoomVersion), + Origin: origin, }) } @@ -74,19 +76,23 @@ func SendEventWithState( ires = append(ires, InputRoomEvent{ Kind: kind, Event: event, - AuthEventIDs: event.AuthEventIDs(), + Origin: origin, HasState: true, StateEventIDs: stateEventIDs, }) - return SendInputRoomEvents(ctx, rsAPI, ires) + return SendInputRoomEvents(ctx, rsAPI, ires, async) } // SendInputRoomEvents to the roomserver. 
func SendInputRoomEvents( - ctx context.Context, rsAPI RoomserverInternalAPI, ires []InputRoomEvent, + ctx context.Context, rsAPI RoomserverInternalAPI, + ires []InputRoomEvent, async bool, ) error { - request := InputRoomEventsRequest{InputRoomEvents: ires} + request := InputRoomEventsRequest{ + InputRoomEvents: ires, + Asynchronous: async, + } var response InputRoomEventsResponse rsAPI.InputRoomEvents(ctx, &request, &response) return response.Err() diff --git a/roomserver/internal/api.go b/roomserver/internal/api.go index 67bbc7aba..5b87e623d 100644 --- a/roomserver/internal/api.go +++ b/roomserver/internal/api.go @@ -3,7 +3,6 @@ package internal import ( "context" - "github.com/Shopify/sarama" "github.com/getsentry/sentry-go" asAPI "github.com/matrix-org/dendrite/appservice/api" fsAPI "github.com/matrix-org/dendrite/federationapi/api" @@ -16,6 +15,8 @@ import ( "github.com/matrix-org/dendrite/roomserver/storage" "github.com/matrix-org/dendrite/setup/config" "github.com/matrix-org/gomatrixserverlib" + "github.com/nats-io/nats.go" + "github.com/sirupsen/logrus" ) // RoomserverInternalAPI is an implementation of api.RoomserverInternalAPI @@ -33,19 +34,22 @@ type RoomserverInternalAPI struct { *perform.Forgetter DB storage.Database Cfg *config.RoomServer - Producer sarama.SyncProducer Cache caching.RoomServerCaches ServerName gomatrixserverlib.ServerName KeyRing gomatrixserverlib.JSONVerifier + ServerACLs *acls.ServerACLs fsAPI fsAPI.FederationInternalAPI asAPI asAPI.AppServiceQueryAPI - OutputRoomEventTopic string // Kafka topic for new output room events + JetStream nats.JetStreamContext + Durable nats.SubOpt + InputRoomEventTopic string // JetStream topic for new input room events + OutputRoomEventTopic string // JetStream topic for new output room events PerspectiveServerNames []gomatrixserverlib.ServerName } func NewRoomserverAPI( - cfg *config.RoomServer, roomserverDB storage.Database, producer sarama.SyncProducer, - outputRoomEventTopic string, caches caching.RoomServerCaches, + cfg *config.RoomServer, roomserverDB storage.Database, consumer nats.JetStreamContext, + inputRoomEventTopic, outputRoomEventTopic string, caches caching.RoomServerCaches, perspectiveServerNames []gomatrixserverlib.ServerName, ) *RoomserverInternalAPI { serverACLs := acls.NewServerACLs(roomserverDB) @@ -55,19 +59,17 @@ func NewRoomserverAPI( Cache: caches, ServerName: cfg.Matrix.ServerName, PerspectiveServerNames: perspectiveServerNames, + InputRoomEventTopic: inputRoomEventTopic, + OutputRoomEventTopic: outputRoomEventTopic, + JetStream: consumer, + Durable: cfg.Matrix.JetStream.Durable("RoomserverInputConsumer"), + ServerACLs: serverACLs, Queryer: &query.Queryer{ DB: roomserverDB, Cache: caches, ServerName: cfg.Matrix.ServerName, ServerACLs: serverACLs, }, - Inputer: &input.Inputer{ - DB: roomserverDB, - OutputRoomEventTopic: outputRoomEventTopic, - Producer: producer, - ServerName: cfg.Matrix.ServerName, - ACLs: serverACLs, - }, // perform-er structs get initialised when we have a federation sender to use } return a @@ -80,6 +82,18 @@ func (r *RoomserverInternalAPI) SetFederationAPI(fsAPI fsAPI.FederationInternalA r.fsAPI = fsAPI r.KeyRing = keyRing + r.Inputer = &input.Inputer{ + DB: r.DB, + InputRoomEventTopic: r.InputRoomEventTopic, + OutputRoomEventTopic: r.OutputRoomEventTopic, + JetStream: r.JetStream, + Durable: r.Durable, + ServerName: r.Cfg.Matrix.ServerName, + FSAPI: fsAPI, + KeyRing: keyRing, + ACLs: r.ServerACLs, + Queryer: r.Queryer, + } r.Inviter = &perform.Inviter{ DB: r.DB, Cfg: r.Cfg, @@ 
-135,6 +149,10 @@ func (r *RoomserverInternalAPI) SetFederationAPI(fsAPI fsAPI.FederationInternalA r.Forgetter = &perform.Forgetter{ DB: r.DB, } + + if err := r.Inputer.Start(); err != nil { + logrus.WithError(err).Panic("failed to start roomserver input API") + } } func (r *RoomserverInternalAPI) SetAppserviceAPI(asAPI asAPI.AppServiceQueryAPI) { diff --git a/roomserver/internal/helpers/auth.go b/roomserver/internal/helpers/auth.go index 1f4215e74..ddda8081c 100644 --- a/roomserver/internal/helpers/auth.go +++ b/roomserver/internal/helpers/auth.go @@ -56,7 +56,7 @@ func CheckForSoftFail( // Then get the state entries for the current state snapshot. // We'll use this to check if the event is allowed right now. - roomState := state.NewStateResolution(db, *roomInfo) + roomState := state.NewStateResolution(db, roomInfo) authStateEntries, err = roomState.LoadStateAtSnapshot(ctx, roomInfo.StateSnapshotNID) if err != nil { return true, fmt.Errorf("roomState.LoadStateAtSnapshot: %w", err) diff --git a/roomserver/internal/helpers/helpers.go b/roomserver/internal/helpers/helpers.go index a389cc898..78a875c76 100644 --- a/roomserver/internal/helpers/helpers.go +++ b/roomserver/internal/helpers/helpers.go @@ -179,7 +179,7 @@ func GetMembershipsAtState( return events, nil } -func StateBeforeEvent(ctx context.Context, db storage.Database, info types.RoomInfo, eventNID types.EventNID) ([]types.StateEntry, error) { +func StateBeforeEvent(ctx context.Context, db storage.Database, info *types.RoomInfo, eventNID types.EventNID) ([]types.StateEntry, error) { roomState := state.NewStateResolution(db, info) // Lookup the event NID eIDs, err := db.EventIDs(ctx, []types.EventNID{eventNID}) @@ -223,7 +223,7 @@ func LoadStateEvents( } func CheckServerAllowedToSeeEvent( - ctx context.Context, db storage.Database, info types.RoomInfo, eventID string, serverName gomatrixserverlib.ServerName, isServerInRoom bool, + ctx context.Context, db storage.Database, info *types.RoomInfo, eventID string, serverName gomatrixserverlib.ServerName, isServerInRoom bool, ) (bool, error) { roomState := state.NewStateResolution(db, info) stateEntries, err := roomState.LoadStateAtEvent(ctx, eventID) @@ -279,7 +279,7 @@ func CheckServerAllowedToSeeEvent( // TODO: Remove this when we have tests to assert correctness of this function func ScanEventTree( - ctx context.Context, db storage.Database, info types.RoomInfo, front []string, visited map[string]bool, limit int, + ctx context.Context, db storage.Database, info *types.RoomInfo, front []string, visited map[string]bool, limit int, serverName gomatrixserverlib.ServerName, ) ([]types.EventNID, error) { var resultNIDs []types.EventNID @@ -387,7 +387,7 @@ func QueryLatestEventsAndState( return nil } - roomState := state.NewStateResolution(db, *roomInfo) + roomState := state.NewStateResolution(db, roomInfo) response.RoomExists = true response.RoomVersion = roomInfo.RoomVersion diff --git a/roomserver/internal/input/input.go b/roomserver/internal/input/input.go index de40e133d..9601e018d 100644 --- a/roomserver/internal/input/input.go +++ b/roomserver/internal/input/input.go @@ -21,17 +21,20 @@ import ( "sync" "time" - "github.com/Shopify/sarama" + "github.com/Arceliar/phony" "github.com/getsentry/sentry-go" + fedapi "github.com/matrix-org/dendrite/federationapi/api" "github.com/matrix-org/dendrite/internal/hooks" "github.com/matrix-org/dendrite/roomserver/acls" "github.com/matrix-org/dendrite/roomserver/api" + "github.com/matrix-org/dendrite/roomserver/internal/query" 
"github.com/matrix-org/dendrite/roomserver/storage" + "github.com/matrix-org/dendrite/setup/jetstream" "github.com/matrix-org/gomatrixserverlib" + "github.com/nats-io/nats.go" "github.com/prometheus/client_golang/prometheus" log "github.com/sirupsen/logrus" "github.com/tidwall/gjson" - "go.uber.org/atomic" ) var keyContentFields = map[string]string{ @@ -42,105 +45,207 @@ var keyContentFields = map[string]string{ type Inputer struct { DB storage.Database - Producer sarama.SyncProducer + JetStream nats.JetStreamContext + Durable nats.SubOpt ServerName gomatrixserverlib.ServerName + FSAPI fedapi.FederationInternalAPI + KeyRing gomatrixserverlib.JSONVerifier ACLs *acls.ServerACLs + InputRoomEventTopic string OutputRoomEventTopic string - workers sync.Map // room ID -> *inputWorker + workers sync.Map // room ID -> *phony.Inbox + + Queryer *query.Queryer } -type inputTask struct { - ctx context.Context - event *api.InputRoomEvent - wg *sync.WaitGroup - err error // written back by worker, only safe to read when all tasks are done +func (r *Inputer) workerForRoom(roomID string) *phony.Inbox { + inbox, _ := r.workers.LoadOrStore(roomID, &phony.Inbox{}) + return inbox.(*phony.Inbox) } -type inputWorker struct { - r *Inputer - running atomic.Bool - input *fifoQueue -} +// eventsInProgress is an in-memory map to keep a track of which events we have +// queued up for processing. If we get a redelivery from NATS and we still have +// the queued up item then we won't do anything with the redelivered message. If +// we've restarted Dendrite and now this map is empty then it means that we will +// reload pending work from NATS. +var eventsInProgress sync.Map -// Guarded by a CAS on w.running -func (w *inputWorker) start() { - defer w.running.Store(false) - for { - select { - case <-w.input.wait(): - task, ok := w.input.pop() - if !ok { - continue +// onMessage is called when a new event arrives in the roomserver input stream. +func (r *Inputer) Start() error { + _, err := r.JetStream.Subscribe( + r.InputRoomEventTopic, + // We specifically don't use jetstream.WithJetStreamMessage here because we + // queue the task off to a room-specific queue and the ACK needs to be sent + // later, possibly with an error response to the inputter if synchronous. + func(msg *nats.Msg) { + roomID := msg.Header.Get("room_id") + var inputRoomEvent api.InputRoomEvent + if err := json.Unmarshal(msg.Data, &inputRoomEvent); err != nil { + _ = msg.Term() + return } - roomserverInputBackpressure.With(prometheus.Labels{ - "room_id": task.event.Event.RoomID(), - }).Dec() - hooks.Run(hooks.KindNewEventReceived, task.event.Event) - _, task.err = w.r.processRoomEvent(task.ctx, task.event) - if task.err == nil { - hooks.Run(hooks.KindNewEventPersisted, task.event.Event) - } else { - sentry.CaptureException(task.err) + + _ = msg.InProgress() + index := roomID + "\000" + inputRoomEvent.Event.EventID() + if _, ok := eventsInProgress.LoadOrStore(index, struct{}{}); ok { + // We're already waiting to deal with this event, so there's no + // point in queuing it up again. We've notified NATS that we're + // working on the message still, so that will have deferred the + // redelivery by a bit. 
+ return + } + + roomserverInputBackpressure.With(prometheus.Labels{"room_id": roomID}).Inc() + r.workerForRoom(roomID).Act(nil, func() { + _ = msg.InProgress() // resets the acknowledgement wait timer + defer eventsInProgress.Delete(index) + defer roomserverInputBackpressure.With(prometheus.Labels{"room_id": roomID}).Dec() + if err := r.processRoomEvent(context.Background(), &inputRoomEvent); err != nil { + sentry.CaptureException(err) + } else { + hooks.Run(hooks.KindNewEventPersisted, inputRoomEvent.Event) + } + _ = msg.Ack() + }) + }, + // NATS wants to acknowledge automatically by default when the message is + // read from the stream, but we want to override that behaviour by making + // sure that we only acknowledge when we're happy we've done everything we + // can. This ensures we retry things when it makes sense to do so. + nats.ManualAck(), + // Use a durable named consumer. + r.Durable, + // If we've missed things in the stream, e.g. we restarted, then replay + // all of the queued messages that were waiting for us. + nats.DeliverAll(), + // Ensure that NATS doesn't try to resend us something that wasn't done + // within the period of time that we might still be processing it. + nats.AckWait(MaximumProcessingTime+(time.Second*10)), + ) + return err +} + +// InputRoomEvents implements api.RoomserverInternalAPI +func (r *Inputer) InputRoomEvents( + ctx context.Context, + request *api.InputRoomEventsRequest, + response *api.InputRoomEventsResponse, +) { + if request.Asynchronous { + var err error + for _, e := range request.InputRoomEvents { + msg := &nats.Msg{ + Subject: r.InputRoomEventTopic, + Header: nats.Header{}, + } + roomID := e.Event.RoomID() + msg.Header.Set("room_id", roomID) + msg.Data, err = json.Marshal(e) + if err != nil { + response.ErrMsg = err.Error() + return + } + if _, err = r.JetStream.PublishMsg(msg); err != nil { + return + } + } + } else { + responses := make(chan error, len(request.InputRoomEvents)) + defer close(responses) + for _, e := range request.InputRoomEvents { + inputRoomEvent := e + roomID := inputRoomEvent.Event.RoomID() + index := roomID + "\000" + inputRoomEvent.Event.EventID() + if _, ok := eventsInProgress.LoadOrStore(index, struct{}{}); ok { + // We're already waiting to deal with this event, so there's no + // point in queuing it up again. We've notified NATS that we're + // working on the message still, so that will have deferred the + // redelivery by a bit. 
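// Illustrative sketch, not part of the patch above: the new input path
// subscribes to a JetStream subject with a durable consumer and manual acks,
// de-duplicates (re)deliveries with an in-memory map keyed on
// roomID+"\x00"+eventID, and serialises processing per room by giving each
// room its own phony.Inbox. The sketch below combines those pieces under
// stated assumptions: the subject name, header keys, durable name and
// process() function are placeholders, and a JetStream stream covering the
// subject is assumed to already exist.
package main

import (
    "log"
    "sync"
    "time"

    "github.com/Arceliar/phony"
    "github.com/nats-io/nats.go"
)

type dispatcher struct {
    workers    sync.Map // room ID -> *phony.Inbox
    inProgress sync.Map // roomID+"\x00"+eventID -> struct{}
}

func (d *dispatcher) workerForRoom(roomID string) *phony.Inbox {
    inbox, _ := d.workers.LoadOrStore(roomID, &phony.Inbox{})
    return inbox.(*phony.Inbox)
}

func main() {
    nc, err := nats.Connect(nats.DefaultURL)
    if err != nil {
        log.Fatal(err)
    }
    defer nc.Drain()
    js, err := nc.JetStream()
    if err != nil {
        log.Fatal(err)
    }

    d := &dispatcher{}
    _, err = js.Subscribe("InputRoomEvent", func(msg *nats.Msg) {
        roomID := msg.Header.Get("room_id")
        eventID := msg.Header.Get("event_id") // placeholder: the patch takes the event ID from the JSON payload
        key := roomID + "\x00" + eventID
        if _, loaded := d.inProgress.LoadOrStore(key, struct{}{}); loaded {
            return // already queued, drop the redelivery
        }
        // Events for the same room run one at a time; different rooms run concurrently.
        d.workerForRoom(roomID).Act(nil, func() {
            defer d.inProgress.Delete(key)
            if err := process(msg.Data); err != nil {
                log.Printf("processing failed, leaving unacked so JetStream redelivers: %v", err)
                return
            }
            _ = msg.Ack() // only ack once the event has been fully processed
        })
    },
        nats.ManualAck(),                  // we decide when to ack
        nats.Durable("RoomserverExample"), // a named durable consumer survives restarts
        nats.DeliverAll(),                 // replay anything queued while we were down
        nats.AckWait(2*time.Minute+10*time.Second),
    )
    if err != nil {
        log.Fatal(err)
    }
    select {} // block forever serving messages
}

func process(data []byte) error { return nil }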
+ return + } + roomserverInputBackpressure.With(prometheus.Labels{"room_id": roomID}).Inc() + worker := r.workerForRoom(roomID) + worker.Act(nil, func() { + defer eventsInProgress.Delete(index) + defer roomserverInputBackpressure.With(prometheus.Labels{"room_id": roomID}).Dec() + err := r.processRoomEvent(ctx, &inputRoomEvent) + if err != nil { + sentry.CaptureException(err) + } else { + hooks.Run(hooks.KindNewEventPersisted, inputRoomEvent.Event) + } + select { + case <-ctx.Done(): + default: + responses <- err + } + }) + } + for i := 0; i < len(request.InputRoomEvents); i++ { + select { + case <-ctx.Done(): + response.ErrMsg = context.DeadlineExceeded.Error() + return + case err := <-responses: + if err != nil { + response.ErrMsg = err.Error() + return + } } - task.wg.Done() - case <-time.After(time.Second * 5): - return } } } // WriteOutputEvents implements OutputRoomEventWriter func (r *Inputer) WriteOutputEvents(roomID string, updates []api.OutputEvent) error { - messages := make([]*sarama.ProducerMessage, len(updates)) - for i := range updates { - value, err := json.Marshal(updates[i]) + var err error + for _, update := range updates { + msg := &nats.Msg{ + Subject: r.OutputRoomEventTopic, + Header: nats.Header{}, + } + msg.Header.Set(jetstream.RoomID, roomID) + msg.Data, err = json.Marshal(update) if err != nil { return err } logger := log.WithFields(log.Fields{ "room_id": roomID, - "type": updates[i].Type, + "type": update.Type, }) - if updates[i].NewRoomEvent != nil { - eventType := updates[i].NewRoomEvent.Event.Type() + if update.NewRoomEvent != nil { + eventType := update.NewRoomEvent.Event.Type() logger = logger.WithFields(log.Fields{ "event_type": eventType, - "event_id": updates[i].NewRoomEvent.Event.EventID(), - "adds_state": len(updates[i].NewRoomEvent.AddsStateEventIDs), - "removes_state": len(updates[i].NewRoomEvent.RemovesStateEventIDs), - "send_as_server": updates[i].NewRoomEvent.SendAsServer, - "sender": updates[i].NewRoomEvent.Event.Sender(), + "event_id": update.NewRoomEvent.Event.EventID(), + "adds_state": len(update.NewRoomEvent.AddsStateEventIDs), + "removes_state": len(update.NewRoomEvent.RemovesStateEventIDs), + "send_as_server": update.NewRoomEvent.SendAsServer, + "sender": update.NewRoomEvent.Event.Sender(), }) - if updates[i].NewRoomEvent.Event.StateKey() != nil { - logger = logger.WithField("state_key", *updates[i].NewRoomEvent.Event.StateKey()) + if update.NewRoomEvent.Event.StateKey() != nil { + logger = logger.WithField("state_key", *update.NewRoomEvent.Event.StateKey()) } contentKey := keyContentFields[eventType] if contentKey != "" { - value := gjson.GetBytes(updates[i].NewRoomEvent.Event.Content(), contentKey) + value := gjson.GetBytes(update.NewRoomEvent.Event.Content(), contentKey) if value.Exists() { logger = logger.WithField("content_value", value.String()) } } - if eventType == "m.room.server_acl" && updates[i].NewRoomEvent.Event.StateKeyEquals("") { - ev := updates[i].NewRoomEvent.Event.Unwrap() + if eventType == "m.room.server_acl" && update.NewRoomEvent.Event.StateKeyEquals("") { + ev := update.NewRoomEvent.Event.Unwrap() defer r.ACLs.OnServerACLUpdate(ev) } } - logger.Infof("Producing to topic '%s'", r.OutputRoomEventTopic) - messages[i] = &sarama.ProducerMessage{ - Topic: r.OutputRoomEventTopic, - Key: sarama.StringEncoder(roomID), - Value: sarama.ByteEncoder(value), + logger.Tracef("Producing to topic '%s'", r.OutputRoomEventTopic) + if _, err := r.JetStream.PublishMsg(msg); err != nil { + logger.WithError(err).Errorf("Failed to produce to 
topic '%s': %s", r.OutputRoomEventTopic, err) + return err } } - errs := r.Producer.SendMessages(messages) - if errs != nil { - for _, err := range errs.(sarama.ProducerErrors) { - log.WithError(err).WithField("message_bytes", err.Msg.Value.Length()).Error("Write to kafka failed") - } - } - return errs + return nil } func init() { @@ -156,67 +261,3 @@ var roomserverInputBackpressure = prometheus.NewGaugeVec( }, []string{"room_id"}, ) - -// InputRoomEvents implements api.RoomserverInternalAPI -func (r *Inputer) InputRoomEvents( - _ context.Context, - request *api.InputRoomEventsRequest, - response *api.InputRoomEventsResponse, -) { - // Create a wait group. Each task that we dispatch will call Done on - // this wait group so that we know when all of our events have been - // processed. - wg := &sync.WaitGroup{} - wg.Add(len(request.InputRoomEvents)) - tasks := make([]*inputTask, len(request.InputRoomEvents)) - - for i, e := range request.InputRoomEvents { - // Work out if we are running per-room workers or if we're just doing - // it on a global basis (e.g. SQLite). - roomID := "global" - if r.DB.SupportsConcurrentRoomInputs() { - roomID = e.Event.RoomID() - } - - // Look up the worker, or create it if it doesn't exist. This channel - // is buffered to reduce the chance that we'll be blocked by another - // room - the channel will be quite small as it's just pointer types. - w, _ := r.workers.LoadOrStore(roomID, &inputWorker{ - r: r, - input: newFIFOQueue(), - }) - worker := w.(*inputWorker) - - // Create a task. This contains the input event and a reference to - // the wait group, so that the worker can notify us when this specific - // task has been finished. - tasks[i] = &inputTask{ - ctx: context.Background(), - event: &request.InputRoomEvents[i], - wg: wg, - } - - // Send the task to the worker. - if worker.running.CAS(false, true) { - go worker.start() - } - worker.input.push(tasks[i]) - roomserverInputBackpressure.With(prometheus.Labels{ - "room_id": roomID, - }).Inc() - } - - // Wait for all of the workers to return results about our tasks. - wg.Wait() - - // If any of the tasks returned an error, we should probably report - // that back to the caller. - for _, task := range tasks { - if task.err != nil { - response.ErrMsg = task.err.Error() - _, rejected := task.err.(*gomatrixserverlib.NotAllowed) - response.NotAllowed = rejected - return - } - } -} diff --git a/roomserver/internal/input/input_events.go b/roomserver/internal/input/input_events.go index fc712f47b..5f9115223 100644 --- a/roomserver/internal/input/input_events.go +++ b/roomserver/internal/input/input_events.go @@ -22,6 +22,8 @@ import ( "fmt" "time" + fedapi "github.com/matrix-org/dendrite/federationapi/api" + "github.com/matrix-org/dendrite/internal" "github.com/matrix-org/dendrite/internal/eventutil" "github.com/matrix-org/dendrite/roomserver/api" "github.com/matrix-org/dendrite/roomserver/internal/helpers" @@ -37,6 +39,9 @@ func init() { prometheus.MustRegister(processRoomEventDuration) } +// TODO: Does this value make sense? +const MaximumProcessingTime = time.Minute * 2 + var processRoomEventDuration = prometheus.NewHistogramVec( prometheus.HistogramOpts{ Namespace: "dendrite", @@ -60,9 +65,25 @@ var processRoomEventDuration = prometheus.NewHistogramVec( // TODO: Break up function - we should probably do transaction ID checks before calling this. 
// nolint:gocyclo func (r *Inputer) processRoomEvent( - ctx context.Context, + inctx context.Context, input *api.InputRoomEvent, -) (eventID string, err error) { +) (err error) { + select { + case <-inctx.Done(): + // Before we do anything, make sure the context hasn't expired for this pending task. + // If it has then we'll give up straight away — it's probably a synchronous input + // request and the caller has already given up, but the inbox task was still queued. + return context.DeadlineExceeded + default: + } + + // Wrap the context with a time limit. We'll allow no more than MaximumProcessingTime for + // everything that we need to do for this event, or it's possible that we could end up wedging + // the roomserver for a very long time. + var cancel context.CancelFunc + ctx, cancel := context.WithTimeout(inctx, MaximumProcessingTime) + defer cancel() + // Measure how long it takes to process this event. started := time.Now() defer func() { @@ -75,6 +96,11 @@ func (r *Inputer) processRoomEvent( // Parse and validate the event JSON headered := input.Event event := headered.Unwrap() + logger := util.GetLogger(ctx).WithFields(logrus.Fields{ + "event_id": event.EventID(), + "room_id": event.RoomID(), + "type": event.Type(), + }) // if we have already got this event then do not process it again, if the input kind is an outlier. // Outliers contain no extra information which may warrant a re-processing. @@ -87,24 +113,67 @@ func (r *Inputer) processRoomEvent( switch idFormat { case gomatrixserverlib.EventIDFormatV1: if bytes.Equal(event.EventReference().EventSHA256, evs[0].EventReference().EventSHA256) { - util.GetLogger(ctx).WithField("event_id", event.EventID()).Infof("Already processed event; ignoring") - return event.EventID(), nil + logger.Debugf("Already processed event; ignoring") + return nil } default: - util.GetLogger(ctx).WithField("event_id", event.EventID()).Infof("Already processed event; ignoring") - return event.EventID(), nil + logger.Debugf("Already processed event; ignoring") + return nil } } } } - // Check that the event passes authentication checks and work out - // the numeric IDs for the auth events. + missingRes := &api.QueryMissingAuthPrevEventsResponse{} + serverRes := &fedapi.QueryJoinedHostServerNamesInRoomResponse{} + if event.Type() != gomatrixserverlib.MRoomCreate || !event.StateKeyEquals("") { + missingReq := &api.QueryMissingAuthPrevEventsRequest{ + RoomID: event.RoomID(), + AuthEventIDs: event.AuthEventIDs(), + PrevEventIDs: event.PrevEventIDs(), + } + if err = r.Queryer.QueryMissingAuthPrevEvents(ctx, missingReq, missingRes); err != nil { + return fmt.Errorf("r.Queryer.QueryMissingAuthPrevEvents: %w", err) + } + } + if len(missingRes.MissingAuthEventIDs) > 0 || len(missingRes.MissingPrevEventIDs) > 0 { + serverReq := &fedapi.QueryJoinedHostServerNamesInRoomRequest{ + RoomID: event.RoomID(), + ExcludeSelf: true, + } + if err = r.FSAPI.QueryJoinedHostServerNamesInRoom(ctx, serverReq, serverRes); err != nil { + return fmt.Errorf("r.FSAPI.QueryJoinedHostServerNamesInRoom: %w", err) + } + } + if input.Origin != "" { + serverRes.ServerNames = append(serverRes.ServerNames, input.Origin) + } + + // First of all, check that the auth events of the event are known. + // If they aren't then we will ask the federation API for them. 
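// Illustrative sketch, not part of the patch above: processRoomEvent now first
// checks whether the caller has already given up, then caps its own work with
// MaximumProcessingTime so one slow event cannot wedge its room's input worker.
// A minimal standard-library version of that pattern:
package main

import (
    "context"
    "errors"
    "fmt"
    "time"
)

const maximumProcessingTime = 2 * time.Minute

func processWithDeadline(parent context.Context, work func(context.Context) error) error {
    // Give up immediately if the caller has already gone away, e.g. a
    // synchronous request whose context expired while this task was queued.
    select {
    case <-parent.Done():
        return context.DeadlineExceeded
    default:
    }

    // Otherwise bound our own processing time.
    ctx, cancel := context.WithTimeout(parent, maximumProcessingTime)
    defer cancel()
    return work(ctx)
}

func main() {
    err := processWithDeadline(context.Background(), func(ctx context.Context) error {
        select {
        case <-time.After(50 * time.Millisecond): // pretend to do some work
            return nil
        case <-ctx.Done():
            return ctx.Err()
        }
    })
    fmt.Println("done, timed out:", errors.Is(err, context.DeadlineExceeded))
}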
isRejected := false - authEventNIDs, rejectionErr := helpers.CheckAuthEvents(ctx, r.DB, headered, input.AuthEventIDs) - if rejectionErr != nil { - logrus.WithError(rejectionErr).WithField("event_id", event.EventID()).WithField("auth_event_ids", input.AuthEventIDs).Error("helpers.CheckAuthEvents failed for event, rejecting event") + authEvents := gomatrixserverlib.NewAuthEvents(nil) + knownEvents := map[string]*types.Event{} + if err = r.fetchAuthEvents(ctx, logger, headered, &authEvents, knownEvents, serverRes.ServerNames); err != nil { + return fmt.Errorf("r.checkForMissingAuthEvents: %w", err) + } + + // Check if the event is allowed by its auth events. If it isn't then + // we consider the event to be "rejected" — it will still be persisted. + var rejectionErr error + if rejectionErr = gomatrixserverlib.Allowed(event, &authEvents); rejectionErr != nil { isRejected = true + logger.WithError(rejectionErr).Warnf("Event %s rejected", event.EventID()) + } + + // Accumulate the auth event NIDs. + authEventIDs := event.AuthEventIDs() + authEventNIDs := make([]types.EventNID, 0, len(authEventIDs)) + for _, authEventID := range authEventIDs { + if _, ok := knownEvents[authEventID]; !ok { + return fmt.Errorf("missing auth event %s", authEventID) + } + authEventNIDs = append(authEventNIDs, knownEvents[authEventID].EventNID) } var softfail bool @@ -113,25 +182,64 @@ func (r *Inputer) processRoomEvent( // current room state. softfail, err = helpers.CheckForSoftFail(ctx, r.DB, headered, input.StateEventIDs) if err != nil { - logrus.WithFields(logrus.Fields{ - "event_id": event.EventID(), - "type": event.Type(), - "room": event.RoomID(), - }).WithError(err).Info("Error authing soft-failed event") + logger.WithError(err).Info("Error authing soft-failed event") + } + } + + // At this point we are checking whether we know all of the prev events, and + // if we know the state before the prev events. This is necessary before we + // try to do `calculateAndSetState` on the event later, otherwise it will fail + // with missing event NIDs. If there's anything missing then we'll go and fetch + // the prev events and state from the federation. Note that we only do this if + // we weren't already told what the state before the event should be — if the + // HasState option was set and a state set was provided (as is the case in a + // typical federated room join) then we won't bother trying to fetch prev events + // because we may not be allowed to see them and we have no choice but to trust + // the state event IDs provided to us in the join instead. + missingPrev := !input.HasState && len(missingRes.MissingPrevEventIDs) > 0 + if missingPrev && input.Kind == api.KindNew { + // Don't do this for KindOld events, otherwise old events that we fetch + // to satisfy missing prev events/state will end up recursively calling + // processRoomEvent. 
+ if len(serverRes.ServerNames) > 0 { + missingState := missingStateReq{ + origin: input.Origin, + inputer: r, + queryer: r.Queryer, + db: r.DB, + federation: r.FSAPI, + keys: r.KeyRing, + roomsMu: internal.NewMutexByRoom(), + servers: map[gomatrixserverlib.ServerName]struct{}{}, + hadEvents: map[string]bool{}, + haveEvents: map[string]*gomatrixserverlib.HeaderedEvent{}, + } + for _, serverName := range serverRes.ServerNames { + missingState.servers[serverName] = struct{}{} + } + if err = missingState.processEventWithMissingState(ctx, event, headered.RoomVersion); err != nil { + isRejected = true + rejectionErr = fmt.Errorf("missingState.processEventWithMissingState: %w", err) + } else { + missingPrev = false + } + } else { + isRejected = true + rejectionErr = fmt.Errorf("missing prev events and no other servers to ask") } } // Store the event. _, _, stateAtEvent, redactionEvent, redactedEventID, err := r.DB.StoreEvent(ctx, event, authEventNIDs, isRejected) if err != nil { - return "", fmt.Errorf("r.DB.StoreEvent: %w", err) + return fmt.Errorf("r.DB.StoreEvent: %w", err) } // if storing this event results in it being redacted then do so. if !isRejected && redactedEventID == event.EventID() { r, rerr := eventutil.RedactEvent(redactionEvent, event) if rerr != nil { - return "", fmt.Errorf("eventutil.RedactEvent: %w", rerr) + return fmt.Errorf("eventutil.RedactEvent: %w", rerr) } event = r } @@ -140,42 +248,31 @@ func (r *Inputer) processRoomEvent( // doesn't have any associated state to store and we don't need to // notify anyone about it. if input.Kind == api.KindOutlier { - logrus.WithFields(logrus.Fields{ - "event_id": event.EventID(), - "type": event.Type(), - "room": event.RoomID(), - "sender": event.Sender(), - }).Debug("Stored outlier") - return event.EventID(), nil + logger.Debug("Stored outlier") + return nil } roomInfo, err := r.DB.RoomInfo(ctx, event.RoomID()) if err != nil { - return "", fmt.Errorf("r.DB.RoomInfo: %w", err) + return fmt.Errorf("r.DB.RoomInfo: %w", err) } if roomInfo == nil { - return "", fmt.Errorf("r.DB.RoomInfo missing for room %s", event.RoomID()) + return fmt.Errorf("r.DB.RoomInfo missing for room %s", event.RoomID()) } - if stateAtEvent.BeforeStateSnapshotNID == 0 { + if !missingPrev && stateAtEvent.BeforeStateSnapshotNID == 0 { // We haven't calculated a state for this event yet. // Lets calculate one. - err = r.calculateAndSetState(ctx, input, *roomInfo, &stateAtEvent, event, isRejected) - if err != nil && input.Kind != api.KindOld { - return "", fmt.Errorf("r.calculateAndSetState: %w", err) + err = r.calculateAndSetState(ctx, input, roomInfo, &stateAtEvent, event, isRejected) + if err != nil { + return fmt.Errorf("r.calculateAndSetState: %w", err) } } // We stop here if the event is rejected: We've stored it but won't update forward extremities or notify anyone about it. if isRejected || softfail { - logrus.WithFields(logrus.Fields{ - "event_id": event.EventID(), - "type": event.Type(), - "room": event.RoomID(), - "soft_fail": softfail, - "sender": event.Sender(), - }).Debug("Stored rejected event") - return event.EventID(), rejectionErr + logger.WithError(rejectionErr).WithField("soft_fail", softfail).Debug("Stored rejected event") + return rejectionErr } switch input.Kind { @@ -189,7 +286,7 @@ func (r *Inputer) processRoomEvent( input.TransactionID, // transaction ID input.HasState, // rewrites state? 
); err != nil { - return "", fmt.Errorf("r.updateLatestEvents: %w", err) + return fmt.Errorf("r.updateLatestEvents: %w", err) } case api.KindOld: err = r.WriteOutputEvents(event.RoomID(), []api.OutputEvent{ @@ -201,7 +298,7 @@ func (r *Inputer) processRoomEvent( }, }) if err != nil { - return "", fmt.Errorf("r.WriteOutputEvents (old): %w", err) + return fmt.Errorf("r.WriteOutputEvents (old): %w", err) } } @@ -220,18 +317,135 @@ func (r *Inputer) processRoomEvent( }, }) if err != nil { - return "", fmt.Errorf("r.WriteOutputEvents (redactions): %w", err) + return fmt.Errorf("r.WriteOutputEvents (redactions): %w", err) } } // Update the extremities of the event graph for the room - return event.EventID(), nil + return nil +} + +// fetchAuthEvents will check to see if any of the +// auth events specified by the given event are unknown. If they are +// then we will go off and request them from the federation and then +// store them in the database. By the time this function ends, either +// we've failed to retrieve the auth chain altogether (in which case +// an error is returned) or we've successfully retrieved them all and +// they are now in the database. +func (r *Inputer) fetchAuthEvents( + ctx context.Context, + logger *logrus.Entry, + event *gomatrixserverlib.HeaderedEvent, + auth *gomatrixserverlib.AuthEvents, + known map[string]*types.Event, + servers []gomatrixserverlib.ServerName, +) error { + unknown := map[string]struct{}{} + authEventIDs := event.AuthEventIDs() + if len(authEventIDs) == 0 { + return nil + } + + for _, authEventID := range authEventIDs { + authEvents, err := r.DB.EventsFromIDs(ctx, []string{authEventID}) + if err != nil || len(authEvents) == 0 || authEvents[0].Event == nil { + unknown[authEventID] = struct{}{} + continue + } + ev := authEvents[0] + known[authEventID] = &ev // don't take the pointer of the iterated event + if err = auth.AddEvent(ev.Event); err != nil { + return fmt.Errorf("auth.AddEvent: %w", err) + } + } + + // If there are no missing auth events then there is nothing more + // to do — we've loaded everything that we need. + if len(unknown) == 0 { + return nil + } + + var err error + var res gomatrixserverlib.RespEventAuth + var found bool + for _, serverName := range servers { + // Request the entire auth chain for the event in question. This should + // contain all of the auth events — including ones that we already know — + // so we'll need to filter through those in the next section. + res, err = r.FSAPI.GetEventAuth(ctx, serverName, event.RoomVersion, event.RoomID(), event.EventID()) + if err != nil { + logger.WithError(err).Warnf("Failed to get event auth from federation for %q: %s", event.EventID(), err) + continue + } + found = true + break + } + if !found { + return fmt.Errorf("no servers provided event auth for event ID %q, tried servers %v", event.EventID(), servers) + } + + for _, authEvent := range gomatrixserverlib.ReverseTopologicalOrdering( + res.AuthEvents, + gomatrixserverlib.TopologicalOrderByAuthEvents, + ) { + // If we already know about this event from the database then we don't + // need to store it again or do anything further with it, so just skip + // over it rather than wasting cycles. + if ev, ok := known[authEvent.EventID()]; ok && ev != nil { + continue + } + + // Check the signatures of the event. + // TODO: It really makes sense for the federation API to be doing this, + // because then it can attempt another server if one serves up an event + // with an invalid signature. For now this will do. 
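// Illustrative sketch, not part of the patch above: fetchAuthEvents walks the
// remote auth chain in reverse topological order so that every event's own
// auth events have already been stored (and therefore have NIDs) before the
// event itself is persisted. gomatrixserverlib provides the real ordering; the
// stand-alone sketch below shows the idea with Kahn's algorithm over a
// hypothetical minimal event type.
package main

import "fmt"

type miniEvent struct {
    ID      string
    AuthIDs []string
}

// topoSort orders the events so that each one appears after all of its auth
// events (dependencies first). Auth events outside the input set are ignored.
func topoSort(events []miniEvent) []miniEvent {
    byID := map[string]miniEvent{}
    indegree := map[string]int{}
    dependents := map[string][]string{}
    for _, ev := range events {
        byID[ev.ID] = ev
        indegree[ev.ID] = 0
    }
    for _, ev := range events {
        for _, auth := range ev.AuthIDs {
            if _, ok := byID[auth]; ok {
                indegree[ev.ID]++
                dependents[auth] = append(dependents[auth], ev.ID)
            }
        }
    }
    var queue []string
    for id, deg := range indegree {
        if deg == 0 {
            queue = append(queue, id)
        }
    }
    var ordered []miniEvent
    for len(queue) > 0 {
        id := queue[0]
        queue = queue[1:]
        ordered = append(ordered, byID[id])
        for _, dep := range dependents[id] {
            if indegree[dep]--; indegree[dep] == 0 {
                queue = append(queue, dep)
            }
        }
    }
    return ordered
}

func main() {
    events := []miniEvent{
        {ID: "$member", AuthIDs: []string{"$create", "$power"}},
        {ID: "$power", AuthIDs: []string{"$create"}},
        {ID: "$create"},
    }
    for _, ev := range topoSort(events) {
        fmt.Println(ev.ID) // prints $create, then $power, then $member
    }
}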
+ if err := authEvent.VerifyEventSignatures(ctx, r.FSAPI.KeyRing()); err != nil { + return fmt.Errorf("event.VerifyEventSignatures: %w", err) + } + + // In order to store the new auth event, we need to know its auth chain + // as NIDs for the `auth_event_nids` column. Let's see if we can find those. + authEventNIDs := make([]types.EventNID, 0, len(authEvent.AuthEventIDs())) + for _, eventID := range authEvent.AuthEventIDs() { + knownEvent, ok := known[eventID] + if !ok { + return fmt.Errorf("missing auth event %s for %s", eventID, authEvent.EventID()) + } + authEventNIDs = append(authEventNIDs, knownEvent.EventNID) + } + + // Let's take a note of the fact that we now know about this event. + if err := auth.AddEvent(authEvent); err != nil { + return fmt.Errorf("auth.AddEvent: %w", err) + } + + // Check if the auth event should be rejected. + isRejected := false + if err := gomatrixserverlib.Allowed(authEvent, auth); err != nil { + isRejected = true + logger.WithError(err).Warnf("Auth event %s rejected", authEvent.EventID()) + } + + // Finally, store the event in the database. + eventNID, _, _, _, _, err := r.DB.StoreEvent(ctx, authEvent, authEventNIDs, isRejected) + if err != nil { + return fmt.Errorf("r.DB.StoreEvent: %w", err) + } + + // Now we know about this event, it was stored and the signatures were OK. + known[authEvent.EventID()] = &types.Event{ + EventNID: eventNID, + Event: authEvent, + } + } + + return nil } func (r *Inputer) calculateAndSetState( ctx context.Context, input *api.InputRoomEvent, - roomInfo types.RoomInfo, + roomInfo *types.RoomInfo, stateAtEvent *types.StateAtEvent, event *gomatrixserverlib.Event, isRejected bool, diff --git a/roomserver/internal/input/input_fifo.go b/roomserver/internal/input/input_fifo.go deleted file mode 100644 index 694b17245..000000000 --- a/roomserver/internal/input/input_fifo.go +++ /dev/null @@ -1,64 +0,0 @@ -package input - -import ( - "sync" -) - -type fifoQueue struct { - tasks []*inputTask - count int - mutex sync.Mutex - notifs chan struct{} -} - -func newFIFOQueue() *fifoQueue { - q := &fifoQueue{ - notifs: make(chan struct{}, 1), - } - return q -} - -func (q *fifoQueue) push(frame *inputTask) { - q.mutex.Lock() - defer q.mutex.Unlock() - q.tasks = append(q.tasks, frame) - q.count++ - select { - case q.notifs <- struct{}{}: - default: - } -} - -// pop returns the first item of the queue, if there is one. -// The second return value will indicate if a task was returned. -// You must check this value, even after calling wait(). -func (q *fifoQueue) pop() (*inputTask, bool) { - q.mutex.Lock() - defer q.mutex.Unlock() - if q.count == 0 { - return nil, false - } - frame := q.tasks[0] - q.tasks[0] = nil - q.tasks = q.tasks[1:] - q.count-- - if q.count == 0 { - // Force a GC of the underlying array, since it might have - // grown significantly if the queue was hammered for some reason - q.tasks = nil - } - return frame, true -} - -// wait returns a channel which can be used to detect when an -// item is waiting in the queue. 
-func (q *fifoQueue) wait() <-chan struct{} { - q.mutex.Lock() - defer q.mutex.Unlock() - if q.count > 0 && len(q.notifs) == 0 { - ch := make(chan struct{}) - close(ch) - return ch - } - return q.notifs -} diff --git a/roomserver/internal/input/input_latest_events.go b/roomserver/internal/input/input_latest_events.go index c9264a27d..6137941e1 100644 --- a/roomserver/internal/input/input_latest_events.go +++ b/roomserver/internal/input/input_latest_events.go @@ -199,7 +199,7 @@ func (u *latestEventsUpdater) doUpdateLatestEvents() error { func (u *latestEventsUpdater) latestState() error { var err error - roomState := state.NewStateResolution(u.api.DB, *u.roomInfo) + roomState := state.NewStateResolution(u.api.DB, u.roomInfo) // Work out if the state at the extremities has actually changed // or not. If they haven't then we won't bother doing all of the diff --git a/roomserver/internal/input/input_missing.go b/roomserver/internal/input/input_missing.go new file mode 100644 index 000000000..44710962c --- /dev/null +++ b/roomserver/internal/input/input_missing.go @@ -0,0 +1,765 @@ +package input + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + fedapi "github.com/matrix-org/dendrite/federationapi/api" + "github.com/matrix-org/dendrite/internal" + "github.com/matrix-org/dendrite/roomserver/api" + "github.com/matrix-org/dendrite/roomserver/internal/query" + "github.com/matrix-org/dendrite/roomserver/storage" + "github.com/matrix-org/gomatrixserverlib" + "github.com/matrix-org/util" + "github.com/sirupsen/logrus" +) + +type missingStateReq struct { + origin gomatrixserverlib.ServerName + db storage.Database + inputer *Inputer + queryer *query.Queryer + keys gomatrixserverlib.JSONVerifier + federation fedapi.FederationInternalAPI + roomsMu *internal.MutexByRoom + servers map[gomatrixserverlib.ServerName]struct{} + hadEvents map[string]bool + hadEventsMutex sync.Mutex + haveEvents map[string]*gomatrixserverlib.HeaderedEvent + haveEventsMutex sync.Mutex +} + +// processEventWithMissingState is the entrypoint for a missingStateReq +// request, as called from processRoomEvent. +func (t *missingStateReq) processEventWithMissingState( + ctx context.Context, e *gomatrixserverlib.Event, roomVersion gomatrixserverlib.RoomVersion, +) error { + // We are missing the previous events for this events. + // This means that there is a gap in our view of the history of the + // room. There two ways that we can handle such a gap: + // 1) We can fill in the gap using /get_missing_events + // 2) We can leave the gap and request the state of the room at + // this event from the remote server using either /state_ids + // or /state. + // Synapse will attempt to do 1 and if that fails or if the gap is + // too large then it will attempt 2. + // Synapse will use /state_ids if possible since usually the state + // is largely unchanged and it is more efficient to fetch a list of + // event ids and then use /event to fetch the individual events. + // However not all version of synapse support /state_ids so you may + // need to fallback to /state. 
+ logger := util.GetLogger(ctx).WithFields(map[string]interface{}{ + "txn_event": e.EventID(), + "room_id": e.RoomID(), + "txn_prev_events": e.PrevEventIDs(), + }) + + // Attempt to fill in the gap using /get_missing_events + // This will either: + // - fill in the gap completely then process event `e` returning no backwards extremity + // - fail to fill in the gap and tell us to terminate the transaction err=not nil + // - fail to fill in the gap and tell us to fetch state at the new backwards extremity, and to not terminate the transaction + newEvents, isGapFilled, err := t.getMissingEvents(ctx, e, roomVersion) + if err != nil { + return fmt.Errorf("t.getMissingEvents: %w", err) + } + if len(newEvents) == 0 { + return fmt.Errorf("expected to find missing events but didn't") + } + if isGapFilled { + logger.Infof("gap filled by /get_missing_events, injecting %d new events", len(newEvents)) + // we can just inject all the newEvents as new as we may have only missed 1 or 2 events and have filled + // in the gap in the DAG + for _, newEvent := range newEvents { + err = t.inputer.processRoomEvent(ctx, &api.InputRoomEvent{ + Kind: api.KindNew, + Event: newEvent.Headered(roomVersion), + Origin: t.origin, + SendAsServer: api.DoNotSendToOtherServers, + }) + if err != nil { + return fmt.Errorf("t.inputer.processRoomEvent: %w", err) + } + } + return nil + } + + backwardsExtremity := newEvents[0] + newEvents = newEvents[1:] + + type respState struct { + // A snapshot is considered trustworthy if it came from our own roomserver. + // That's because the state will have been through state resolution once + // already in QueryStateAfterEvent. + trustworthy bool + *gomatrixserverlib.RespState + } + + // at this point we know we're going to have a gap: we need to work out the room state at the new backwards extremity. + // Therefore, we cannot just query /state_ids with this event to get the state before. Instead, we need to query + // the state AFTER all the prev_events for this event, then apply state resolution to that to get the state before the event. + var states []*respState + for _, prevEventID := range backwardsExtremity.PrevEventIDs() { + // Look up what the state is after the backward extremity. This will either + // come from the roomserver, if we know all the required events, or it will + // come from a remote server via /state_ids if not. + prevState, trustworthy, lerr := t.lookupStateAfterEvent(ctx, roomVersion, backwardsExtremity.RoomID(), prevEventID) + if lerr != nil { + logger.WithError(lerr).Errorf("Failed to lookup state after prev_event: %s", prevEventID) + return lerr + } + // Append the state onto the collected state. We'll run this through the + // state resolution next. + states = append(states, &respState{trustworthy, prevState}) + } + + // Now that we have collected all of the state from the prev_events, we'll + // run the state through the appropriate state resolution algorithm for the + // room if needed. This does a couple of things: + // 1. Ensures that the state is deduplicated fully for each state-key tuple + // 2. 
Ensures that we pick the latest events from both sets, in the case that + // one of the prev_events is quite a bit older than the others + resolvedState := &gomatrixserverlib.RespState{} + switch len(states) { + case 0: + extremityIsCreate := backwardsExtremity.Type() == gomatrixserverlib.MRoomCreate && backwardsExtremity.StateKeyEquals("") + if !extremityIsCreate { + // There are no previous states and this isn't the beginning of the + // room - this is an error condition! + logger.Errorf("Failed to lookup any state after prev_events") + return fmt.Errorf("expected %d states but got %d", len(backwardsExtremity.PrevEventIDs()), len(states)) + } + case 1: + // There's only one previous state - if it's trustworthy (came from a + // local state snapshot which will already have been through state res), + // use it as-is. There's no point in resolving it again. + if states[0].trustworthy { + resolvedState = states[0].RespState + break + } + // Otherwise, if it isn't trustworthy (came from federation), run it through + // state resolution anyway for safety, in case there are duplicates. + fallthrough + default: + respStates := make([]*gomatrixserverlib.RespState, len(states)) + for i := range states { + respStates[i] = states[i].RespState + } + // There's more than one previous state - run them all through state res + t.roomsMu.Lock(e.RoomID()) + resolvedState, err = t.resolveStatesAndCheck(ctx, roomVersion, respStates, backwardsExtremity) + t.roomsMu.Unlock(e.RoomID()) + if err != nil { + logger.WithError(err).Errorf("Failed to resolve state conflicts for event %s", backwardsExtremity.EventID()) + return err + } + } + + hadEvents := map[string]bool{} + t.hadEventsMutex.Lock() + for k, v := range t.hadEvents { + hadEvents[k] = v + } + t.hadEventsMutex.Unlock() + + // Send outliers first so we can send the new backwards extremity without causing errors + outliers, err := resolvedState.Events() + if err != nil { + return err + } + var outlierRoomEvents []api.InputRoomEvent + for _, outlier := range outliers { + if hadEvents[outlier.EventID()] { + continue + } + outlierRoomEvents = append(outlierRoomEvents, api.InputRoomEvent{ + Kind: api.KindOutlier, + Event: outlier.Headered(roomVersion), + Origin: t.origin, + }) + } + // TODO: we could do this concurrently? + for _, ire := range outlierRoomEvents { + if err = t.inputer.processRoomEvent(ctx, &ire); err != nil { + return fmt.Errorf("t.inputer.processRoomEvent[outlier]: %w", err) + } + } + + // Now send the backward extremity into the roomserver with the + // newly resolved state. This marks the "oldest" point in the backfill and + // sets the baseline state for any new events after this. + stateIDs := make([]string, 0, len(resolvedState.StateEvents)) + for _, event := range resolvedState.StateEvents { + stateIDs = append(stateIDs, event.EventID()) + } + + err = t.inputer.processRoomEvent(ctx, &api.InputRoomEvent{ + Kind: api.KindOld, + Event: backwardsExtremity.Headered(roomVersion), + Origin: t.origin, + HasState: true, + StateEventIDs: stateIDs, + SendAsServer: api.DoNotSendToOtherServers, + }) + if err != nil { + return fmt.Errorf("t.inputer.processRoomEvent: %w", err) + } + + // Then send all of the newer backfilled events, of which will all be newer + // than the backward extremity, into the roomserver without state. This way + // they will automatically fast-forward based on the room state at the + // extremity in the last step. 
+ for _, newEvent := range newEvents { + err = t.inputer.processRoomEvent(ctx, &api.InputRoomEvent{ + Kind: api.KindOld, + Event: newEvent.Headered(roomVersion), + Origin: t.origin, + SendAsServer: api.DoNotSendToOtherServers, + }) + if err != nil { + return fmt.Errorf("t.inputer.processRoomEvent: %w", err) + } + } + + return nil +} + +// lookupStateAfterEvent returns the room state after `eventID`, which is the state before eventID with the state of `eventID` (if it's a state event) +// added into the mix. +func (t *missingStateReq) lookupStateAfterEvent(ctx context.Context, roomVersion gomatrixserverlib.RoomVersion, roomID, eventID string) (*gomatrixserverlib.RespState, bool, error) { + // try doing all this locally before we resort to querying federation + respState := t.lookupStateAfterEventLocally(ctx, roomID, eventID) + if respState != nil { + return respState, true, nil + } + + respState, err := t.lookupStateBeforeEvent(ctx, roomVersion, roomID, eventID) + if err != nil { + return nil, false, fmt.Errorf("t.lookupStateBeforeEvent: %w", err) + } + + // fetch the event we're missing and add it to the pile + h, err := t.lookupEvent(ctx, roomVersion, roomID, eventID, false) + switch err.(type) { + case verifySigError: + return respState, false, nil + case nil: + // do nothing + default: + return nil, false, fmt.Errorf("t.lookupEvent: %w", err) + } + h = t.cacheAndReturn(h) + if h.StateKey() != nil { + addedToState := false + for i := range respState.StateEvents { + se := respState.StateEvents[i] + if se.Type() == h.Type() && se.StateKeyEquals(*h.StateKey()) { + respState.StateEvents[i] = h.Unwrap() + addedToState = true + break + } + } + if !addedToState { + respState.StateEvents = append(respState.StateEvents, h.Unwrap()) + } + } + + return respState, false, nil +} + +func (t *missingStateReq) cacheAndReturn(ev *gomatrixserverlib.HeaderedEvent) *gomatrixserverlib.HeaderedEvent { + t.haveEventsMutex.Lock() + defer t.haveEventsMutex.Unlock() + if cached, exists := t.haveEvents[ev.EventID()]; exists { + return cached + } + t.haveEvents[ev.EventID()] = ev + return ev +} + +func (t *missingStateReq) lookupStateAfterEventLocally(ctx context.Context, roomID, eventID string) *gomatrixserverlib.RespState { + var res api.QueryStateAfterEventsResponse + err := t.queryer.QueryStateAfterEvents(ctx, &api.QueryStateAfterEventsRequest{ + RoomID: roomID, + PrevEventIDs: []string{eventID}, + }, &res) + if err != nil || !res.PrevEventsExist { + util.GetLogger(ctx).WithField("room_id", roomID).WithError(err).Warnf("failed to query state after %s locally, prev exists=%v", eventID, res.PrevEventsExist) + return nil + } + stateEvents := make([]*gomatrixserverlib.HeaderedEvent, len(res.StateEvents)) + for i, ev := range res.StateEvents { + // set the event from the haveEvents cache - this means we will share pointers with other prev_event branches for this + // processEvent request, which is better for memory. 
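// Illustrative sketch, not part of the patch above: cacheAndReturn makes every
// code path share one *HeaderedEvent pointer per event ID, which keeps memory
// down when the same event appears on several prev_event branches. A generic
// version of that mutex-guarded canonicalising cache, with a stand-in event type:
package main

import (
    "fmt"
    "sync"
)

type event struct{ ID string }

type eventCache struct {
    mu     sync.Mutex
    events map[string]*event
}

// cacheAndReturn stores ev if it is not already known and always returns the
// canonical pointer for that event ID.
func (c *eventCache) cacheAndReturn(ev *event) *event {
    c.mu.Lock()
    defer c.mu.Unlock()
    if cached, ok := c.events[ev.ID]; ok {
        return cached
    }
    c.events[ev.ID] = ev
    return ev
}

func main() {
    c := &eventCache{events: map[string]*event{}}
    a := c.cacheAndReturn(&event{ID: "$abc"})
    b := c.cacheAndReturn(&event{ID: "$abc"}) // a second copy of the same event
    fmt.Println(a == b)                       // true: both refer to the cached instance
}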
+ stateEvents[i] = t.cacheAndReturn(ev) + t.hadEvent(ev.EventID()) + } + // we should never access res.StateEvents again so we delete it here to make GC faster + res.StateEvents = nil + + var authEvents []*gomatrixserverlib.Event + missingAuthEvents := map[string]bool{} + for _, ev := range stateEvents { + t.haveEventsMutex.Lock() + for _, ae := range ev.AuthEventIDs() { + if aev, ok := t.haveEvents[ae]; ok { + authEvents = append(authEvents, aev.Unwrap()) + } else { + missingAuthEvents[ae] = true + } + } + t.haveEventsMutex.Unlock() + } + // QueryStateAfterEvents does not return the auth events, so fetch them now. We know the roomserver has them else it wouldn't + // have stored the event. + if len(missingAuthEvents) > 0 { + var missingEventList []string + for evID := range missingAuthEvents { + missingEventList = append(missingEventList, evID) + } + queryReq := api.QueryEventsByIDRequest{ + EventIDs: missingEventList, + } + util.GetLogger(ctx).WithField("count", len(missingEventList)).Infof("Fetching missing auth events") + var queryRes api.QueryEventsByIDResponse + if err = t.queryer.QueryEventsByID(ctx, &queryReq, &queryRes); err != nil { + return nil + } + for i, ev := range queryRes.Events { + authEvents = append(authEvents, t.cacheAndReturn(queryRes.Events[i]).Unwrap()) + t.hadEvent(ev.EventID()) + } + queryRes.Events = nil + } + + return &gomatrixserverlib.RespState{ + StateEvents: gomatrixserverlib.UnwrapEventHeaders(stateEvents), + AuthEvents: authEvents, + } +} + +// lookuptStateBeforeEvent returns the room state before the event e, which is just /state_ids and/or /state depending on what +// the server supports. +func (t *missingStateReq) lookupStateBeforeEvent(ctx context.Context, roomVersion gomatrixserverlib.RoomVersion, roomID, eventID string) ( + *gomatrixserverlib.RespState, error) { + + // Attempt to fetch the missing state using /state_ids and /events + return t.lookupMissingStateViaStateIDs(ctx, roomID, eventID, roomVersion) +} + +func (t *missingStateReq) resolveStatesAndCheck(ctx context.Context, roomVersion gomatrixserverlib.RoomVersion, states []*gomatrixserverlib.RespState, backwardsExtremity *gomatrixserverlib.Event) (*gomatrixserverlib.RespState, error) { + var authEventList []*gomatrixserverlib.Event + var stateEventList []*gomatrixserverlib.Event + for _, state := range states { + authEventList = append(authEventList, state.AuthEvents...) + stateEventList = append(stateEventList, state.StateEvents...) 
+ } + resolvedStateEvents, err := gomatrixserverlib.ResolveConflicts(roomVersion, stateEventList, authEventList) + if err != nil { + return nil, err + } + // apply the current event +retryAllowedState: + if err = checkAllowedByState(backwardsExtremity, resolvedStateEvents); err != nil { + switch missing := err.(type) { + case gomatrixserverlib.MissingAuthEventError: + h, err2 := t.lookupEvent(ctx, roomVersion, backwardsExtremity.RoomID(), missing.AuthEventID, true) + switch err2.(type) { + case verifySigError: + return &gomatrixserverlib.RespState{ + AuthEvents: authEventList, + StateEvents: resolvedStateEvents, + }, nil + case nil: + // do nothing + default: + return nil, fmt.Errorf("missing auth event %s and failed to look it up: %w", missing.AuthEventID, err2) + } + util.GetLogger(ctx).Infof("fetched event %s", missing.AuthEventID) + resolvedStateEvents = append(resolvedStateEvents, h.Unwrap()) + goto retryAllowedState + default: + } + return nil, err + } + return &gomatrixserverlib.RespState{ + AuthEvents: authEventList, + StateEvents: resolvedStateEvents, + }, nil +} + +// get missing events for `e`. If `isGapFilled`=true then `newEvents` contains all the events to inject, +// without `e`. If `isGapFilled=false` then `newEvents` contains the response to /get_missing_events +func (t *missingStateReq) getMissingEvents(ctx context.Context, e *gomatrixserverlib.Event, roomVersion gomatrixserverlib.RoomVersion) (newEvents []*gomatrixserverlib.Event, isGapFilled bool, err error) { + logger := util.GetLogger(ctx).WithField("event_id", e.EventID()).WithField("room_id", e.RoomID()) + needed := gomatrixserverlib.StateNeededForAuth([]*gomatrixserverlib.Event{e}) + // query latest events (our trusted forward extremities) + req := api.QueryLatestEventsAndStateRequest{ + RoomID: e.RoomID(), + StateToFetch: needed.Tuples(), + } + var res api.QueryLatestEventsAndStateResponse + if err = t.queryer.QueryLatestEventsAndState(ctx, &req, &res); err != nil { + logger.WithError(err).Warn("Failed to query latest events") + return nil, false, err + } + latestEvents := make([]string, len(res.LatestEvents)) + for i, ev := range res.LatestEvents { + latestEvents[i] = res.LatestEvents[i].EventID + t.hadEvent(ev.EventID) + } + + var missingResp *gomatrixserverlib.RespMissingEvents + for server := range t.servers { + var m gomatrixserverlib.RespMissingEvents + if m, err = t.federation.LookupMissingEvents(ctx, server, e.RoomID(), gomatrixserverlib.MissingEvents{ + Limit: 20, + // The latest event IDs that the sender already has. These are skipped when retrieving the previous events of latest_events. + EarliestEvents: latestEvents, + // The event IDs to retrieve the previous events for. 
+ LatestEvents: []string{e.EventID()}, + }, roomVersion); err == nil { + missingResp = &m + break + } else { + logger.WithError(err).Errorf("%s pushed us an event but %q did not respond to /get_missing_events", t.origin, server) + if errors.Is(err, context.DeadlineExceeded) { + select { + case <-ctx.Done(): // the parent request context timed out + return nil, false, context.DeadlineExceeded + default: // this request exceed its own timeout + continue + } + } + } + } + + if missingResp == nil { + logger.WithError(err).Errorf( + "%s pushed us an event but %d server(s) couldn't give us details about prev_events via /get_missing_events - dropping this event until it can", + t.origin, len(t.servers), + ) + return nil, false, missingPrevEventsError{ + eventID: e.EventID(), + err: err, + } + } + + // Make sure events from the missingResp are using the cache - missing events + // will be added and duplicates will be removed. + logger.Infof("get_missing_events returned %d events", len(missingResp.Events)) + for i, ev := range missingResp.Events { + missingResp.Events[i] = t.cacheAndReturn(ev.Headered(roomVersion)).Unwrap() + } + + // topologically sort and sanity check that we are making forward progress + newEvents = gomatrixserverlib.ReverseTopologicalOrdering(missingResp.Events, gomatrixserverlib.TopologicalOrderByPrevEvents) + shouldHaveSomeEventIDs := e.PrevEventIDs() + hasPrevEvent := false +Event: + for _, pe := range shouldHaveSomeEventIDs { + for _, ev := range newEvents { + if ev.EventID() == pe { + hasPrevEvent = true + break Event + } + } + } + if !hasPrevEvent { + err = fmt.Errorf("called /get_missing_events but server %s didn't return any prev_events with IDs %v", t.origin, shouldHaveSomeEventIDs) + logger.WithError(err).Errorf( + "%s pushed us an event but couldn't give us details about prev_events via /get_missing_events - dropping this event until it can", + t.origin, + ) + return nil, false, missingPrevEventsError{ + eventID: e.EventID(), + err: err, + } + } + if len(newEvents) == 0 { + return nil, false, nil // TODO: error instead? + } + + // now check if we can fill the gap. Look to see if we have state snapshot IDs for the earliest event + earliestNewEvent := newEvents[0] + if state, err := t.db.StateAtEventIDs(ctx, []string{earliestNewEvent.EventID()}); err != nil || len(state) == 0 { + if earliestNewEvent.Type() == gomatrixserverlib.MRoomCreate && earliestNewEvent.StateKeyEquals("") { + // we got to the beginning of the room so there will be no state! It's all good we can process this + return newEvents, true, nil + } + // we don't have the state at this earliest event from /g_m_e so we won't have state for later events either + return newEvents, false, nil + } + // StateAtEventIDs returned some kind of state for the earliest event so we can fill in the gap! + return newEvents, true, nil +} + +func (t *missingStateReq) lookupMissingStateViaState(ctx context.Context, roomID, eventID string, roomVersion gomatrixserverlib.RoomVersion) ( + respState *gomatrixserverlib.RespState, err error) { + state, err := t.federation.LookupState(ctx, t.origin, roomID, eventID, roomVersion) + if err != nil { + return nil, err + } + // Check that the returned state is valid. + if err := state.Check(ctx, t.keys, nil); err != nil { + return nil, err + } + // Cache the results of this state lookup and deduplicate anything we already + // have in the cache, freeing up memory. 
+ for i, ev := range state.AuthEvents { + state.AuthEvents[i] = t.cacheAndReturn(ev.Headered(roomVersion)).Unwrap() + } + for i, ev := range state.StateEvents { + state.StateEvents[i] = t.cacheAndReturn(ev.Headered(roomVersion)).Unwrap() + } + return &state, nil +} + +func (t *missingStateReq) lookupMissingStateViaStateIDs(ctx context.Context, roomID, eventID string, roomVersion gomatrixserverlib.RoomVersion) ( + *gomatrixserverlib.RespState, error) { + util.GetLogger(ctx).WithField("room_id", roomID).Infof("lookupMissingStateViaStateIDs %s", eventID) + // fetch the state event IDs at the time of the event + stateIDs, err := t.federation.LookupStateIDs(ctx, t.origin, roomID, eventID) + if err != nil { + return nil, err + } + // work out which auth/state IDs are missing + wantIDs := append(stateIDs.StateEventIDs, stateIDs.AuthEventIDs...) + missing := make(map[string]bool) + var missingEventList []string + t.haveEventsMutex.Lock() + for _, sid := range wantIDs { + if _, ok := t.haveEvents[sid]; !ok { + if !missing[sid] { + missing[sid] = true + missingEventList = append(missingEventList, sid) + } + } + } + t.haveEventsMutex.Unlock() + + // fetch as many as we can from the roomserver + queryReq := api.QueryEventsByIDRequest{ + EventIDs: missingEventList, + } + var queryRes api.QueryEventsByIDResponse + if err = t.queryer.QueryEventsByID(ctx, &queryReq, &queryRes); err != nil { + return nil, err + } + for i, ev := range queryRes.Events { + queryRes.Events[i] = t.cacheAndReturn(queryRes.Events[i]) + t.hadEvent(ev.EventID()) + evID := queryRes.Events[i].EventID() + if missing[evID] { + delete(missing, evID) + } + } + queryRes.Events = nil // allow it to be GCed + + concurrentRequests := 8 + missingCount := len(missing) + util.GetLogger(ctx).WithField("room_id", roomID).WithField("event_id", eventID).Infof("lookupMissingStateViaStateIDs missing %d/%d events", missingCount, len(wantIDs)) + + // If over 50% of the auth/state events from /state_ids are missing + // then we'll just call /state instead, otherwise we'll just end up + // hammering the remote side with /event requests unnecessarily. + if missingCount > concurrentRequests && missingCount > len(wantIDs)/2 { + util.GetLogger(ctx).WithFields(logrus.Fields{ + "missing": missingCount, + "event_id": eventID, + "room_id": roomID, + "total_state": len(stateIDs.StateEventIDs), + "total_auth_events": len(stateIDs.AuthEventIDs), + }).Info("Fetching all state at event") + return t.lookupMissingStateViaState(ctx, roomID, eventID, roomVersion) + } + + if missingCount > 0 { + util.GetLogger(ctx).WithFields(logrus.Fields{ + "missing": missingCount, + "event_id": eventID, + "room_id": roomID, + "total_state": len(stateIDs.StateEventIDs), + "total_auth_events": len(stateIDs.AuthEventIDs), + "concurrent_requests": concurrentRequests, + }).Info("Fetching missing state at event") + + // Create a queue containing all of the missing event IDs that we want + // to retrieve. + pending := make(chan string, missingCount) + for missingEventID := range missing { + pending <- missingEventID + } + close(pending) + + // Define how many workers we should start to do this. + if missingCount < concurrentRequests { + concurrentRequests = missingCount + } + + // Create the wait group. + var fetchgroup sync.WaitGroup + fetchgroup.Add(concurrentRequests) + + // This is the only place where we'll write to t.haveEvents from + // multiple goroutines, and everywhere else is blocked on this + // synchronous function anyway. 
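// Illustrative sketch, not part of the patch above: lookupMissingStateViaStateIDs
// falls back to fetching the whole room state via /state when more than half of
// the wanted auth/state events are missing locally, since issuing hundreds of
// individual /event requests would be slower and noisier. The threshold logic
// in isolation, with hypothetical names:
package main

import "fmt"

// shouldFetchFullState reports whether to switch from per-event /event
// requests to a single /state request.
func shouldFetchFullState(missingCount, wantedCount, concurrentRequests int) bool {
    return missingCount > concurrentRequests && missingCount > wantedCount/2
}

func main() {
    fmt.Println(shouldFetchFullState(6, 200, 8))   // false: only a handful of events are missing
    fmt.Println(shouldFetchFullState(150, 200, 8)) // true: cheaper to fetch /state once
}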
+ var haveEventsMutex sync.Mutex + + // Define what we'll do in order to fetch the missing event ID. + fetch := func(missingEventID string) { + var h *gomatrixserverlib.HeaderedEvent + h, err = t.lookupEvent(ctx, roomVersion, roomID, missingEventID, false) + switch err.(type) { + case verifySigError: + return + case nil: + break + default: + util.GetLogger(ctx).WithFields(logrus.Fields{ + "event_id": missingEventID, + "room_id": roomID, + }).Info("Failed to fetch missing event") + return + } + haveEventsMutex.Lock() + t.cacheAndReturn(h) + haveEventsMutex.Unlock() + } + + // Create the worker. + worker := func(ch <-chan string) { + defer fetchgroup.Done() + for missingEventID := range ch { + fetch(missingEventID) + } + } + + // Start the workers. + for i := 0; i < concurrentRequests; i++ { + go worker(pending) + } + + // Wait for the workers to finish. + fetchgroup.Wait() + } + + resp, err := t.createRespStateFromStateIDs(stateIDs) + return resp, err +} + +func (t *missingStateReq) createRespStateFromStateIDs(stateIDs gomatrixserverlib.RespStateIDs) ( + *gomatrixserverlib.RespState, error) { // nolint:unparam + t.haveEventsMutex.Lock() + defer t.haveEventsMutex.Unlock() + + // create a RespState response using the response to /state_ids as a guide + respState := gomatrixserverlib.RespState{} + + for i := range stateIDs.StateEventIDs { + ev, ok := t.haveEvents[stateIDs.StateEventIDs[i]] + if !ok { + logrus.Warnf("Missing state event in createRespStateFromStateIDs: %s", stateIDs.StateEventIDs[i]) + continue + } + respState.StateEvents = append(respState.StateEvents, ev.Unwrap()) + } + for i := range stateIDs.AuthEventIDs { + ev, ok := t.haveEvents[stateIDs.AuthEventIDs[i]] + if !ok { + logrus.Warnf("Missing auth event in createRespStateFromStateIDs: %s", stateIDs.AuthEventIDs[i]) + continue + } + respState.AuthEvents = append(respState.AuthEvents, ev.Unwrap()) + } + // We purposefully do not do auth checks on the returned events, as they will still + // be processed in the exact same way, just as a 'rejected' event + // TODO: Add a field to HeaderedEvent to indicate if the event is rejected. 
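// Illustrative sketch, not part of the patch above: when many state/auth events
// still need fetching, the code drains a buffered channel of event IDs with a
// small pool of workers. A self-contained version of that bounded-concurrency
// pattern, with a placeholder fetch function:
package main

import (
    "fmt"
    "sync"
)

func fetchAll(ids []string, concurrency int, fetch func(string)) {
    if concurrency > len(ids) {
        concurrency = len(ids)
    }
    pending := make(chan string, len(ids))
    for _, id := range ids {
        pending <- id
    }
    close(pending)

    var wg sync.WaitGroup
    wg.Add(concurrency)
    for i := 0; i < concurrency; i++ {
        go func() {
            defer wg.Done()
            for id := range pending {
                fetch(id)
            }
        }()
    }
    wg.Wait()
}

func main() {
    ids := []string{"$a", "$b", "$c", "$d", "$e"}
    var mu sync.Mutex
    fetched := map[string]bool{}
    fetchAll(ids, 8, func(id string) {
        mu.Lock()
        fetched[id] = true // in the real code this is an /event request plus a cache insert
        mu.Unlock()
    })
    fmt.Println(len(fetched), "events fetched")
}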
+ return &respState, nil +} + +func (t *missingStateReq) lookupEvent(ctx context.Context, roomVersion gomatrixserverlib.RoomVersion, _, missingEventID string, localFirst bool) (*gomatrixserverlib.HeaderedEvent, error) { + if localFirst { + // fetch from the roomserver + queryReq := api.QueryEventsByIDRequest{ + EventIDs: []string{missingEventID}, + } + var queryRes api.QueryEventsByIDResponse + if err := t.queryer.QueryEventsByID(ctx, &queryReq, &queryRes); err != nil { + util.GetLogger(ctx).Warnf("Failed to query roomserver for missing event %s: %s - falling back to remote", missingEventID, err) + } else if len(queryRes.Events) == 1 { + return queryRes.Events[0], nil + } + } + var event *gomatrixserverlib.Event + found := false + for serverName := range t.servers { + reqctx, cancel := context.WithTimeout(ctx, time.Second*30) + defer cancel() + txn, err := t.federation.GetEvent(reqctx, serverName, missingEventID) + if err != nil || len(txn.PDUs) == 0 { + util.GetLogger(ctx).WithError(err).WithField("event_id", missingEventID).Warn("Failed to get missing /event for event ID") + if errors.Is(err, context.DeadlineExceeded) { + select { + case <-reqctx.Done(): // this server took too long + continue + case <-ctx.Done(): // the input request timed out + return nil, context.DeadlineExceeded + } + } + continue + } + event, err = gomatrixserverlib.NewEventFromUntrustedJSON(txn.PDUs[0], roomVersion) + if err != nil { + util.GetLogger(ctx).WithError(err).WithField("event_id", missingEventID).Warnf("Transaction: Failed to parse event JSON of event") + continue + } + found = true + break + } + if !found { + util.GetLogger(ctx).WithField("event_id", missingEventID).Warnf("Failed to get missing /event for event ID from %d server(s)", len(t.servers)) + return nil, fmt.Errorf("wasn't able to find event via %d server(s)", len(t.servers)) + } + if err := event.VerifyEventSignatures(ctx, t.keys); err != nil { + util.GetLogger(ctx).WithError(err).Warnf("Transaction: Couldn't validate signature of event %q", event.EventID()) + return nil, verifySigError{event.EventID(), err} + } + return t.cacheAndReturn(event.Headered(roomVersion)), nil +} + +func checkAllowedByState(e *gomatrixserverlib.Event, stateEvents []*gomatrixserverlib.Event) error { + authUsingState := gomatrixserverlib.NewAuthEvents(nil) + for i := range stateEvents { + err := authUsingState.AddEvent(stateEvents[i]) + if err != nil { + return err + } + } + return gomatrixserverlib.Allowed(e, &authUsingState) +} + +func (t *missingStateReq) hadEvent(eventID string) { + t.hadEventsMutex.Lock() + defer t.hadEventsMutex.Unlock() + t.hadEvents[eventID] = true +} + +type verifySigError struct { + eventID string + err error +} +type missingPrevEventsError struct { + eventID string + err error +} + +func (e verifySigError) Error() string { + return fmt.Sprintf("unable to verify signature of event %q: %s", e.eventID, e.err) +} +func (e missingPrevEventsError) Error() string { + return fmt.Sprintf("unable to get prev_events for event %q: %s", e.eventID, e.err) +} diff --git a/roomserver/internal/perform/perform_backfill.go b/roomserver/internal/perform/perform_backfill.go index e198f67d8..081f694a1 100644 --- a/roomserver/internal/perform/perform_backfill.go +++ b/roomserver/internal/perform/perform_backfill.go @@ -77,15 +77,19 @@ func (r *Backfiller) PerformBackfill( } // Scan the event tree for events to send back. 
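// Illustrative sketch, not part of the patch above: the new input_missing.go
// code signals soft failures with small typed errors (verifySigError,
// missingPrevEventsError) so that callers can switch on the failure mode
// instead of inspecting strings. A minimal stand-alone version of that pattern:
package main

import (
    "errors"
    "fmt"
)

type verifySigError struct {
    eventID string
    err     error
}

func (e verifySigError) Error() string {
    return fmt.Sprintf("unable to verify signature of event %q: %s", e.eventID, e.err)
}

func lookupEvent(eventID string) error {
    // Pretend the signature check failed.
    return verifySigError{eventID: eventID, err: errors.New("bad signature")}
}

func main() {
    err := lookupEvent("$abc")
    switch err.(type) {
    case verifySigError:
        fmt.Println("ignoring event with a bad signature:", err)
    case nil:
        fmt.Println("ok")
    default:
        fmt.Println("fatal:", err)
    }
}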
- resultNIDs, err := helpers.ScanEventTree(ctx, r.DB, *info, front, visited, request.Limit, request.ServerName) + resultNIDs, err := helpers.ScanEventTree(ctx, r.DB, info, front, visited, request.Limit, request.ServerName) if err != nil { return err } - // Retrieve events from the list that was filled previously. + // Retrieve events from the list that was filled previously. If we fail to get + // events from the database then attempt once to get them from federation instead. var loadedEvents []*gomatrixserverlib.Event loadedEvents, err = helpers.LoadEvents(ctx, r.DB, resultNIDs) if err != nil { + if _, ok := err.(types.MissingEventError); ok { + return r.backfillViaFederation(ctx, request, response) + } return err } @@ -418,7 +422,7 @@ FindSuccessor: return nil } - stateEntries, err := helpers.StateBeforeEvent(ctx, b.db, *info, NIDs[eventID]) + stateEntries, err := helpers.StateBeforeEvent(ctx, b.db, info, NIDs[eventID]) if err != nil { logrus.WithField("event_id", eventID).WithError(err).Error("ServersAtEvent: failed to load state before event") return nil diff --git a/roomserver/internal/perform/perform_inbound_peek.go b/roomserver/internal/perform/perform_inbound_peek.go index 98f5f6f96..d19fc8386 100644 --- a/roomserver/internal/perform/perform_inbound_peek.go +++ b/roomserver/internal/perform/perform_inbound_peek.go @@ -79,7 +79,7 @@ func (r *InboundPeeker) PerformInboundPeek( response.LatestEvent = sortedLatestEvents[0].Headered(info.RoomVersion) // XXX: do we actually need to do a state resolution here? - roomState := state.NewStateResolution(r.DB, *info) + roomState := state.NewStateResolution(r.DB, info) var stateEntries []types.StateEntry stateEntries, err = roomState.LoadStateAtSnapshot( diff --git a/roomserver/internal/perform/perform_invite.go b/roomserver/internal/perform/perform_invite.go index ca0654685..85b2322fe 100644 --- a/roomserver/internal/perform/perform_invite.go +++ b/roomserver/internal/perform/perform_invite.go @@ -172,7 +172,7 @@ func (r *Inviter) PerformInvite( { Kind: api.KindNew, Event: event, - AuthEventIDs: event.AuthEventIDs(), + Origin: event.Origin(), SendAsServer: req.SendAsServer, }, }, @@ -231,7 +231,7 @@ func buildInviteStrippedState( StateKey: "", }) } - roomState := state.NewStateResolution(db, *info) + roomState := state.NewStateResolution(db, info) stateEntries, err := roomState.LoadStateAtSnapshotForStringTuples( ctx, info.StateSnapshotNID, stateWanted, ) diff --git a/roomserver/internal/perform/perform_join.go b/roomserver/internal/perform/perform_join.go index 75397eb60..a1ffab5dd 100644 --- a/roomserver/internal/perform/perform_join.go +++ b/roomserver/internal/perform/perform_join.go @@ -271,7 +271,6 @@ func (r *Joiner) performJoinRoomByID( { Kind: rsAPI.KindNew, Event: event.Headered(buildRes.RoomVersion), - AuthEventIDs: event.AuthEventIDs(), SendAsServer: string(r.Cfg.Matrix.ServerName), }, }, diff --git a/roomserver/internal/perform/perform_leave.go b/roomserver/internal/perform/perform_leave.go index 4daeb10af..eac528eaf 100644 --- a/roomserver/internal/perform/perform_leave.go +++ b/roomserver/internal/perform/perform_leave.go @@ -139,7 +139,7 @@ func (r *Leaver) performLeaveRoomByID( { Kind: api.KindNew, Event: event.Headered(buildRes.RoomVersion), - AuthEventIDs: event.AuthEventIDs(), + Origin: event.Origin(), SendAsServer: string(r.Cfg.Matrix.ServerName), }, }, diff --git a/roomserver/internal/query/query.go b/roomserver/internal/query/query.go index b80f08ab6..6b4cb5816 100644 --- a/roomserver/internal/query/query.go +++ 
b/roomserver/internal/query/query.go @@ -63,7 +63,7 @@ func (r *Queryer) QueryStateAfterEvents( return nil } - roomState := state.NewStateResolution(r.DB, *info) + roomState := state.NewStateResolution(r.DB, info) response.RoomExists = true response.RoomVersion = info.RoomVersion @@ -294,7 +294,7 @@ func (r *Queryer) QueryMembershipsForRoom( events, err = r.DB.Events(ctx, eventNIDs) } else { - stateEntries, err = helpers.StateBeforeEvent(ctx, r.DB, *info, membershipEventNID) + stateEntries, err = helpers.StateBeforeEvent(ctx, r.DB, info, membershipEventNID) if err != nil { logrus.WithField("membership_event_nid", membershipEventNID).WithError(err).Error("failed to load state before event") return err @@ -377,7 +377,7 @@ func (r *Queryer) QueryServerAllowedToSeeEvent( return fmt.Errorf("QueryServerAllowedToSeeEvent: no room info for room %s", roomID) } response.AllowedToSeeEvent, err = helpers.CheckServerAllowedToSeeEvent( - ctx, r.DB, *info, request.EventID, request.ServerName, inRoomRes.IsInRoom, + ctx, r.DB, info, request.EventID, request.ServerName, inRoomRes.IsInRoom, ) return } @@ -416,7 +416,7 @@ func (r *Queryer) QueryMissingEvents( return fmt.Errorf("missing RoomInfo for room %s", events[0].RoomID()) } - resultNIDs, err := helpers.ScanEventTree(ctx, r.DB, *info, front, visited, request.Limit, request.ServerName) + resultNIDs, err := helpers.ScanEventTree(ctx, r.DB, info, front, visited, request.Limit, request.ServerName) if err != nil { return err } @@ -457,8 +457,23 @@ func (r *Queryer) QueryStateAndAuthChain( response.RoomExists = true response.RoomVersion = info.RoomVersion + // handle this entirely separately to the other case so we don't have to pull out + // the entire current state of the room + // TODO: this probably means it should be a different query operation... 
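// A conceptual sketch of the auth chain walk behind the GetAuthChain call in the
// OnlyFetchAuthChain fast path added just below: starting from a set of event IDs,
// keep following auth_events references until no new events turn up. The event
// type and lookupEvents function are illustrative stand-ins, not the real
// roomserver types.
package main

import "fmt"

type event struct {
	ID           string
	AuthEventIDs []string
}

func authChain(start []string, lookupEvents func(ids []string) []event) []event {
	seen := map[string]bool{}
	var chain []event
	frontier := start
	for len(frontier) > 0 {
		var next []string
		for _, ev := range lookupEvents(frontier) {
			if seen[ev.ID] {
				continue
			}
			seen[ev.ID] = true
			chain = append(chain, ev)
			next = append(next, ev.AuthEventIDs...)
		}
		frontier = next
	}
	return chain
}

func main() {
	byID := map[string]event{
		"$create": {ID: "$create"},
		"$member": {ID: "$member", AuthEventIDs: []string{"$create"}},
	}
	lookup := func(ids []string) (out []event) {
		for _, id := range ids {
			if ev, ok := byID[id]; ok {
				out = append(out, ev)
			}
		}
		return
	}
	fmt.Println(authChain([]string{"$member"}, lookup))
}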
+ if request.OnlyFetchAuthChain { + var authEvents []*gomatrixserverlib.Event + authEvents, err = GetAuthChain(ctx, r.DB.EventsFromIDs, request.AuthEventIDs) + if err != nil { + return err + } + for _, event := range authEvents { + response.AuthChainEvents = append(response.AuthChainEvents, event.Headered(info.RoomVersion)) + } + return nil + } + var stateEvents []*gomatrixserverlib.Event - stateEvents, err = r.loadStateAtEventIDs(ctx, *info, request.PrevEventIDs) + stateEvents, err = r.loadStateAtEventIDs(ctx, info, request.PrevEventIDs) if err != nil { return err } @@ -497,7 +512,7 @@ func (r *Queryer) QueryStateAndAuthChain( return err } -func (r *Queryer) loadStateAtEventIDs(ctx context.Context, roomInfo types.RoomInfo, eventIDs []string) ([]*gomatrixserverlib.Event, error) { +func (r *Queryer) loadStateAtEventIDs(ctx context.Context, roomInfo *types.RoomInfo, eventIDs []string) ([]*gomatrixserverlib.Event, error) { roomState := state.NewStateResolution(r.DB, roomInfo) prevStates, err := r.DB.StateAtEventIDs(ctx, eventIDs) if err != nil { diff --git a/roomserver/roomserver.go b/roomserver/roomserver.go index e47421008..669957be1 100644 --- a/roomserver/roomserver.go +++ b/roomserver/roomserver.go @@ -23,8 +23,7 @@ import ( "github.com/matrix-org/dendrite/roomserver/internal" "github.com/matrix-org/dendrite/roomserver/storage" "github.com/matrix-org/dendrite/setup/base" - "github.com/matrix-org/dendrite/setup/config" - "github.com/matrix-org/dendrite/setup/kafka" + "github.com/matrix-org/dendrite/setup/jetstream" "github.com/sirupsen/logrus" ) @@ -41,8 +40,6 @@ func NewInternalAPI( ) api.RoomserverInternalAPI { cfg := &base.Cfg.RoomServer - _, producer := kafka.SetupConsumerProducer(&cfg.Matrix.Kafka) - var perspectiveServerNames []gomatrixserverlib.ServerName for _, kp := range base.Cfg.FederationAPI.KeyPerspectives { perspectiveServerNames = append(perspectiveServerNames, kp.ServerName) @@ -53,8 +50,12 @@ func NewInternalAPI( logrus.WithError(err).Panicf("failed to connect to room server db") } + js, _, _ := jetstream.Prepare(&cfg.Matrix.JetStream) + return internal.NewRoomserverAPI( - cfg, roomserverDB, producer, string(cfg.Matrix.Kafka.TopicFor(config.TopicOutputRoomEvent)), + cfg, roomserverDB, js, + cfg.Matrix.JetStream.TopicFor(jetstream.InputRoomEvent), + cfg.Matrix.JetStream.TopicFor(jetstream.OutputRoomEvent), base.Caches, perspectiveServerNames, ) } diff --git a/roomserver/roomserver_test.go b/roomserver/roomserver_test.go deleted file mode 100644 index 40e8e92d1..000000000 --- a/roomserver/roomserver_test.go +++ /dev/null @@ -1,407 +0,0 @@ -package roomserver - -import ( - "bytes" - "context" - "crypto/ed25519" - "encoding/json" - "fmt" - "os" - "reflect" - "testing" - "time" - - "github.com/Shopify/sarama" - "github.com/matrix-org/dendrite/internal/caching" - "github.com/matrix-org/dendrite/roomserver/api" - "github.com/matrix-org/dendrite/roomserver/internal" - "github.com/matrix-org/dendrite/roomserver/storage" - "github.com/matrix-org/dendrite/setup/base" - "github.com/matrix-org/dendrite/setup/config" - "github.com/matrix-org/gomatrixserverlib" - "github.com/sirupsen/logrus" -) - -const ( - testOrigin = gomatrixserverlib.ServerName("kaer.morhen") - // we have to use an on-disk DB because we open multiple connections due to the *Updater structs. - // Using :memory: results in a brand new DB for each open connection, and sharing memory via - // ?cache=shared just allows read-only sharing, so writes to the database on other connections are lost. 
- roomserverDBFileURI = "file:roomserver_test.db" - roomserverDBFilePath = "./roomserver_test.db" -) - -var ( - ctx = context.Background() -) - -type dummyProducer struct { - topic string - producedMessages []*api.OutputEvent -} - -// SendMessage produces a given message, and returns only when it either has -// succeeded or failed to produce. It will return the partition and the offset -// of the produced message, or an error if the message failed to produce. -func (p *dummyProducer) SendMessage(msg *sarama.ProducerMessage) (partition int32, offset int64, err error) { - if msg.Topic != p.topic { - return 0, 0, nil - } - be := msg.Value.(sarama.ByteEncoder) - b := json.RawMessage(be) - fmt.Println("SENDING >>>>>>>> ", string(b)) - var out api.OutputEvent - err = json.Unmarshal(b, &out) - if err != nil { - return 0, 0, err - } - p.producedMessages = append(p.producedMessages, &out) - return 0, 0, nil -} - -// SendMessages produces a given set of messages, and returns only when all -// messages in the set have either succeeded or failed. Note that messages -// can succeed and fail individually; if some succeed and some fail, -// SendMessages will return an error. -func (p *dummyProducer) SendMessages(msgs []*sarama.ProducerMessage) error { - for _, m := range msgs { - p.SendMessage(m) - } - return nil -} - -// Close shuts down the producer and waits for any buffered messages to be -// flushed. You must call this function before a producer object passes out of -// scope, as it may otherwise leak memory. You must call this before calling -// Close on the underlying client. -func (p *dummyProducer) Close() error { - return nil -} - -func deleteDatabase() { - err := os.Remove(roomserverDBFilePath) - if err != nil { - fmt.Printf("failed to delete database %s: %s\n", roomserverDBFilePath, err) - } -} - -type fledglingEvent struct { - Type string - StateKey *string - Content interface{} - Sender string - RoomID string -} - -func mustCreateEvents(t *testing.T, roomVer gomatrixserverlib.RoomVersion, events []fledglingEvent) (result []*gomatrixserverlib.HeaderedEvent) { - t.Helper() - depth := int64(1) - seed := make([]byte, ed25519.SeedSize) // zero seed - key := ed25519.NewKeyFromSeed(seed) - var prevs []string - roomState := make(map[gomatrixserverlib.StateKeyTuple]string) // state -> event ID - for _, ev := range events { - eb := gomatrixserverlib.EventBuilder{ - Sender: ev.Sender, - Depth: depth, - Type: ev.Type, - StateKey: ev.StateKey, - RoomID: ev.RoomID, - PrevEvents: prevs, - } - err := eb.SetContent(ev.Content) - if err != nil { - t.Fatalf("mustCreateEvent: failed to marshal event content %+v", ev.Content) - } - stateNeeded, err := gomatrixserverlib.StateNeededForEventBuilder(&eb) - if err != nil { - t.Fatalf("mustCreateEvent: failed to work out auth_events : %s", err) - } - var authEvents []string - for _, tuple := range stateNeeded.Tuples() { - eventID := roomState[tuple] - if eventID != "" { - authEvents = append(authEvents, eventID) - } - } - eb.AuthEvents = authEvents - signedEvent, err := eb.Build(time.Now(), testOrigin, "ed25519:test", key, roomVer) - if err != nil { - t.Fatalf("mustCreateEvent: failed to sign event: %s", err) - } - depth++ - prevs = []string{signedEvent.EventID()} - if ev.StateKey != nil { - roomState[gomatrixserverlib.StateKeyTuple{ - EventType: ev.Type, - StateKey: *ev.StateKey, - }] = signedEvent.EventID() - } - result = append(result, signedEvent.Headered(roomVer)) - } - return -} - -func mustLoadRawEvents(t *testing.T, ver gomatrixserverlib.RoomVersion, events 
[]json.RawMessage) []*gomatrixserverlib.HeaderedEvent { - t.Helper() - hs := make([]*gomatrixserverlib.HeaderedEvent, len(events)) - for i := range events { - e, err := gomatrixserverlib.NewEventFromTrustedJSON(events[i], false, ver) - if err != nil { - t.Fatalf("cannot load test data: " + err.Error()) - } - hs[i] = e.Headered(ver) - } - return hs -} - -func mustCreateRoomserverAPI(t *testing.T) (api.RoomserverInternalAPI, *dummyProducer) { - t.Helper() - cfg := &config.Dendrite{} - cfg.Defaults(true) - cfg.Global.ServerName = testOrigin - cfg.Global.Kafka.UseNaffka = true - cfg.RoomServer.Database = config.DatabaseOptions{ - ConnectionString: roomserverDBFileURI, - } - dp := &dummyProducer{ - topic: cfg.Global.Kafka.TopicFor(config.TopicOutputRoomEvent), - } - cache, err := caching.NewInMemoryLRUCache(false) - if err != nil { - t.Fatalf("failed to make caches: %s", err) - } - base := &base.BaseDendrite{ - Caches: cache, - Cfg: cfg, - } - roomserverDB, err := storage.Open(&cfg.RoomServer.Database, base.Caches) - if err != nil { - logrus.WithError(err).Panicf("failed to connect to room server db") - } - return internal.NewRoomserverAPI( - &cfg.RoomServer, roomserverDB, dp, string(cfg.Global.Kafka.TopicFor(config.TopicOutputRoomEvent)), - base.Caches, nil, - ), dp -} - -func mustSendEvents(t *testing.T, ver gomatrixserverlib.RoomVersion, events []json.RawMessage) (api.RoomserverInternalAPI, *dummyProducer, []*gomatrixserverlib.HeaderedEvent) { - t.Helper() - rsAPI, dp := mustCreateRoomserverAPI(t) - hevents := mustLoadRawEvents(t, ver, events) - if err := api.SendEvents(ctx, rsAPI, api.KindNew, hevents, testOrigin, nil); err != nil { - t.Errorf("failed to SendEvents: %s", err) - } - return rsAPI, dp, hevents -} - -func TestOutputRedactedEvent(t *testing.T) { - redactionEvents := []json.RawMessage{ - // create event - []byte(`{"auth_events":[],"content":{"creator":"@userid:kaer.morhen"},"depth":0,"event_id":"$N4us6vqqq3RjvpKd:kaer.morhen","hashes":{"sha256":"WTdrCn/YsiounXcJPsLP8xT0ZjHiO5Ov0NvXYmK2onE"},"origin":"kaer.morhen","origin_server_ts":0,"prev_events":[],"prev_state":[],"room_id":"!roomid:kaer.morhen","sender":"@userid:kaer.morhen","signatures":{"kaer.morhen":{"ed25519:auto":"9+5JcpaN5b5KlHYHGp6r+GoNDH98lbfzGYwjfxensa5C5D/bDACaYnMDLnhwsHOE5nxgI+jT/GV271pz6PMSBQ"}},"state_key":"","type":"m.room.create"}`), - // join event - []byte(`{"auth_events":[["$N4us6vqqq3RjvpKd:kaer.morhen",{"sha256":"SylirfgfXFhscZL7p10NmOa1nFFEckiwz0lAideQMIM"}]],"content":{"membership":"join"},"depth":1,"event_id":"$6sUiGPQ0a3tqYGKo:kaer.morhen","hashes":{"sha256":"eYVBC7RO+FlxRyW1aXYf/ad4Dzi7T93tArdGw3r4RwQ"},"origin":"kaer.morhen","origin_server_ts":0,"prev_events":[["$N4us6vqqq3RjvpKd:kaer.morhen",{"sha256":"SylirfgfXFhscZL7p10NmOa1nFFEckiwz0lAideQMIM"}]],"prev_state":[],"room_id":"!roomid:kaer.morhen","sender":"@userid:kaer.morhen","signatures":{"kaer.morhen":{"ed25519:auto":"tiDBTPFa53YMfHiupX3vSRE/ZcCiCjmGt7gDpIpDpwZapeays5Vqqcqb7KiywrDldpTkrrdJBAw2jXcq6ZyhDw"}},"state_key":"@userid:kaer.morhen","type":"m.room.member"}`), - // room name - []byte(`{"auth_events":[["$N4us6vqqq3RjvpKd:kaer.morhen",{"sha256":"SylirfgfXFhscZL7p10NmOa1nFFEckiwz0lAideQMIM"}],["$6sUiGPQ0a3tqYGKo:kaer.morhen",{"sha256":"IS4HSMqpqVUGh1Z3qgC99YcaizjCoO4yFhYYe8j53IE"}]],"content":{"name":"My Room 
Name"},"depth":2,"event_id":"$VC1zZ9YWwuUbSNHD:kaer.morhen","hashes":{"sha256":"bpqTkfLx6KHzWz7/wwpsXnXwJWEGW14aV63ffexzDFg"},"origin":"kaer.morhen","origin_server_ts":0,"prev_events":[["$6sUiGPQ0a3tqYGKo:kaer.morhen",{"sha256":"IS4HSMqpqVUGh1Z3qgC99YcaizjCoO4yFhYYe8j53IE"}]],"prev_state":[],"room_id":"!roomid:kaer.morhen","sender":"@userid:kaer.morhen","signatures":{"kaer.morhen":{"ed25519:auto":"mhJZ3X4bAKrF/T0mtPf1K2Tmls0h6xGY1IPDpJ/SScQBqDlu3HQR2BPa7emqj5bViyLTWVNh+ZCpzx/6STTrAg"}},"state_key":"","type":"m.room.name"}`), - // redact room name - []byte(`{"auth_events":[["$N4us6vqqq3RjvpKd:kaer.morhen",{"sha256":"SylirfgfXFhscZL7p10NmOa1nFFEckiwz0lAideQMIM"}],["$6sUiGPQ0a3tqYGKo:kaer.morhen",{"sha256":"IS4HSMqpqVUGh1Z3qgC99YcaizjCoO4yFhYYe8j53IE"}]],"content":{"reason":"Spamming"},"depth":3,"event_id":"$tJI0pE3b8u9UMYpT:kaer.morhen","hashes":{"sha256":"/3TStqa5SQqYaEtl7ajEvSRvu6d12MMKfICUzrBpd2Q"},"origin":"kaer.morhen","origin_server_ts":0,"prev_events":[["$VC1zZ9YWwuUbSNHD:kaer.morhen",{"sha256":"+l8cNa7syvm0EF7CAmQRlYknLEMjivnI4FLhB/TUBEY"}]],"redacts":"$VC1zZ9YWwuUbSNHD:kaer.morhen","room_id":"!roomid:kaer.morhen","sender":"@userid:kaer.morhen","signatures":{"kaer.morhen":{"ed25519:auto":"QBOh+amf0vTJbm6+9VwAcR9uJviBIor2KON0Y7+EyQx5YbUZEzW1HPeJxarLIHBcxMzgOVzjuM+StzjbUgDzAg"}},"type":"m.room.redaction"}`), - // message - []byte(`{"auth_events":[["$N4us6vqqq3RjvpKd:kaer.morhen",{"sha256":"SylirfgfXFhscZL7p10NmOa1nFFEckiwz0lAideQMIM"}],["$6sUiGPQ0a3tqYGKo:kaer.morhen",{"sha256":"IS4HSMqpqVUGh1Z3qgC99YcaizjCoO4yFhYYe8j53IE"}]],"content":{"body":"Test Message"},"depth":4,"event_id":"$o8KHsgSIYbJrddnd:kaer.morhen","hashes":{"sha256":"IE/rGVlKOpiGWeIo887g1CK1drYqcWDZhL6THZHkJ1c"},"origin":"kaer.morhen","origin_server_ts":0,"prev_events":[["$tJI0pE3b8u9UMYpT:kaer.morhen",{"sha256":"zvmwyXuDox7jpA16JRH6Fc1zbfQht2tpkBbMTUOi3Jw"}]],"room_id":"!roomid:kaer.morhen","sender":"@userid:kaer.morhen","signatures":{"kaer.morhen":{"ed25519:auto":"/3z+pJjiJXWhwfqIEzmNksvBHCoXTktK/y0rRuWJXw6i1+ygRG/suDCKhFuuz6gPapRmEMPVILi2mJqHHXPKAg"}},"type":"m.room.message"}`), - // redact previous message - []byte(`{"auth_events":[["$N4us6vqqq3RjvpKd:kaer.morhen",{"sha256":"SylirfgfXFhscZL7p10NmOa1nFFEckiwz0lAideQMIM"}],["$6sUiGPQ0a3tqYGKo:kaer.morhen",{"sha256":"IS4HSMqpqVUGh1Z3qgC99YcaizjCoO4yFhYYe8j53IE"}]],"content":{"reason":"Spamming more"},"depth":5,"event_id":"$UpsE8belb2gJItJG:kaer.morhen","hashes":{"sha256":"zU8PWJOld/I7OtjdpltFSKC+DMNm2ZyEXAHcprsafD0"},"origin":"kaer.morhen","origin_server_ts":0,"prev_events":[["$o8KHsgSIYbJrddnd:kaer.morhen",{"sha256":"UgjMuCFXH4warIjKuwlRq9zZ6dSJrZWCd+CkqtgLSHM"}]],"redacts":"$o8KHsgSIYbJrddnd:kaer.morhen","room_id":"!roomid:kaer.morhen","sender":"@userid:kaer.morhen","signatures":{"kaer.morhen":{"ed25519:auto":"zxFGr/7aGOzqOEN6zRNrBpFkkMnfGFPbCteYL33wC+PycBPIK+2WRa5qlAR2+lcLiK3HjIzwRYkKNsVFTqvRAw"}},"type":"m.room.redaction"}`), - } - var redactedOutputs []api.OutputEvent - deleteDatabase() - _, producer, hevents := mustSendEvents(t, gomatrixserverlib.RoomVersionV1, redactionEvents) - defer deleteDatabase() - for _, msg := range producer.producedMessages { - if msg.Type == api.OutputTypeRedactedEvent { - redactedOutputs = append(redactedOutputs, *msg) - } - } - wantRedactedOutputs := []api.OutputEvent{ - { - Type: api.OutputTypeRedactedEvent, - RedactedEvent: &api.OutputRedactedEvent{ - RedactedEventID: hevents[2].EventID(), - RedactedBecause: hevents[3], - }, - }, - { - Type: api.OutputTypeRedactedEvent, - RedactedEvent: &api.OutputRedactedEvent{ - RedactedEventID: 
hevents[4].EventID(), - RedactedBecause: hevents[5], - }, - }, - } - t.Logf("redactedOutputs: %+v", redactedOutputs) - if len(wantRedactedOutputs) != len(redactedOutputs) { - t.Fatalf("Got %d redacted events, want %d", len(redactedOutputs), len(wantRedactedOutputs)) - } - for i := 0; i < len(wantRedactedOutputs); i++ { - if !reflect.DeepEqual(*redactedOutputs[i].RedactedEvent, *wantRedactedOutputs[i].RedactedEvent) { - t.Errorf("OutputRedactionEvent %d: wrong event got:\n%+v want:\n%+v", i+1, redactedOutputs[i].RedactedEvent, wantRedactedOutputs[i].RedactedEvent) - } - } -} - -// This tests that rewriting state works correctly. -// This creates a small room with a create/join/name state, then replays it -// with a new room name. We expect the output events to contain the original events, -// followed by a single OutputNewRoomEvent with RewritesState set to true with the -// rewritten state events (with the 2nd room name). -func TestOutputRewritesState(t *testing.T) { - roomID := "!foo:" + string(testOrigin) - alice := "@alice:" + string(testOrigin) - emptyKey := "" - originalEvents := mustCreateEvents(t, gomatrixserverlib.RoomVersionV6, []fledglingEvent{ - { - RoomID: roomID, - Sender: alice, - Content: map[string]interface{}{ - "creator": alice, - "room_version": "6", - }, - StateKey: &emptyKey, - Type: gomatrixserverlib.MRoomCreate, - }, - { - RoomID: roomID, - Sender: alice, - Content: map[string]interface{}{ - "membership": "join", - }, - StateKey: &alice, - Type: gomatrixserverlib.MRoomMember, - }, - { - RoomID: roomID, - Sender: alice, - Content: map[string]interface{}{ - "body": "hello world", - }, - StateKey: nil, - Type: "m.room.message", - }, - { - RoomID: roomID, - Sender: alice, - Content: map[string]interface{}{ - "name": "Room Name", - }, - StateKey: &emptyKey, - Type: "m.room.name", - }, - }) - rewriteEvents := mustCreateEvents(t, gomatrixserverlib.RoomVersionV6, []fledglingEvent{ - { - RoomID: roomID, - Sender: alice, - Content: map[string]interface{}{ - "creator": alice, - }, - StateKey: &emptyKey, - Type: gomatrixserverlib.MRoomCreate, - }, - { - RoomID: roomID, - Sender: alice, - Content: map[string]interface{}{ - "membership": "join", - }, - StateKey: &alice, - Type: gomatrixserverlib.MRoomMember, - }, - { - RoomID: roomID, - Sender: alice, - Content: map[string]interface{}{ - "name": "Room Name 2", - }, - StateKey: &emptyKey, - Type: "m.room.name", - }, - { - RoomID: roomID, - Sender: alice, - Content: map[string]interface{}{ - "body": "hello world 2", - }, - StateKey: nil, - Type: "m.room.message", - }, - }) - deleteDatabase() - rsAPI, producer := mustCreateRoomserverAPI(t) - defer deleteDatabase() - err := api.SendEvents(context.Background(), rsAPI, api.KindNew, originalEvents, testOrigin, nil) - if err != nil { - t.Fatalf("failed to send original events: %s", err) - } - // assert we got them produced, this is just a sanity check and isn't the intention of this test - if len(producer.producedMessages) != len(originalEvents) { - t.Fatalf("SendEvents didn't result in same number of produced output events: got %d want %d", len(producer.producedMessages), len(originalEvents)) - } - producer.producedMessages = nil // we aren't actually interested in these events, just the rewrite ones - - var inputEvents []api.InputRoomEvent - // slowly build up the state IDs again, we're basically telling the roomserver what to store as a snapshot - var stateIDs []string - // skip the last event, we'll use this to tie together the rewrite as the KindNew event - for i := 0; i < 
len(rewriteEvents)-1; i++ { - ev := rewriteEvents[i] - inputEvents = append(inputEvents, api.InputRoomEvent{ - Kind: api.KindOutlier, - Event: ev, - AuthEventIDs: ev.AuthEventIDs(), - HasState: true, - StateEventIDs: stateIDs, - }) - if ev.StateKey() != nil { - stateIDs = append(stateIDs, ev.EventID()) - } - } - lastEv := rewriteEvents[len(rewriteEvents)-1] - inputEvents = append(inputEvents, api.InputRoomEvent{ - Kind: api.KindNew, - Event: lastEv, - AuthEventIDs: lastEv.AuthEventIDs(), - HasState: true, - StateEventIDs: stateIDs, - }) - if err := api.SendInputRoomEvents(context.Background(), rsAPI, inputEvents); err != nil { - t.Fatalf("SendInputRoomEvents returned error for rewrite events: %s", err) - } - // we should just have one output event with the entire state of the room in it - if len(producer.producedMessages) != 1 { - t.Fatalf("Rewritten events got output, want only 1 got %d", len(producer.producedMessages)) - } - outputEvent := producer.producedMessages[len(producer.producedMessages)-1] - if !outputEvent.NewRoomEvent.RewritesState { - t.Errorf("RewritesState flag not set on output event") - } - if !reflect.DeepEqual(stateIDs, outputEvent.NewRoomEvent.AddsStateEventIDs) { - t.Errorf("Output event is missing room state event IDs, got %v want %v", outputEvent.NewRoomEvent.AddsStateEventIDs, stateIDs) - } - if !bytes.Equal(outputEvent.NewRoomEvent.Event.JSON(), lastEv.JSON()) { - t.Errorf( - "Output event isn't the latest KindNew event:\ngot %s\nwant %s", - string(outputEvent.NewRoomEvent.Event.JSON()), - string(lastEv.JSON()), - ) - } - if len(outputEvent.NewRoomEvent.AddStateEvents) != len(stateIDs) { - t.Errorf("Output event is missing room state events themselves, got %d want %d", len(outputEvent.NewRoomEvent.AddStateEvents), len(stateIDs)) - } - // make sure the state got overwritten, check the room name - hasRoomName := false - for _, ev := range outputEvent.NewRoomEvent.AddStateEvents { - if ev.Type() == "m.room.name" { - hasRoomName = string(ev.Content()) == `{"name":"Room Name 2"}` - } - } - if !hasRoomName { - t.Errorf("Output event did not overwrite room state") - } -} diff --git a/roomserver/state/state.go b/roomserver/state/state.go index 78398fc7c..15d592b46 100644 --- a/roomserver/state/state.go +++ b/roomserver/state/state.go @@ -32,11 +32,11 @@ import ( type StateResolution struct { db storage.Database - roomInfo types.RoomInfo + roomInfo *types.RoomInfo events map[types.EventNID]*gomatrixserverlib.Event } -func NewStateResolution(db storage.Database, roomInfo types.RoomInfo) StateResolution { +func NewStateResolution(db storage.Database, roomInfo *types.RoomInfo) StateResolution { return StateResolution{ db: db, roomInfo: roomInfo, diff --git a/roomserver/storage/postgres/events_table.go b/roomserver/storage/postgres/events_table.go index c549fb650..778cd8d73 100644 --- a/roomserver/storage/postgres/events_table.go +++ b/roomserver/storage/postgres/events_table.go @@ -311,7 +311,9 @@ func (s *eventStatements) BulkSelectStateAtEventByID( ); err != nil { return nil, err } - if result.BeforeStateSnapshotNID == 0 { + // Genuine create events are the only case where it's OK to have no previous state. 
+ isCreate := result.EventTypeNID == types.MRoomCreateNID && result.EventStateKeyNID == 1 + if result.BeforeStateSnapshotNID == 0 && !isCreate { return nil, types.MissingEventError( fmt.Sprintf("storage: missing state for event NID %d", result.EventNID), ) diff --git a/roomserver/storage/shared/storage.go b/roomserver/storage/shared/storage.go index f49536f4e..d4c5ebb5b 100644 --- a/roomserver/storage/shared/storage.go +++ b/roomserver/storage/shared/storage.go @@ -842,9 +842,13 @@ func (d *Database) GetStateEvent(ctx context.Context, roomID, evType, stateKey s if err != nil { return nil, err } - if roomInfo == nil || roomInfo.IsStub { + if roomInfo == nil { return nil, fmt.Errorf("room %s doesn't exist", roomID) } + // e.g invited rooms + if roomInfo.IsStub { + return nil, nil + } eventTypeNID, err := d.EventTypesTable.SelectEventTypeNID(ctx, nil, evType) if err == sql.ErrNoRows { // No rooms have an event of this type, otherwise we'd have an event type NID diff --git a/roomserver/storage/sqlite3/events_table.go b/roomserver/storage/sqlite3/events_table.go index 3127eb17d..7483e2815 100644 --- a/roomserver/storage/sqlite3/events_table.go +++ b/roomserver/storage/sqlite3/events_table.go @@ -322,7 +322,9 @@ func (s *eventStatements) BulkSelectStateAtEventByID( ); err != nil { return nil, err } - if result.BeforeStateSnapshotNID == 0 { + // Genuine create events are the only case where it's OK to have no previous state. + isCreate := result.EventTypeNID == types.MRoomCreateNID && result.EventStateKeyNID == 1 + if result.BeforeStateSnapshotNID == 0 && !isCreate { return nil, types.MissingEventError( fmt.Sprintf("storage: missing state for event NID %d", result.EventNID), ) diff --git a/setup/base/base.go b/setup/base/base.go index 06c971170..819fe1ad4 100644 --- a/setup/base/base.go +++ b/setup/base/base.go @@ -81,8 +81,6 @@ type BaseDendrite struct { Cfg *config.Dendrite Caches *caching.Caches DNSCache *gomatrixserverlib.DNSCache - // KafkaConsumer sarama.Consumer - // KafkaProducer sarama.SyncProducer } const NoListener = "" diff --git a/setup/config/config.go b/setup/config/config.go index 404b7178b..eb371a54b 100644 --- a/setup/config/config.go +++ b/setup/config/config.go @@ -40,7 +40,7 @@ var keyIDRegexp = regexp.MustCompile("^ed25519:[a-zA-Z0-9_]+$") // Version is the current version of the config format. // This will change whenever we make breaking changes to the config format. -const Version = 1 +const Version = 2 // Dendrite contains all the config used by a dendrite process. // Relative paths are resolved relative to the current working directory @@ -292,7 +292,7 @@ func (config *Dendrite) Derive() error { // SetDefaults sets default config values if they are not explicitly set. 
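// A small sketch of the rule the Postgres and SQLite event tables now share: a
// zero "before state" snapshot is only an error when the event is not the room's
// create event. The NID constants below are illustrative placeholders for
// types.MRoomCreateNID and the NID of the empty state key, not the real values.
package main

import "fmt"

type stateAtEvent struct {
	EventTypeNID           int64
	EventStateKeyNID       int64
	BeforeStateSnapshotNID int64
}

const (
	mRoomCreateNID   = 1 // placeholder for types.MRoomCreateNID
	emptyStateKeyNID = 1 // placeholder for the NID of the "" state key
)

func checkBeforeState(r stateAtEvent) error {
	isCreate := r.EventTypeNID == mRoomCreateNID && r.EventStateKeyNID == emptyStateKeyNID
	if r.BeforeStateSnapshotNID == 0 && !isCreate {
		return fmt.Errorf("storage: missing state for event")
	}
	return nil
}

func main() {
	fmt.Println(checkBeforeState(stateAtEvent{EventTypeNID: mRoomCreateNID, EventStateKeyNID: emptyStateKeyNID}))
	fmt.Println(checkBeforeState(stateAtEvent{EventTypeNID: 5, EventStateKeyNID: 2}))
}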
func (c *Dendrite) Defaults(generate bool) { - c.Version = 1 + c.Version = Version c.Global.Defaults(generate) c.ClientAPI.Defaults(generate) @@ -325,6 +325,7 @@ func (c *Dendrite) Verify(configErrs *ConfigErrors, isMonolith bool) { } func (c *Dendrite) Wiring() { + c.Global.JetStream.Matrix = &c.Global c.ClientAPI.Matrix = &c.Global c.EDUServer.Matrix = &c.Global c.FederationAPI.Matrix = &c.Global @@ -420,7 +421,11 @@ func (config *Dendrite) check(_ bool) error { // monolithic if config.Version != Version { configErrs.Add(fmt.Sprintf( - "unknown config version %q, expected %q", config.Version, Version, + "config version is %q, expected %q - this means that the format of the configuration "+ + "file has changed in some significant way, so please revisit the sample config "+ + "and ensure you are not missing any important options that may have been added "+ + "or changed recently!", + config.Version, Version, )) return configErrs } diff --git a/setup/config/config_global.go b/setup/config/config_global.go index 20ee6d37f..6f2306a6d 100644 --- a/setup/config/config_global.go +++ b/setup/config/config_global.go @@ -46,8 +46,8 @@ type Global struct { // Defaults to an empty array. TrustedIDServers []string `yaml:"trusted_third_party_id_servers"` - // Kafka/Naffka configuration - Kafka Kafka `yaml:"kafka"` + // JetStream configuration + JetStream JetStream `yaml:"jetstream"` // Metrics configuration Metrics Metrics `yaml:"metrics"` @@ -68,7 +68,7 @@ func (c *Global) Defaults(generate bool) { } c.KeyValidityPeriod = time.Hour * 24 * 7 - c.Kafka.Defaults(generate) + c.JetStream.Defaults(generate) c.Metrics.Defaults(generate) c.DNSCache.Defaults() c.Sentry.Defaults() @@ -78,7 +78,7 @@ func (c *Global) Verify(configErrs *ConfigErrors, isMonolith bool) { checkNotEmpty(configErrs, "global.server_name", string(c.ServerName)) checkNotEmpty(configErrs, "global.private_key", string(c.PrivateKeyPath)) - c.Kafka.Verify(configErrs, isMonolith) + c.JetStream.Verify(configErrs, isMonolith) c.Metrics.Verify(configErrs, isMonolith) c.Sentry.Verify(configErrs, isMonolith) c.DNSCache.Verify(configErrs, isMonolith) diff --git a/setup/config/config_jetstream.go b/setup/config/config_jetstream.go new file mode 100644 index 000000000..94e2d88b3 --- /dev/null +++ b/setup/config/config_jetstream.go @@ -0,0 +1,46 @@ +package config + +import ( + "fmt" + + "github.com/nats-io/nats.go" +) + +type JetStream struct { + Matrix *Global `yaml:"-"` + + // Persistent directory to store JetStream streams in. + StoragePath Path `yaml:"storage_path"` + // A list of NATS addresses to connect to. If none are specified, an + // internal NATS server will be used when running in monolith mode only. + Addresses []string `yaml:"addresses"` + // The prefix to use for stream names for this homeserver - really only + // useful if running more than one Dendrite on the same NATS deployment. + TopicPrefix string `yaml:"topic_prefix"` + // Keep all storage in memory. This is mostly useful for unit tests. 
+ InMemory bool `yaml:"in_memory"` +} + +func (c *JetStream) TopicFor(name string) string { + return fmt.Sprintf("%s%s", c.TopicPrefix, name) +} + +func (c *JetStream) Durable(name string) nats.SubOpt { + return nats.Durable(c.TopicFor(name)) +} + +func (c *JetStream) Defaults(generate bool) { + c.Addresses = []string{} + c.TopicPrefix = "Dendrite" + if generate { + c.StoragePath = Path("./") + } +} + +func (c *JetStream) Verify(configErrs *ConfigErrors, isMonolith bool) { + // If we are running in a polylith deployment then we need at least + // one NATS JetStream server to talk to. + if !isMonolith { + checkNotZero(configErrs, "global.jetstream.addresses", int64(len(c.Addresses))) + } +} diff --git a/setup/config/config_kafka.go b/setup/config/config_kafka.go deleted file mode 100644 index 5a61f17eb..000000000 --- a/setup/config/config_kafka.go +++ /dev/null @@ -1,63 +0,0 @@ -package config - -import "fmt" - -// Defined Kafka topics. -const ( - TopicOutputTypingEvent = "OutputTypingEvent" - TopicOutputSendToDeviceEvent = "OutputSendToDeviceEvent" - TopicOutputKeyChangeEvent = "OutputKeyChangeEvent" - TopicOutputRoomEvent = "OutputRoomEvent" - TopicOutputClientData = "OutputClientData" - TopicOutputReceiptEvent = "OutputReceiptEvent" -) - -type Kafka struct { - // A list of kafka addresses to connect to. - Addresses []string `yaml:"addresses"` - // The prefix to use for Kafka topic names for this homeserver - really only - // useful if running more than one Dendrite on the same Kafka deployment. - TopicPrefix string `yaml:"topic_prefix"` - // Whether to use naffka instead of kafka. - // Naffka can only be used when running dendrite as a single monolithic server. - // Kafka can be used both with a monolithic server and when running the - // components as separate servers. - UseNaffka bool `yaml:"use_naffka"` - // The Naffka database is used internally by the naffka library, if used. - Database DatabaseOptions `yaml:"naffka_database"` - // The max size a Kafka message passed between consumer/producer can have - // Equals roughly max.message.bytes / fetch.message.max.bytes in Kafka - MaxMessageBytes *int `yaml:"max_message_bytes"` -} - -func (k *Kafka) TopicFor(name string) string { - return fmt.Sprintf("%s%s", k.TopicPrefix, name) -} - -func (c *Kafka) Defaults(generate bool) { - c.UseNaffka = true - c.Database.Defaults(10) - if generate { - c.Addresses = []string{"localhost:2181"} - c.Database.ConnectionString = DataSource("file:naffka.db") - } - c.TopicPrefix = "Dendrite" - - maxBytes := 1024 * 1024 * 8 // about 8MB - c.MaxMessageBytes = &maxBytes -} - -func (c *Kafka) Verify(configErrs *ConfigErrors, isMonolith bool) { - if c.UseNaffka { - if !isMonolith { - configErrs.Add("naffka can only be used in a monolithic server") - } - checkNotEmpty(configErrs, "global.kafka.database.connection_string", string(c.Database.ConnectionString)) - } else { - // If we aren't using naffka then we need to have at least one kafka - // server to talk to. 
- checkNotZero(configErrs, "global.kafka.addresses", int64(len(c.Addresses))) - } - checkNotEmpty(configErrs, "global.kafka.topic_prefix", string(c.TopicPrefix)) - checkPositive(configErrs, "global.kafka.max_message_bytes", int64(*c.MaxMessageBytes)) -} diff --git a/setup/config/config_test.go b/setup/config/config_test.go index ffe9edab2..5aa54929e 100644 --- a/setup/config/config_test.go +++ b/setup/config/config_test.go @@ -33,7 +33,7 @@ func TestLoadConfigRelative(t *testing.T) { } const testConfig = ` -version: 1 +version: 2 global: server_name: localhost private_key: matrix_key.pem diff --git a/setup/jetstream/helpers.go b/setup/jetstream/helpers.go new file mode 100644 index 000000000..1891b96b3 --- /dev/null +++ b/setup/jetstream/helpers.go @@ -0,0 +1,12 @@ +package jetstream + +import "github.com/nats-io/nats.go" + +func WithJetStreamMessage(msg *nats.Msg, f func(msg *nats.Msg) bool) { + _ = msg.InProgress() + if f(msg) { + _ = msg.Ack() + } else { + _ = msg.Nak() + } +} diff --git a/setup/jetstream/nats.go b/setup/jetstream/nats.go new file mode 100644 index 000000000..5d7937b5c --- /dev/null +++ b/setup/jetstream/nats.go @@ -0,0 +1,99 @@ +package jetstream + +import ( + "strings" + "sync" + "time" + + "github.com/Shopify/sarama" + "github.com/matrix-org/dendrite/setup/config" + "github.com/sirupsen/logrus" + + saramajs "github.com/S7evinK/saramajetstream" + natsserver "github.com/nats-io/nats-server/v2/server" + "github.com/nats-io/nats.go" + natsclient "github.com/nats-io/nats.go" +) + +var natsServer *natsserver.Server +var natsServerMutex sync.Mutex + +func Prepare(cfg *config.JetStream) (nats.JetStreamContext, sarama.Consumer, sarama.SyncProducer) { + // check if we need an in-process NATS Server + if len(cfg.Addresses) != 0 { + return setupNATS(cfg, nil) + } + natsServerMutex.Lock() + if natsServer == nil { + var err error + natsServer, err = natsserver.NewServer(&natsserver.Options{ + ServerName: "monolith", + DontListen: true, + JetStream: true, + StoreDir: string(cfg.StoragePath), + NoSystemAccount: true, + AllowNewAccounts: false, + MaxPayload: 16 * 1024 * 1024, + }) + if err != nil { + panic(err) + } + natsServer.ConfigureLogger() + go natsServer.Start() + } + natsServerMutex.Unlock() + if !natsServer.ReadyForConnections(time.Second * 10) { + logrus.Fatalln("NATS did not start in time") + } + nc, err := natsclient.Connect("", natsclient.InProcessServer(natsServer)) + if err != nil { + logrus.Fatalln("Failed to create NATS client") + } + return setupNATS(cfg, nc) +} + +func setupNATS(cfg *config.JetStream, nc *natsclient.Conn) (nats.JetStreamContext, sarama.Consumer, sarama.SyncProducer) { + if nc == nil { + var err error + nc, err = nats.Connect(strings.Join(cfg.Addresses, ",")) + if err != nil { + logrus.WithError(err).Panic("Unable to connect to NATS") + return nil, nil, nil + } + } + + s, err := nc.JetStream() + if err != nil { + logrus.WithError(err).Panic("Unable to get JetStream context") + return nil, nil, nil + } + + for _, stream := range streams { // streams are defined in streams.go + name := cfg.TopicFor(stream.Name) + info, err := s.StreamInfo(name) + if err != nil && err != natsclient.ErrStreamNotFound { + logrus.WithError(err).Fatal("Unable to get stream info") + } + if info == nil { + stream.Subjects = []string{name} + + // If we're trying to keep everything in memory (e.g. unit tests) + // then overwrite the storage policy. 
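// An illustrative sketch of the Ack/Nak convention behind the WithJetStreamMessage
// helper added above: the callback returns true when the message has been dealt
// with (including "invalid, skip it") and false when it should be redelivered.
// The acker interface is a hypothetical stand-in for *nats.Msg.
package main

import "fmt"

type acker interface {
	InProgress() error
	Ack() error
	Nak() error
}

func withMessage(msg acker, f func(acker) bool) {
	_ = msg.InProgress() // tell the server we are still working on this delivery
	if f(msg) {
		_ = msg.Ack()
	} else {
		_ = msg.Nak()
	}
}

type fakeMsg struct{}

func (fakeMsg) InProgress() error { fmt.Println("in progress"); return nil }
func (fakeMsg) Ack() error        { fmt.Println("ack"); return nil }
func (fakeMsg) Nak() error        { fmt.Println("nak"); return nil }

func main() {
	withMessage(fakeMsg{}, func(acker) bool { return true })  // handled: ack
	withMessage(fakeMsg{}, func(acker) bool { return false }) // transient failure: nak
}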
+ if cfg.InMemory { + stream.Storage = nats.MemoryStorage + } + + // Namespace the streams without modifying the original streams + // array, otherwise we end up with namespaces on namespaces. + namespaced := *stream + namespaced.Name = name + if _, err = s.AddStream(&namespaced); err != nil { + logrus.WithError(err).WithField("stream", name).Fatal("Unable to add stream") + } + } + } + + consumer := saramajs.NewJetStreamConsumer(nc, s, "") + producer := saramajs.NewJetStreamProducer(nc, s, "") + return s, consumer, producer +} diff --git a/setup/jetstream/streams.go b/setup/jetstream/streams.go new file mode 100644 index 000000000..5810a2a91 --- /dev/null +++ b/setup/jetstream/streams.go @@ -0,0 +1,61 @@ +package jetstream + +import ( + "time" + + "github.com/nats-io/nats.go" +) + +const ( + UserID = "user_id" + RoomID = "room_id" +) + +var ( + InputRoomEvent = "InputRoomEvent" + OutputRoomEvent = "OutputRoomEvent" + OutputSendToDeviceEvent = "OutputSendToDeviceEvent" + OutputKeyChangeEvent = "OutputKeyChangeEvent" + OutputTypingEvent = "OutputTypingEvent" + OutputClientData = "OutputClientData" + OutputReceiptEvent = "OutputReceiptEvent" +) + +var streams = []*nats.StreamConfig{ + { + Name: InputRoomEvent, + Retention: nats.WorkQueuePolicy, + Storage: nats.FileStorage, + }, + { + Name: OutputRoomEvent, + Retention: nats.InterestPolicy, + Storage: nats.FileStorage, + }, + { + Name: OutputSendToDeviceEvent, + Retention: nats.InterestPolicy, + Storage: nats.FileStorage, + }, + { + Name: OutputKeyChangeEvent, + Retention: nats.LimitsPolicy, + Storage: nats.FileStorage, + }, + { + Name: OutputTypingEvent, + Retention: nats.InterestPolicy, + Storage: nats.MemoryStorage, + MaxAge: time.Second * 60, + }, + { + Name: OutputClientData, + Retention: nats.InterestPolicy, + Storage: nats.FileStorage, + }, + { + Name: OutputReceiptEvent, + Retention: nats.InterestPolicy, + Storage: nats.FileStorage, + }, +} diff --git a/setup/kafka/kafka.go b/setup/kafka/kafka.go deleted file mode 100644 index a2902c962..000000000 --- a/setup/kafka/kafka.go +++ /dev/null @@ -1,58 +0,0 @@ -package kafka - -import ( - "github.com/Shopify/sarama" - "github.com/matrix-org/dendrite/setup/config" - "github.com/matrix-org/naffka" - naffkaStorage "github.com/matrix-org/naffka/storage" - "github.com/sirupsen/logrus" -) - -func SetupConsumerProducer(cfg *config.Kafka) (sarama.Consumer, sarama.SyncProducer) { - if cfg.UseNaffka { - return setupNaffka(cfg) - } - return setupKafka(cfg) -} - -// setupKafka creates kafka consumer/producer pair from the config. -func setupKafka(cfg *config.Kafka) (sarama.Consumer, sarama.SyncProducer) { - sCfg := sarama.NewConfig() - sCfg.Producer.MaxMessageBytes = *cfg.MaxMessageBytes - sCfg.Producer.Return.Successes = true - sCfg.Consumer.Fetch.Default = int32(*cfg.MaxMessageBytes) - - consumer, err := sarama.NewConsumer(cfg.Addresses, sCfg) - if err != nil { - logrus.WithError(err).Panic("failed to start kafka consumer") - } - - producer, err := sarama.NewSyncProducer(cfg.Addresses, sCfg) - if err != nil { - logrus.WithError(err).Panic("failed to setup kafka producers") - } - - return consumer, producer -} - -// In monolith mode with Naffka, we don't have the same constraints about -// consuming the same topic from more than one place like we do with Kafka. -// Therefore, we will only open one Naffka connection in case Naffka is -// running on SQLite. -var naffkaInstance *naffka.Naffka - -// setupNaffka creates kafka consumer/producer pair from the config. 
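// A sketch of the copy-before-rename step in the stream registration loop above:
// the package-level stream configs are shared pointers, so AddStream is handed a
// value copy carrying the prefixed name, which keeps the prefix from being applied
// twice if registration runs again. streamConfig and addStream are simplified
// stand-ins for nats.StreamConfig and JetStreamContext.AddStream.
package main

import "fmt"

type streamConfig struct {
	Name     string
	Subjects []string
}

func registerAll(prefix string, streams []*streamConfig, addStream func(streamConfig)) {
	for _, stream := range streams {
		name := prefix + stream.Name
		namespaced := *stream // copy, so the shared config keeps its un-prefixed name
		namespaced.Name = name
		namespaced.Subjects = []string{name}
		addStream(namespaced)
	}
}

func main() {
	streams := []*streamConfig{{Name: "OutputRoomEvent"}}
	registerAll("Dendrite", streams, func(s streamConfig) { fmt.Println("adding", s.Name) })
	registerAll("Dendrite", streams, func(s streamConfig) { fmt.Println("adding", s.Name) })
	// Both runs add "DendriteOutputRoomEvent"; the shared config still says "OutputRoomEvent".
}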
-func setupNaffka(cfg *config.Kafka) (sarama.Consumer, sarama.SyncProducer) { - if naffkaInstance != nil { - return naffkaInstance, naffkaInstance - } - naffkaDB, err := naffkaStorage.NewDatabase(string(cfg.Database.ConnectionString)) - if err != nil { - logrus.WithError(err).Panic("Failed to setup naffka database") - } - naffkaInstance, err = naffka.New(naffkaDB) - if err != nil { - logrus.WithError(err).Panic("Failed to setup naffka") - } - return naffkaInstance, naffkaInstance -} diff --git a/setup/mscs/msc2836/msc2836.go b/setup/mscs/msc2836/msc2836.go index 7e2ecfb9d..8a35e4143 100644 --- a/setup/mscs/msc2836/msc2836.go +++ b/setup/mscs/msc2836/msc2836.go @@ -643,13 +643,12 @@ func (rc *reqCtx) injectResponseToRoomserver(res *gomatrixserverlib.MSC2836Event var ires []roomserver.InputRoomEvent for _, outlier := range append(eventsInOrder, messageEvents...) { ires = append(ires, roomserver.InputRoomEvent{ - Kind: roomserver.KindOutlier, - Event: outlier.Headered(outlier.Version()), - AuthEventIDs: outlier.AuthEventIDs(), + Kind: roomserver.KindOutlier, + Event: outlier.Headered(outlier.Version()), }) } // we've got the data by this point so use a background context - err = roomserver.SendInputRoomEvents(context.Background(), rc.rsAPI, ires) + err = roomserver.SendInputRoomEvents(context.Background(), rc.rsAPI, ires, false) if err != nil { util.GetLogger(rc.ctx).WithError(err).Error("failed to inject MSC2836EventRelationshipsResponse into the roomserver") } diff --git a/syncapi/consumers/clientapi.go b/syncapi/consumers/clientapi.go index a166ae14d..1ec9beb04 100644 --- a/syncapi/consumers/clientapi.go +++ b/syncapi/consumers/clientapi.go @@ -18,90 +18,90 @@ import ( "context" "encoding/json" - "github.com/Shopify/sarama" "github.com/getsentry/sentry-go" - "github.com/matrix-org/dendrite/internal" "github.com/matrix-org/dendrite/internal/eventutil" "github.com/matrix-org/dendrite/setup/config" + "github.com/matrix-org/dendrite/setup/jetstream" "github.com/matrix-org/dendrite/setup/process" "github.com/matrix-org/dendrite/syncapi/notifier" "github.com/matrix-org/dendrite/syncapi/storage" "github.com/matrix-org/dendrite/syncapi/types" + "github.com/nats-io/nats.go" log "github.com/sirupsen/logrus" ) // OutputClientDataConsumer consumes events that originated in the client API server. type OutputClientDataConsumer struct { - clientAPIConsumer *internal.ContinualConsumer - db storage.Database - stream types.StreamProvider - notifier *notifier.Notifier + ctx context.Context + jetstream nats.JetStreamContext + durable nats.SubOpt + topic string + db storage.Database + stream types.StreamProvider + notifier *notifier.Notifier } // NewOutputClientDataConsumer creates a new OutputClientData consumer. Call Start() to begin consuming from room servers. 
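// A minimal sketch of the header convention the sync API consumers below rely on:
// where the Kafka consumers read the user ID from the message key, the JetStream
// consumers read it from the "user_id" NATS header (the jetstream.UserID constant
// defined above). The subject name and payload here are made up for illustration.
package main

import (
	"fmt"

	"github.com/nats-io/nats.go"
)

func main() {
	msg := nats.NewMsg("DendriteOutputClientData")
	msg.Header.Set("user_id", "@alice:example.com")
	msg.Data = []byte(`{"room_id":"!room:example.com","type":"m.fully_read"}`)

	// The consuming side reads the same header back out of the delivered message.
	fmt.Println("user:", msg.Header.Get("user_id"))
	fmt.Println("data:", string(msg.Data))
}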
func NewOutputClientDataConsumer( process *process.ProcessContext, cfg *config.SyncAPI, - kafkaConsumer sarama.Consumer, + js nats.JetStreamContext, store storage.Database, notifier *notifier.Notifier, stream types.StreamProvider, ) *OutputClientDataConsumer { - consumer := internal.ContinualConsumer{ - Process: process, - ComponentName: "syncapi/clientapi", - Topic: string(cfg.Matrix.Kafka.TopicFor(config.TopicOutputClientData)), - Consumer: kafkaConsumer, - PartitionStore: store, + return &OutputClientDataConsumer{ + ctx: process.Context(), + jetstream: js, + topic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputClientData), + durable: cfg.Matrix.JetStream.Durable("SyncAPIClientAPIConsumer"), + db: store, + notifier: notifier, + stream: stream, } - s := &OutputClientDataConsumer{ - clientAPIConsumer: &consumer, - db: store, - notifier: notifier, - stream: stream, - } - consumer.ProcessMessage = s.onMessage - - return s } // Start consuming from room servers func (s *OutputClientDataConsumer) Start() error { - return s.clientAPIConsumer.Start() + _, err := s.jetstream.Subscribe(s.topic, s.onMessage, s.durable) + return err } // onMessage is called when the sync server receives a new event from the client API server output log. // It is not safe for this function to be called from multiple goroutines, or else the // sync stream position may race and be incorrectly calculated. -func (s *OutputClientDataConsumer) onMessage(msg *sarama.ConsumerMessage) error { - // Parse out the event JSON - var output eventutil.AccountData - if err := json.Unmarshal(msg.Value, &output); err != nil { - // If the message was invalid, log it and move on to the next message in the stream - log.WithError(err).Errorf("client API server output log: message parse failure") - sentry.CaptureException(err) - return nil - } +func (s *OutputClientDataConsumer) onMessage(msg *nats.Msg) { + jetstream.WithJetStreamMessage(msg, func(msg *nats.Msg) bool { + // Parse out the event JSON + userID := msg.Header.Get(jetstream.UserID) + var output eventutil.AccountData + if err := json.Unmarshal(msg.Data, &output); err != nil { + // If the message was invalid, log it and move on to the next message in the stream + log.WithError(err).Errorf("client API server output log: message parse failure") + sentry.CaptureException(err) + return true + } - log.WithFields(log.Fields{ - "type": output.Type, - "room_id": output.RoomID, - }).Info("received data from client API server") - - streamPos, err := s.db.UpsertAccountData( - context.TODO(), string(msg.Key), output.RoomID, output.Type, - ) - if err != nil { - sentry.CaptureException(err) log.WithFields(log.Fields{ - "type": output.Type, - "room_id": output.RoomID, - log.ErrorKey: err, - }).Panicf("could not save account data") - } + "type": output.Type, + "room_id": output.RoomID, + }).Info("received data from client API server") - s.stream.Advance(streamPos) - s.notifier.OnNewAccountData(string(msg.Key), types.StreamingToken{AccountDataPosition: streamPos}) + streamPos, err := s.db.UpsertAccountData( + s.ctx, userID, output.RoomID, output.Type, + ) + if err != nil { + sentry.CaptureException(err) + log.WithFields(log.Fields{ + "type": output.Type, + "room_id": output.RoomID, + log.ErrorKey: err, + }).Panicf("could not save account data") + } - return nil + s.stream.Advance(streamPos) + s.notifier.OnNewAccountData(userID, types.StreamingToken{AccountDataPosition: streamPos}) + + return true + }) } diff --git a/syncapi/consumers/eduserver_receipts.go b/syncapi/consumers/eduserver_receipts.go 
index 668f945bc..57d69d6fb 100644 --- a/syncapi/consumers/eduserver_receipts.go +++ b/syncapi/consumers/eduserver_receipts.go @@ -18,24 +18,27 @@ import ( "context" "encoding/json" - "github.com/Shopify/sarama" "github.com/getsentry/sentry-go" "github.com/matrix-org/dendrite/eduserver/api" - "github.com/matrix-org/dendrite/internal" "github.com/matrix-org/dendrite/setup/config" + "github.com/matrix-org/dendrite/setup/jetstream" "github.com/matrix-org/dendrite/setup/process" "github.com/matrix-org/dendrite/syncapi/notifier" "github.com/matrix-org/dendrite/syncapi/storage" "github.com/matrix-org/dendrite/syncapi/types" + "github.com/nats-io/nats.go" log "github.com/sirupsen/logrus" ) // OutputReceiptEventConsumer consumes events that originated in the EDU server. type OutputReceiptEventConsumer struct { - receiptConsumer *internal.ContinualConsumer - db storage.Database - stream types.StreamProvider - notifier *notifier.Notifier + ctx context.Context + jetstream nats.JetStreamContext + durable nats.SubOpt + topic string + db storage.Database + stream types.StreamProvider + notifier *notifier.Notifier } // NewOutputReceiptEventConsumer creates a new OutputReceiptEventConsumer. @@ -43,61 +46,54 @@ type OutputReceiptEventConsumer struct { func NewOutputReceiptEventConsumer( process *process.ProcessContext, cfg *config.SyncAPI, - kafkaConsumer sarama.Consumer, + js nats.JetStreamContext, store storage.Database, notifier *notifier.Notifier, stream types.StreamProvider, ) *OutputReceiptEventConsumer { - - consumer := internal.ContinualConsumer{ - Process: process, - ComponentName: "syncapi/eduserver/receipt", - Topic: cfg.Matrix.Kafka.TopicFor(config.TopicOutputReceiptEvent), - Consumer: kafkaConsumer, - PartitionStore: store, + return &OutputReceiptEventConsumer{ + ctx: process.Context(), + jetstream: js, + topic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputReceiptEvent), + durable: cfg.Matrix.JetStream.Durable("SyncAPIEDUServerReceiptConsumer"), + db: store, + notifier: notifier, + stream: stream, } - - s := &OutputReceiptEventConsumer{ - receiptConsumer: &consumer, - db: store, - notifier: notifier, - stream: stream, - } - - consumer.ProcessMessage = s.onMessage - - return s } // Start consuming from EDU api func (s *OutputReceiptEventConsumer) Start() error { - return s.receiptConsumer.Start() + _, err := s.jetstream.Subscribe(s.topic, s.onMessage, s.durable) + return err } -func (s *OutputReceiptEventConsumer) onMessage(msg *sarama.ConsumerMessage) error { - var output api.OutputReceiptEvent - if err := json.Unmarshal(msg.Value, &output); err != nil { - // If the message was invalid, log it and move on to the next message in the stream - log.WithError(err).Errorf("EDU server output log: message parse failure") - sentry.CaptureException(err) - return nil - } +func (s *OutputReceiptEventConsumer) onMessage(msg *nats.Msg) { + jetstream.WithJetStreamMessage(msg, func(msg *nats.Msg) bool { + var output api.OutputReceiptEvent + if err := json.Unmarshal(msg.Data, &output); err != nil { + // If the message was invalid, log it and move on to the next message in the stream + log.WithError(err).Errorf("EDU server output log: message parse failure") + sentry.CaptureException(err) + return true + } - streamPos, err := s.db.StoreReceipt( - context.TODO(), - output.RoomID, - output.Type, - output.UserID, - output.EventID, - output.Timestamp, - ) - if err != nil { - sentry.CaptureException(err) - return err - } + streamPos, err := s.db.StoreReceipt( + s.ctx, + output.RoomID, + output.Type, + output.UserID, 
+ output.EventID, + output.Timestamp, + ) + if err != nil { + sentry.CaptureException(err) + return true + } - s.stream.Advance(streamPos) - s.notifier.OnNewReceipt(output.RoomID, types.StreamingToken{ReceiptPosition: streamPos}) + s.stream.Advance(streamPos) + s.notifier.OnNewReceipt(output.RoomID, types.StreamingToken{ReceiptPosition: streamPos}) - return nil + return true + }) } diff --git a/syncapi/consumers/eduserver_sendtodevice.go b/syncapi/consumers/eduserver_sendtodevice.go index 5e626aefe..54e689fa1 100644 --- a/syncapi/consumers/eduserver_sendtodevice.go +++ b/syncapi/consumers/eduserver_sendtodevice.go @@ -18,27 +18,30 @@ import ( "context" "encoding/json" - "github.com/Shopify/sarama" "github.com/getsentry/sentry-go" "github.com/matrix-org/dendrite/eduserver/api" - "github.com/matrix-org/dendrite/internal" "github.com/matrix-org/dendrite/setup/config" + "github.com/matrix-org/dendrite/setup/jetstream" "github.com/matrix-org/dendrite/setup/process" "github.com/matrix-org/dendrite/syncapi/notifier" "github.com/matrix-org/dendrite/syncapi/storage" "github.com/matrix-org/dendrite/syncapi/types" "github.com/matrix-org/gomatrixserverlib" "github.com/matrix-org/util" + "github.com/nats-io/nats.go" log "github.com/sirupsen/logrus" ) // OutputSendToDeviceEventConsumer consumes events that originated in the EDU server. type OutputSendToDeviceEventConsumer struct { - sendToDeviceConsumer *internal.ContinualConsumer - db storage.Database - serverName gomatrixserverlib.ServerName // our server name - stream types.StreamProvider - notifier *notifier.Notifier + ctx context.Context + jetstream nats.JetStreamContext + durable nats.SubOpt + topic string + db storage.Database + serverName gomatrixserverlib.ServerName // our server name + stream types.StreamProvider + notifier *notifier.Notifier } // NewOutputSendToDeviceEventConsumer creates a new OutputSendToDeviceEventConsumer. 
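// A minimal end-to-end sketch of the consumer shape these sync API components are
// being moved to: subscribe to a JetStream subject with a durable name and ack or
// nak each delivery depending on whether it was handled. Assumes a NATS server
// with JetStream enabled at the default URL and an existing stream covering the
// "DendriteOutputClientData" subject; the subject and durable names are examples.
package main

import (
	"fmt"

	"github.com/nats-io/nats.go"
)

func main() {
	nc, err := nats.Connect(nats.DefaultURL)
	if err != nil {
		panic(err)
	}
	defer nc.Close()

	js, err := nc.JetStream()
	if err != nil {
		panic(err)
	}

	_, err = js.Subscribe("DendriteOutputClientData", func(msg *nats.Msg) {
		_ = msg.InProgress()
		fmt.Println("received", string(msg.Data))
		_ = msg.Ack() // or msg.Nak() to ask for redelivery
	}, nats.Durable("ExampleConsumer"))
	if err != nil {
		panic(err)
	}

	select {} // block forever; in Dendrite the process context handles shutdown
}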
@@ -46,78 +49,71 @@ type OutputSendToDeviceEventConsumer struct { func NewOutputSendToDeviceEventConsumer( process *process.ProcessContext, cfg *config.SyncAPI, - kafkaConsumer sarama.Consumer, + js nats.JetStreamContext, store storage.Database, notifier *notifier.Notifier, stream types.StreamProvider, ) *OutputSendToDeviceEventConsumer { - - consumer := internal.ContinualConsumer{ - Process: process, - ComponentName: "syncapi/eduserver/sendtodevice", - Topic: string(cfg.Matrix.Kafka.TopicFor(config.TopicOutputSendToDeviceEvent)), - Consumer: kafkaConsumer, - PartitionStore: store, + return &OutputSendToDeviceEventConsumer{ + ctx: process.Context(), + jetstream: js, + topic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputSendToDeviceEvent), + durable: cfg.Matrix.JetStream.Durable("SyncAPIEDUServerSendToDeviceConsumer"), + db: store, + serverName: cfg.Matrix.ServerName, + notifier: notifier, + stream: stream, } - - s := &OutputSendToDeviceEventConsumer{ - sendToDeviceConsumer: &consumer, - db: store, - serverName: cfg.Matrix.ServerName, - notifier: notifier, - stream: stream, - } - - consumer.ProcessMessage = s.onMessage - - return s } // Start consuming from EDU api func (s *OutputSendToDeviceEventConsumer) Start() error { - return s.sendToDeviceConsumer.Start() + _, err := s.jetstream.Subscribe(s.topic, s.onMessage, s.durable) + return err } -func (s *OutputSendToDeviceEventConsumer) onMessage(msg *sarama.ConsumerMessage) error { - var output api.OutputSendToDeviceEvent - if err := json.Unmarshal(msg.Value, &output); err != nil { - // If the message was invalid, log it and move on to the next message in the stream - log.WithError(err).Errorf("EDU server output log: message parse failure") - sentry.CaptureException(err) - return err - } +func (s *OutputSendToDeviceEventConsumer) onMessage(msg *nats.Msg) { + jetstream.WithJetStreamMessage(msg, func(msg *nats.Msg) bool { + var output api.OutputSendToDeviceEvent + if err := json.Unmarshal(msg.Data, &output); err != nil { + // If the message was invalid, log it and move on to the next message in the stream + log.WithError(err).Errorf("EDU server output log: message parse failure") + sentry.CaptureException(err) + return true + } - _, domain, err := gomatrixserverlib.SplitID('@', output.UserID) - if err != nil { - sentry.CaptureException(err) - return err - } - if domain != s.serverName { - return nil - } + _, domain, err := gomatrixserverlib.SplitID('@', output.UserID) + if err != nil { + sentry.CaptureException(err) + return true + } + if domain != s.serverName { + return true + } - util.GetLogger(context.TODO()).WithFields(log.Fields{ - "sender": output.Sender, - "user_id": output.UserID, - "device_id": output.DeviceID, - "event_type": output.Type, - }).Info("sync API received send-to-device event from EDU server") + util.GetLogger(context.TODO()).WithFields(log.Fields{ + "sender": output.Sender, + "user_id": output.UserID, + "device_id": output.DeviceID, + "event_type": output.Type, + }).Info("sync API received send-to-device event from EDU server") - streamPos, err := s.db.StoreNewSendForDeviceMessage( - context.TODO(), output.UserID, output.DeviceID, output.SendToDeviceEvent, - ) - if err != nil { - sentry.CaptureException(err) - log.WithError(err).Errorf("failed to store send-to-device message") - return err - } + streamPos, err := s.db.StoreNewSendForDeviceMessage( + s.ctx, output.UserID, output.DeviceID, output.SendToDeviceEvent, + ) + if err != nil { + sentry.CaptureException(err) + log.WithError(err).Errorf("failed to store 
send-to-device message") + return false + } - s.stream.Advance(streamPos) - s.notifier.OnNewSendToDevice( - output.UserID, - []string{output.DeviceID}, - types.StreamingToken{SendToDevicePosition: streamPos}, - ) + s.stream.Advance(streamPos) + s.notifier.OnNewSendToDevice( + output.UserID, + []string{output.DeviceID}, + types.StreamingToken{SendToDevicePosition: streamPos}, + ) - return nil + return true + }) } diff --git a/syncapi/consumers/eduserver_typing.go b/syncapi/consumers/eduserver_typing.go index 8d06e3ca8..de2f6f950 100644 --- a/syncapi/consumers/eduserver_typing.go +++ b/syncapi/consumers/eduserver_typing.go @@ -15,27 +15,31 @@ package consumers import ( + "context" "encoding/json" - "github.com/Shopify/sarama" "github.com/getsentry/sentry-go" "github.com/matrix-org/dendrite/eduserver/api" "github.com/matrix-org/dendrite/eduserver/cache" - "github.com/matrix-org/dendrite/internal" "github.com/matrix-org/dendrite/setup/config" + "github.com/matrix-org/dendrite/setup/jetstream" "github.com/matrix-org/dendrite/setup/process" "github.com/matrix-org/dendrite/syncapi/notifier" "github.com/matrix-org/dendrite/syncapi/storage" "github.com/matrix-org/dendrite/syncapi/types" + "github.com/nats-io/nats.go" log "github.com/sirupsen/logrus" ) // OutputTypingEventConsumer consumes events that originated in the EDU server. type OutputTypingEventConsumer struct { - typingConsumer *internal.ContinualConsumer - eduCache *cache.EDUCache - stream types.StreamProvider - notifier *notifier.Notifier + ctx context.Context + jetstream nats.JetStreamContext + durable nats.SubOpt + topic string + eduCache *cache.EDUCache + stream types.StreamProvider + notifier *notifier.Notifier } // NewOutputTypingEventConsumer creates a new OutputTypingEventConsumer. @@ -43,72 +47,60 @@ type OutputTypingEventConsumer struct { func NewOutputTypingEventConsumer( process *process.ProcessContext, cfg *config.SyncAPI, - kafkaConsumer sarama.Consumer, + js nats.JetStreamContext, store storage.Database, eduCache *cache.EDUCache, notifier *notifier.Notifier, stream types.StreamProvider, ) *OutputTypingEventConsumer { - - consumer := internal.ContinualConsumer{ - Process: process, - ComponentName: "syncapi/eduserver/typing", - Topic: string(cfg.Matrix.Kafka.TopicFor(config.TopicOutputTypingEvent)), - Consumer: kafkaConsumer, - PartitionStore: store, + return &OutputTypingEventConsumer{ + ctx: process.Context(), + jetstream: js, + topic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputTypingEvent), + durable: cfg.Matrix.JetStream.Durable("SyncAPIEDUServerTypingConsumer"), + eduCache: eduCache, + notifier: notifier, + stream: stream, } - - s := &OutputTypingEventConsumer{ - typingConsumer: &consumer, - eduCache: eduCache, - notifier: notifier, - stream: stream, - } - - consumer.ProcessMessage = s.onMessage - - return s } // Start consuming from EDU api func (s *OutputTypingEventConsumer) Start() error { - s.eduCache.SetTimeoutCallback(func(userID, roomID string, latestSyncPosition int64) { - pos := types.StreamPosition(latestSyncPosition) - s.stream.Advance(pos) - s.notifier.OnNewTyping(roomID, types.StreamingToken{TypingPosition: pos}) + _, err := s.jetstream.Subscribe(s.topic, s.onMessage, s.durable) + return err +} + +func (s *OutputTypingEventConsumer) onMessage(msg *nats.Msg) { + jetstream.WithJetStreamMessage(msg, func(msg *nats.Msg) bool { + var output api.OutputTypingEvent + if err := json.Unmarshal(msg.Data, &output); err != nil { + // If the message was invalid, log it and move on to the next message in the stream + 
log.WithError(err).Errorf("EDU server output log: message parse failure") + sentry.CaptureException(err) + return true + } + + log.WithFields(log.Fields{ + "room_id": output.Event.RoomID, + "user_id": output.Event.UserID, + "typing": output.Event.Typing, + }).Debug("received data from EDU server") + + var typingPos types.StreamPosition + typingEvent := output.Event + if typingEvent.Typing { + typingPos = types.StreamPosition( + s.eduCache.AddTypingUser(typingEvent.UserID, typingEvent.RoomID, output.ExpireTime), + ) + } else { + typingPos = types.StreamPosition( + s.eduCache.RemoveUser(typingEvent.UserID, typingEvent.RoomID), + ) + } + + s.stream.Advance(typingPos) + s.notifier.OnNewTyping(output.Event.RoomID, types.StreamingToken{TypingPosition: typingPos}) + + return true }) - return s.typingConsumer.Start() -} - -func (s *OutputTypingEventConsumer) onMessage(msg *sarama.ConsumerMessage) error { - var output api.OutputTypingEvent - if err := json.Unmarshal(msg.Value, &output); err != nil { - // If the message was invalid, log it and move on to the next message in the stream - log.WithError(err).Errorf("EDU server output log: message parse failure") - sentry.CaptureException(err) - return nil - } - - log.WithFields(log.Fields{ - "room_id": output.Event.RoomID, - "user_id": output.Event.UserID, - "typing": output.Event.Typing, - }).Debug("received data from EDU server") - - var typingPos types.StreamPosition - typingEvent := output.Event - if typingEvent.Typing { - typingPos = types.StreamPosition( - s.eduCache.AddTypingUser(typingEvent.UserID, typingEvent.RoomID, output.ExpireTime), - ) - } else { - typingPos = types.StreamPosition( - s.eduCache.RemoveUser(typingEvent.UserID, typingEvent.RoomID), - ) - } - - s.stream.Advance(typingPos) - s.notifier.OnNewTyping(output.Event.RoomID, types.StreamingToken{TypingPosition: typingPos}) - - return nil } diff --git a/syncapi/consumers/keychange.go b/syncapi/consumers/keychange.go index dfedc6409..97685cc04 100644 --- a/syncapi/consumers/keychange.go +++ b/syncapi/consumers/keychange.go @@ -17,7 +17,6 @@ package consumers import ( "context" "encoding/json" - "sync" "github.com/Shopify/sarama" "github.com/getsentry/sentry-go" @@ -34,15 +33,14 @@ import ( // OutputKeyChangeEventConsumer consumes events that originated in the key server. type OutputKeyChangeEventConsumer struct { - keyChangeConsumer *internal.ContinualConsumer - db storage.Database - notifier *notifier.Notifier - stream types.PartitionedStreamProvider - serverName gomatrixserverlib.ServerName // our server name - rsAPI roomserverAPI.RoomserverInternalAPI - keyAPI api.KeyInternalAPI - partitionToOffset map[int32]int64 - partitionToOffsetMu sync.Mutex + ctx context.Context + keyChangeConsumer *internal.ContinualConsumer + db storage.Database + notifier *notifier.Notifier + stream types.StreamProvider + serverName gomatrixserverlib.ServerName // our server name + rsAPI roomserverAPI.RoomserverInternalAPI + keyAPI api.KeyInternalAPI } // NewOutputKeyChangeEventConsumer creates a new OutputKeyChangeEventConsumer. 
@@ -56,7 +54,7 @@ func NewOutputKeyChangeEventConsumer( rsAPI roomserverAPI.RoomserverInternalAPI, store storage.Database, notifier *notifier.Notifier, - stream types.PartitionedStreamProvider, + stream types.StreamProvider, ) *OutputKeyChangeEventConsumer { consumer := internal.ContinualConsumer{ @@ -68,15 +66,14 @@ func NewOutputKeyChangeEventConsumer( } s := &OutputKeyChangeEventConsumer{ - keyChangeConsumer: &consumer, - db: store, - serverName: serverName, - keyAPI: keyAPI, - rsAPI: rsAPI, - partitionToOffset: make(map[int32]int64), - partitionToOffsetMu: sync.Mutex{}, - notifier: notifier, - stream: stream, + ctx: process.Context(), + keyChangeConsumer: &consumer, + db: store, + serverName: serverName, + keyAPI: keyAPI, + rsAPI: rsAPI, + notifier: notifier, + stream: stream, } consumer.ProcessMessage = s.onMessage @@ -86,24 +83,10 @@ func NewOutputKeyChangeEventConsumer( // Start consuming from the key server func (s *OutputKeyChangeEventConsumer) Start() error { - offsets, err := s.keyChangeConsumer.StartOffsets() - s.partitionToOffsetMu.Lock() - for _, o := range offsets { - s.partitionToOffset[o.Partition] = o.Offset - } - s.partitionToOffsetMu.Unlock() - return err -} - -func (s *OutputKeyChangeEventConsumer) updateOffset(msg *sarama.ConsumerMessage) { - s.partitionToOffsetMu.Lock() - defer s.partitionToOffsetMu.Unlock() - s.partitionToOffset[msg.Partition] = msg.Offset + return s.keyChangeConsumer.Start() } func (s *OutputKeyChangeEventConsumer) onMessage(msg *sarama.ConsumerMessage) error { - defer s.updateOffset(msg) - var m api.DeviceMessage if err := json.Unmarshal(msg.Value, &m); err != nil { logrus.WithError(err).Errorf("failed to read device message from key change topic") @@ -116,19 +99,22 @@ func (s *OutputKeyChangeEventConsumer) onMessage(msg *sarama.ConsumerMessage) er } switch m.Type { case api.TypeCrossSigningUpdate: - return s.onCrossSigningMessage(m, msg.Offset, msg.Partition) + return s.onCrossSigningMessage(m, m.DeviceChangeID) case api.TypeDeviceKeyUpdate: fallthrough default: - return s.onDeviceKeyMessage(m, msg.Offset, msg.Partition) + return s.onDeviceKeyMessage(m, m.DeviceChangeID) } } -func (s *OutputKeyChangeEventConsumer) onDeviceKeyMessage(m api.DeviceMessage, offset int64, partition int32) error { +func (s *OutputKeyChangeEventConsumer) onDeviceKeyMessage(m api.DeviceMessage, deviceChangeID int64) error { + if m.DeviceKeys == nil { + return nil + } output := m.DeviceKeys // work out who we need to notify about the new key var queryRes roomserverAPI.QuerySharedUsersResponse - err := s.rsAPI.QuerySharedUsers(context.Background(), &roomserverAPI.QuerySharedUsersRequest{ + err := s.rsAPI.QuerySharedUsers(s.ctx, &roomserverAPI.QuerySharedUsersRequest{ UserID: output.UserID, }, &queryRes) if err != nil { @@ -138,10 +124,7 @@ func (s *OutputKeyChangeEventConsumer) onDeviceKeyMessage(m api.DeviceMessage, o } // make sure we get our own key updates too! 
queryRes.UserIDsToCount[output.UserID] = 1 - posUpdate := types.LogPosition{ - Offset: offset, - Partition: partition, - } + posUpdate := types.StreamPosition(deviceChangeID) s.stream.Advance(posUpdate) for userID := range queryRes.UserIDsToCount { @@ -151,11 +134,11 @@ func (s *OutputKeyChangeEventConsumer) onDeviceKeyMessage(m api.DeviceMessage, o return nil } -func (s *OutputKeyChangeEventConsumer) onCrossSigningMessage(m api.DeviceMessage, offset int64, partition int32) error { +func (s *OutputKeyChangeEventConsumer) onCrossSigningMessage(m api.DeviceMessage, deviceChangeID int64) error { output := m.CrossSigningKeyUpdate // work out who we need to notify about the new key var queryRes roomserverAPI.QuerySharedUsersResponse - err := s.rsAPI.QuerySharedUsers(context.Background(), &roomserverAPI.QuerySharedUsersRequest{ + err := s.rsAPI.QuerySharedUsers(s.ctx, &roomserverAPI.QuerySharedUsersRequest{ UserID: output.UserID, }, &queryRes) if err != nil { @@ -165,10 +148,7 @@ func (s *OutputKeyChangeEventConsumer) onCrossSigningMessage(m api.DeviceMessage } // make sure we get our own key updates too! queryRes.UserIDsToCount[output.UserID] = 1 - posUpdate := types.LogPosition{ - Offset: offset, - Partition: partition, - } + posUpdate := types.StreamPosition(deviceChangeID) s.stream.Advance(posUpdate) for userID := range queryRes.UserIDsToCount { diff --git a/syncapi/consumers/roomserver.go b/syncapi/consumers/roomserver.go index 0d6f528ad..e9c4abe88 100644 --- a/syncapi/consumers/roomserver.go +++ b/syncapi/consumers/roomserver.go @@ -19,24 +19,27 @@ import ( "encoding/json" "fmt" - "github.com/Shopify/sarama" "github.com/getsentry/sentry-go" - "github.com/matrix-org/dendrite/internal" "github.com/matrix-org/dendrite/roomserver/api" "github.com/matrix-org/dendrite/setup/config" + "github.com/matrix-org/dendrite/setup/jetstream" "github.com/matrix-org/dendrite/setup/process" "github.com/matrix-org/dendrite/syncapi/notifier" "github.com/matrix-org/dendrite/syncapi/storage" "github.com/matrix-org/dendrite/syncapi/types" "github.com/matrix-org/gomatrixserverlib" + "github.com/nats-io/nats.go" log "github.com/sirupsen/logrus" ) // OutputRoomEventConsumer consumes events that originated in the room server. 
type OutputRoomEventConsumer struct { + ctx context.Context cfg *config.SyncAPI rsAPI api.RoomserverInternalAPI - rsConsumer *internal.ContinualConsumer + jetstream nats.JetStreamContext + durable nats.SubOpt + topic string db storage.Database pduStream types.StreamProvider inviteStream types.StreamProvider @@ -47,83 +50,88 @@ type OutputRoomEventConsumer struct { func NewOutputRoomEventConsumer( process *process.ProcessContext, cfg *config.SyncAPI, - kafkaConsumer sarama.Consumer, + js nats.JetStreamContext, store storage.Database, notifier *notifier.Notifier, pduStream types.StreamProvider, inviteStream types.StreamProvider, rsAPI api.RoomserverInternalAPI, ) *OutputRoomEventConsumer { - - consumer := internal.ContinualConsumer{ - Process: process, - ComponentName: "syncapi/roomserver", - Topic: string(cfg.Matrix.Kafka.TopicFor(config.TopicOutputRoomEvent)), - Consumer: kafkaConsumer, - PartitionStore: store, - } - s := &OutputRoomEventConsumer{ + return &OutputRoomEventConsumer{ + ctx: process.Context(), cfg: cfg, - rsConsumer: &consumer, + jetstream: js, + topic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputRoomEvent), + durable: cfg.Matrix.JetStream.Durable("SyncAPIRoomServerConsumer"), db: store, notifier: notifier, pduStream: pduStream, inviteStream: inviteStream, rsAPI: rsAPI, } - consumer.ProcessMessage = s.onMessage - - return s } // Start consuming from room servers func (s *OutputRoomEventConsumer) Start() error { - return s.rsConsumer.Start() + _, err := s.jetstream.Subscribe( + s.topic, s.onMessage, s.durable, + nats.DeliverAll(), + nats.ManualAck(), + ) + return err } // onMessage is called when the sync server receives a new event from the room server output log. // It is not safe for this function to be called from multiple goroutines, or else the // sync stream position may race and be incorrectly calculated. -func (s *OutputRoomEventConsumer) onMessage(msg *sarama.ConsumerMessage) error { - // Parse out the event JSON - var output api.OutputEvent - if err := json.Unmarshal(msg.Value, &output); err != nil { - // If the message was invalid, log it and move on to the next message in the stream - log.WithError(err).Errorf("roomserver output log: message parse failure") - return nil - } - - switch output.Type { - case api.OutputTypeNewRoomEvent: - // Ignore redaction events. 
We will add them to the database when they are - // validated (when we receive OutputTypeRedactedEvent) - event := output.NewRoomEvent.Event - if event.Type() == gomatrixserverlib.MRoomRedaction && event.StateKey() == nil { - // in the special case where the event redacts itself, just pass the message through because - // we will never see the other part of the pair - if event.Redacts() != event.EventID() { - return nil - } +func (s *OutputRoomEventConsumer) onMessage(msg *nats.Msg) { + jetstream.WithJetStreamMessage(msg, func(msg *nats.Msg) bool { + // Parse out the event JSON + var err error + var output api.OutputEvent + if err = json.Unmarshal(msg.Data, &output); err != nil { + // If the message was invalid, log it and move on to the next message in the stream + log.WithError(err).Errorf("roomserver output log: message parse failure") + return true } - return s.onNewRoomEvent(context.TODO(), *output.NewRoomEvent) - case api.OutputTypeOldRoomEvent: - return s.onOldRoomEvent(context.TODO(), *output.OldRoomEvent) - case api.OutputTypeNewInviteEvent: - return s.onNewInviteEvent(context.TODO(), *output.NewInviteEvent) - case api.OutputTypeRetireInviteEvent: - return s.onRetireInviteEvent(context.TODO(), *output.RetireInviteEvent) - case api.OutputTypeNewPeek: - return s.onNewPeek(context.TODO(), *output.NewPeek) - case api.OutputTypeRetirePeek: - return s.onRetirePeek(context.TODO(), *output.RetirePeek) - case api.OutputTypeRedactedEvent: - return s.onRedactEvent(context.TODO(), *output.RedactedEvent) - default: - log.WithField("type", output.Type).Debug( - "roomserver output log: ignoring unknown output type", - ) - return nil - } + + switch output.Type { + case api.OutputTypeNewRoomEvent: + // Ignore redaction events. We will add them to the database when they are + // validated (when we receive OutputTypeRedactedEvent) + event := output.NewRoomEvent.Event + if event.Type() == gomatrixserverlib.MRoomRedaction && event.StateKey() == nil { + // in the special case where the event redacts itself, just pass the message through because + // we will never see the other part of the pair + if event.Redacts() != event.EventID() { + return true + } + } + err = s.onNewRoomEvent(s.ctx, *output.NewRoomEvent) + case api.OutputTypeOldRoomEvent: + err = s.onOldRoomEvent(s.ctx, *output.OldRoomEvent) + case api.OutputTypeNewInviteEvent: + s.onNewInviteEvent(s.ctx, *output.NewInviteEvent) + case api.OutputTypeRetireInviteEvent: + s.onRetireInviteEvent(s.ctx, *output.RetireInviteEvent) + case api.OutputTypeNewPeek: + s.onNewPeek(s.ctx, *output.NewPeek) + case api.OutputTypeRetirePeek: + s.onRetirePeek(s.ctx, *output.RetirePeek) + case api.OutputTypeRedactedEvent: + err = s.onRedactEvent(s.ctx, *output.RedactedEvent) + default: + log.WithField("type", output.Type).Debug( + "roomserver output log: ignoring unknown output type", + ) + } + if err != nil { + log.WithError(err).Error("roomserver output log: failed to process event") + return false + } + + return true + }) } func (s *OutputRoomEventConsumer) onRedactEvent( @@ -275,12 +283,12 @@ func (s *OutputRoomEventConsumer) notifyJoinedPeeks(ctx context.Context, ev *gom func (s *OutputRoomEventConsumer) onNewInviteEvent( ctx context.Context, msg api.OutputNewInviteEvent, -) error { +) { if msg.Event.StateKey() == nil { log.WithFields(log.Fields{ "event": string(msg.Event.JSON()), }).Panicf("roomserver output log: invite has no state key") - return nil + return } pduPos, err := s.db.AddInviteEvent(ctx, msg.Event) if err != nil { @@ -292,18 +300,16 @@ func (s 
*OutputRoomEventConsumer) onNewInviteEvent( "pdupos": pduPos, log.ErrorKey: err, }).Panicf("roomserver output log: write invite failure") - return nil + return } s.inviteStream.Advance(pduPos) s.notifier.OnNewInvite(types.StreamingToken{InvitePosition: pduPos}, *msg.Event.StateKey()) - - return nil } func (s *OutputRoomEventConsumer) onRetireInviteEvent( ctx context.Context, msg api.OutputRetireInviteEvent, -) error { +) { pduPos, err := s.db.RetireInviteEvent(ctx, msg.EventID) if err != nil { sentry.CaptureException(err) @@ -312,19 +318,17 @@ func (s *OutputRoomEventConsumer) onRetireInviteEvent( "event_id": msg.EventID, log.ErrorKey: err, }).Panicf("roomserver output log: remove invite failure") - return nil + return } // Notify any active sync requests that the invite has been retired. s.inviteStream.Advance(pduPos) s.notifier.OnNewInvite(types.StreamingToken{InvitePosition: pduPos}, msg.TargetUserID) - - return nil } func (s *OutputRoomEventConsumer) onNewPeek( ctx context.Context, msg api.OutputNewPeek, -) error { +) { sp, err := s.db.AddPeek(ctx, msg.RoomID, msg.UserID, msg.DeviceID) if err != nil { sentry.CaptureException(err) @@ -332,7 +336,7 @@ func (s *OutputRoomEventConsumer) onNewPeek( log.WithFields(log.Fields{ log.ErrorKey: err, }).Panicf("roomserver output log: write peek failure") - return nil + return } // tell the notifier about the new peek so it knows to wake up new devices @@ -340,20 +344,18 @@ func (s *OutputRoomEventConsumer) onNewPeek( // index as PDUs, but we should fix this s.pduStream.Advance(sp) s.notifier.OnNewPeek(msg.RoomID, msg.UserID, msg.DeviceID, types.StreamingToken{PDUPosition: sp}) - - return nil } func (s *OutputRoomEventConsumer) onRetirePeek( ctx context.Context, msg api.OutputRetirePeek, -) error { +) { sp, err := s.db.DeletePeek(ctx, msg.RoomID, msg.UserID, msg.DeviceID) if err != nil { // panic rather than continue with an inconsistent database log.WithFields(log.Fields{ log.ErrorKey: err, }).Panicf("roomserver output log: write peek failure") - return nil + return } // tell the notifier about the new peek so it knows to wake up new devices @@ -361,8 +363,6 @@ func (s *OutputRoomEventConsumer) onRetirePeek( // index as PDUs, but we should fix this s.pduStream.Advance(sp) s.notifier.OnRetirePeek(msg.RoomID, msg.UserID, msg.DeviceID, types.StreamingToken{PDUPosition: sp}) - - return nil } func (s *OutputRoomEventConsumer) updateStateEvent(event *gomatrixserverlib.HeaderedEvent) (*gomatrixserverlib.HeaderedEvent, error) { diff --git a/syncapi/internal/keychange.go b/syncapi/internal/keychange.go index 56a438fb5..41efd4a07 100644 --- a/syncapi/internal/keychange.go +++ b/syncapi/internal/keychange.go @@ -47,8 +47,8 @@ func DeviceOTKCounts(ctx context.Context, keyAPI keyapi.KeyInternalAPI, userID, // be already filled in with join/leave information. func DeviceListCatchup( ctx context.Context, keyAPI keyapi.KeyInternalAPI, rsAPI roomserverAPI.RoomserverInternalAPI, - userID string, res *types.Response, from, to types.LogPosition, -) (newPos types.LogPosition, hasNew bool, err error) { + userID string, res *types.Response, from, to types.StreamPosition, +) (newPos types.StreamPosition, hasNew bool, err error) { // Track users who we didn't track before but now do by virtue of sharing a room with them, or not. 
newlyJoinedRooms := joinedRooms(res, userID) @@ -64,27 +64,18 @@ func DeviceListCatchup( } // now also track users who we already share rooms with but who have updated their devices between the two tokens - - var partition int32 - var offset int64 - partition = -1 - offset = sarama.OffsetOldest - // Extract partition/offset from sync token - // TODO: In a world where keyserver is sharded there will be multiple partitions and hence multiple QueryKeyChanges to make. - if !from.IsEmpty() { - partition = from.Partition - offset = from.Offset + offset := sarama.OffsetOldest + toOffset := sarama.OffsetNewest + if to > 0 && to > from { + toOffset = int64(to) } - var toOffset int64 - toOffset = sarama.OffsetNewest - if toLog := to; toLog.Partition == partition && toLog.Offset > 0 { - toOffset = toLog.Offset + if from > 0 { + offset = int64(from) } var queryRes keyapi.QueryKeyChangesResponse keyAPI.QueryKeyChanges(ctx, &keyapi.QueryKeyChangesRequest{ - Partition: partition, - Offset: offset, - ToOffset: toOffset, + Offset: offset, + ToOffset: toOffset, }, &queryRes) if queryRes.Error != nil { // don't fail the catchup because we may have got useful information by tracking membership @@ -95,8 +86,8 @@ func DeviceListCatchup( var sharedUsersMap map[string]int sharedUsersMap, queryRes.UserIDs = filterSharedUsers(ctx, rsAPI, userID, queryRes.UserIDs) util.GetLogger(ctx).Debugf( - "QueryKeyChanges request p=%d,off=%d,to=%d response p=%d off=%d uids=%v", - partition, offset, toOffset, queryRes.Partition, queryRes.Offset, queryRes.UserIDs, + "QueryKeyChanges request off=%d,to=%d response off=%d uids=%v", + offset, toOffset, queryRes.Offset, queryRes.UserIDs, ) userSet := make(map[string]bool) for _, userID := range res.DeviceLists.Changed { @@ -125,13 +116,8 @@ func DeviceListCatchup( res.DeviceLists.Left = append(res.DeviceLists.Left, userID) } } - // set the new token - to = types.LogPosition{ - Partition: queryRes.Partition, - Offset: queryRes.Offset, - } - return to, hasNew, nil + return types.StreamPosition(queryRes.Offset), hasNew, nil } // TrackChangedUsers calculates the values of device_lists.changed|left in the /sync response. 
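The from/to positions handed to DeviceListCatchup are now plain StreamPositions, but the key server's QueryKeyChanges API still speaks in Kafka-style offsets, so the hunk above bridges the two: a zero "from" falls back to the oldest available change, and a "to" that has not advanced past "from" falls back to the newest. Pulled out as a standalone helper purely for illustration (this helper does not exist in the PR), the mapping reads:

package internal

import (
	"github.com/Shopify/sarama"

	"github.com/matrix-org/dendrite/syncapi/types"
)

// offsetsForCatchup mirrors the bridging logic in DeviceListCatchup above:
// zero positions mean "no bound", which maps onto sarama's sentinel offsets.
func offsetsForCatchup(from, to types.StreamPosition) (offset, toOffset int64) {
	offset, toOffset = sarama.OffsetOldest, sarama.OffsetNewest
	if from > 0 {
		offset = int64(from) // resume from the client's last-seen device change
	}
	if to > 0 && to > from {
		toOffset = int64(to) // cap the query at the target sync position
	}
	return offset, toOffset
}

For example, a first-time sync (from=0, to=0) queries OffsetOldest through OffsetNewest, while an incremental sync from position 10 to 42 queries exactly that range.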
diff --git a/syncapi/internal/keychange_test.go b/syncapi/internal/keychange_test.go index e52e55564..d9fb9cf82 100644 --- a/syncapi/internal/keychange_test.go +++ b/syncapi/internal/keychange_test.go @@ -6,7 +6,6 @@ import ( "sort" "testing" - "github.com/Shopify/sarama" keyapi "github.com/matrix-org/dendrite/keyserver/api" "github.com/matrix-org/dendrite/roomserver/api" "github.com/matrix-org/dendrite/syncapi/types" @@ -16,11 +15,7 @@ import ( var ( syncingUser = "@alice:localhost" - emptyToken = types.LogPosition{} - newestToken = types.LogPosition{ - Offset: sarama.OffsetNewest, - Partition: 0, - } + emptyToken = types.StreamPosition(0) ) type mockKeyAPI struct{} @@ -186,7 +181,7 @@ func TestKeyChangeCatchupOnJoinShareNewUser(t *testing.T) { "!another:room": {syncingUser}, }, } - _, hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, newestToken) + _, hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, emptyToken) if err != nil { t.Fatalf("DeviceListCatchup returned an error: %s", err) } @@ -209,7 +204,7 @@ func TestKeyChangeCatchupOnLeaveShareLeftUser(t *testing.T) { "!another:room": {syncingUser}, }, } - _, hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, newestToken) + _, hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, emptyToken) if err != nil { t.Fatalf("DeviceListCatchup returned an error: %s", err) } @@ -232,7 +227,7 @@ func TestKeyChangeCatchupOnJoinShareNoNewUsers(t *testing.T) { "!another:room": {syncingUser, existingUser}, }, } - _, hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, newestToken) + _, hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, emptyToken) if err != nil { t.Fatalf("Catchup returned an error: %s", err) } @@ -254,7 +249,7 @@ func TestKeyChangeCatchupOnLeaveShareNoUsers(t *testing.T) { "!another:room": {syncingUser, existingUser}, }, } - _, hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, newestToken) + _, hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, emptyToken) if err != nil { t.Fatalf("DeviceListCatchup returned an error: %s", err) } @@ -313,7 +308,7 @@ func TestKeyChangeCatchupNoNewJoinsButMessages(t *testing.T) { roomID: {syncingUser, existingUser}, }, } - _, hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, newestToken) + _, hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, emptyToken) if err != nil { t.Fatalf("DeviceListCatchup returned an error: %s", err) } @@ -341,7 +336,7 @@ func TestKeyChangeCatchupChangeAndLeft(t *testing.T) { "!another:room": {syncingUser}, }, } - _, hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, newestToken) + _, hasNew, err := DeviceListCatchup(context.Background(), &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, emptyToken) if err != nil { t.Fatalf("Catchup returned an error: %s", err) } @@ -427,7 +422,7 @@ func TestKeyChangeCatchupChangeAndLeftSameRoom(t *testing.T) { }, } _, hasNew, err := 
DeviceListCatchup( - context.Background(), &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, newestToken, + context.Background(), &mockKeyAPI{}, rsAPI, syncingUser, syncResponse, emptyToken, emptyToken, ) if err != nil { t.Fatalf("DeviceListCatchup returned an error: %s", err) diff --git a/syncapi/notifier/notifier_test.go b/syncapi/notifier/notifier_test.go index 1401fc676..c6d3df7ee 100644 --- a/syncapi/notifier/notifier_test.go +++ b/syncapi/notifier/notifier_test.go @@ -127,7 +127,7 @@ func TestNewEventAndJoinedToRoom(t *testing.T) { go func() { pos, err := waitForEvents(n, newTestSyncRequest(bob, bobDev, syncPositionBefore)) if err != nil { - t.Errorf("TestNewEventAndJoinedToRoom error: %w", err) + t.Errorf("TestNewEventAndJoinedToRoom error: %s", err) } mustEqualPositions(t, pos, syncPositionAfter) wg.Done() @@ -190,7 +190,7 @@ func TestNewInviteEventForUser(t *testing.T) { go func() { pos, err := waitForEvents(n, newTestSyncRequest(bob, bobDev, syncPositionBefore)) if err != nil { - t.Errorf("TestNewInviteEventForUser error: %w", err) + t.Errorf("TestNewInviteEventForUser error: %s", err) } mustEqualPositions(t, pos, syncPositionAfter) wg.Done() @@ -246,7 +246,7 @@ func TestMultipleRequestWakeup(t *testing.T) { poll := func() { pos, err := waitForEvents(n, newTestSyncRequest(bob, bobDev, syncPositionBefore)) if err != nil { - t.Errorf("TestMultipleRequestWakeup error: %w", err) + t.Errorf("TestMultipleRequestWakeup error: %s", err) } mustEqualPositions(t, pos, syncPositionAfter) wg.Done() @@ -284,7 +284,7 @@ func TestNewEventAndWasPreviouslyJoinedToRoom(t *testing.T) { go func() { pos, err := waitForEvents(n, newTestSyncRequest(bob, bobDev, syncPositionBefore)) if err != nil { - t.Errorf("TestNewEventAndWasPreviouslyJoinedToRoom error: %w", err) + t.Errorf("TestNewEventAndWasPreviouslyJoinedToRoom error: %s", err) } mustEqualPositions(t, pos, syncPositionAfter) leaveWG.Done() @@ -301,7 +301,7 @@ func TestNewEventAndWasPreviouslyJoinedToRoom(t *testing.T) { go func() { pos, err := waitForEvents(n, newTestSyncRequest(alice, aliceDev, syncPositionAfter)) if err != nil { - t.Errorf("TestNewEventAndWasPreviouslyJoinedToRoom error: %w", err) + t.Errorf("TestNewEventAndWasPreviouslyJoinedToRoom error: %s", err) } mustEqualPositions(t, pos, syncPositionAfter2) aliceWG.Done() diff --git a/syncapi/streams/stream_devicelist.go b/syncapi/streams/stream_devicelist.go index 9ea9d088f..6ff8a7fd5 100644 --- a/syncapi/streams/stream_devicelist.go +++ b/syncapi/streams/stream_devicelist.go @@ -10,7 +10,7 @@ import ( ) type DeviceListStreamProvider struct { - PartitionedStreamProvider + StreamProvider rsAPI api.RoomserverInternalAPI keyAPI keyapi.KeyInternalAPI } @@ -18,15 +18,15 @@ type DeviceListStreamProvider struct { func (p *DeviceListStreamProvider) CompleteSync( ctx context.Context, req *types.SyncRequest, -) types.LogPosition { +) types.StreamPosition { return p.LatestPosition(ctx) } func (p *DeviceListStreamProvider) IncrementalSync( ctx context.Context, req *types.SyncRequest, - from, to types.LogPosition, -) types.LogPosition { + from, to types.StreamPosition, +) types.StreamPosition { var err error to, _, err = internal.DeviceListCatchup(context.Background(), p.keyAPI, p.rsAPI, req.Device.UserID, req.Response, from, to) if err != nil { diff --git a/syncapi/streams/streams.go b/syncapi/streams/streams.go index ba4118df5..6b02c75ea 100644 --- a/syncapi/streams/streams.go +++ b/syncapi/streams/streams.go @@ -18,7 +18,7 @@ type Streams struct { InviteStreamProvider 
types.StreamProvider SendToDeviceStreamProvider types.StreamProvider AccountDataStreamProvider types.StreamProvider - DeviceListStreamProvider types.PartitionedStreamProvider + DeviceListStreamProvider types.StreamProvider } func NewSyncStreamProviders( @@ -48,9 +48,9 @@ func NewSyncStreamProviders( userAPI: userAPI, }, DeviceListStreamProvider: &DeviceListStreamProvider{ - PartitionedStreamProvider: PartitionedStreamProvider{DB: d}, - rsAPI: rsAPI, - keyAPI: keyAPI, + StreamProvider: StreamProvider{DB: d}, + rsAPI: rsAPI, + keyAPI: keyAPI, }, } diff --git a/syncapi/streams/template_pstream.go b/syncapi/streams/template_pstream.go deleted file mode 100644 index 265e22a20..000000000 --- a/syncapi/streams/template_pstream.go +++ /dev/null @@ -1,38 +0,0 @@ -package streams - -import ( - "context" - "sync" - - "github.com/matrix-org/dendrite/syncapi/storage" - "github.com/matrix-org/dendrite/syncapi/types" -) - -type PartitionedStreamProvider struct { - DB storage.Database - latest types.LogPosition - latestMutex sync.RWMutex -} - -func (p *PartitionedStreamProvider) Setup() { -} - -func (p *PartitionedStreamProvider) Advance( - latest types.LogPosition, -) { - p.latestMutex.Lock() - defer p.latestMutex.Unlock() - - if latest.IsAfter(&p.latest) { - p.latest = latest - } -} - -func (p *PartitionedStreamProvider) LatestPosition( - ctx context.Context, -) types.LogPosition { - p.latestMutex.RLock() - defer p.latestMutex.RUnlock() - - return p.latest -} diff --git a/syncapi/sync/requestpool.go b/syncapi/sync/requestpool.go index a45736106..ca35951a0 100644 --- a/syncapi/sync/requestpool.go +++ b/syncapi/sync/requestpool.go @@ -140,6 +140,12 @@ func (rp *RequestPool) OnIncomingSyncRequest(req *http.Request, device *userapi. // Extract values from request syncReq, err := newSyncRequest(req, *device, rp.db) if err != nil { + if err == types.ErrMalformedSyncToken { + return util.JSONResponse{ + Code: http.StatusBadRequest, + JSON: jsonerror.InvalidArgumentValue(err.Error()), + } + } return util.JSONResponse{ Code: http.StatusBadRequest, JSON: jsonerror.Unknown(err.Error()), diff --git a/syncapi/syncapi.go b/syncapi/syncapi.go index 84c7140ca..39bc233ae 100644 --- a/syncapi/syncapi.go +++ b/syncapi/syncapi.go @@ -24,7 +24,7 @@ import ( keyapi "github.com/matrix-org/dendrite/keyserver/api" "github.com/matrix-org/dendrite/roomserver/api" "github.com/matrix-org/dendrite/setup/config" - "github.com/matrix-org/dendrite/setup/kafka" + "github.com/matrix-org/dendrite/setup/jetstream" "github.com/matrix-org/dendrite/setup/process" userapi "github.com/matrix-org/dendrite/userapi/api" "github.com/matrix-org/gomatrixserverlib" @@ -48,7 +48,7 @@ func AddPublicRoutes( federation *gomatrixserverlib.FederationClient, cfg *config.SyncAPI, ) { - consumer, _ := kafka.SetupConsumerProducer(&cfg.Matrix.Kafka) + js, consumer, _ := jetstream.Prepare(&cfg.Matrix.JetStream) syncDB, err := storage.NewSyncServerDatasource(&cfg.Database) if err != nil { @@ -65,15 +65,16 @@ func AddPublicRoutes( requestPool := sync.NewRequestPool(syncDB, cfg, userAPI, keyAPI, rsAPI, streams, notifier) keyChangeConsumer := consumers.NewOutputKeyChangeEventConsumer( - process, cfg.Matrix.ServerName, string(cfg.Matrix.Kafka.TopicFor(config.TopicOutputKeyChangeEvent)), - consumer, keyAPI, rsAPI, syncDB, notifier, streams.DeviceListStreamProvider, + process, cfg.Matrix.ServerName, cfg.Matrix.JetStream.TopicFor(jetstream.OutputKeyChangeEvent), + consumer, keyAPI, rsAPI, syncDB, notifier, + streams.DeviceListStreamProvider, ) if err = 
keyChangeConsumer.Start(); err != nil { logrus.WithError(err).Panicf("failed to start key change consumer") } roomConsumer := consumers.NewOutputRoomEventConsumer( - process, cfg, consumer, syncDB, notifier, streams.PDUStreamProvider, + process, cfg, js, syncDB, notifier, streams.PDUStreamProvider, streams.InviteStreamProvider, rsAPI, ) if err = roomConsumer.Start(); err != nil { @@ -81,28 +82,28 @@ func AddPublicRoutes( } clientConsumer := consumers.NewOutputClientDataConsumer( - process, cfg, consumer, syncDB, notifier, streams.AccountDataStreamProvider, + process, cfg, js, syncDB, notifier, streams.AccountDataStreamProvider, ) if err = clientConsumer.Start(); err != nil { logrus.WithError(err).Panicf("failed to start client data consumer") } typingConsumer := consumers.NewOutputTypingEventConsumer( - process, cfg, consumer, syncDB, eduCache, notifier, streams.TypingStreamProvider, + process, cfg, js, syncDB, eduCache, notifier, streams.TypingStreamProvider, ) if err = typingConsumer.Start(); err != nil { logrus.WithError(err).Panicf("failed to start typing consumer") } sendToDeviceConsumer := consumers.NewOutputSendToDeviceEventConsumer( - process, cfg, consumer, syncDB, notifier, streams.SendToDeviceStreamProvider, + process, cfg, js, syncDB, notifier, streams.SendToDeviceStreamProvider, ) if err = sendToDeviceConsumer.Start(); err != nil { logrus.WithError(err).Panicf("failed to start send-to-device consumer") } receiptConsumer := consumers.NewOutputReceiptEventConsumer( - process, cfg, consumer, syncDB, notifier, streams.ReceiptStreamProvider, + process, cfg, js, syncDB, notifier, streams.ReceiptStreamProvider, ) if err = receiptConsumer.Start(); err != nil { logrus.WithError(err).Panicf("failed to start receipts consumer") diff --git a/syncapi/types/provider.go b/syncapi/types/provider.go index 93ed12661..f6185fcb5 100644 --- a/syncapi/types/provider.go +++ b/syncapi/types/provider.go @@ -42,11 +42,3 @@ type StreamProvider interface { // LatestPosition returns the latest stream position for this stream. LatestPosition(ctx context.Context) StreamPosition } - -type PartitionedStreamProvider interface { - Setup() - Advance(latest LogPosition) - CompleteSync(ctx context.Context, req *SyncRequest) LogPosition - IncrementalSync(ctx context.Context, req *SyncRequest, from, to LogPosition) LogPosition - LatestPosition(ctx context.Context) LogPosition -} diff --git a/syncapi/types/types.go b/syncapi/types/types.go index 44e718b38..68c308d83 100644 --- a/syncapi/types/types.go +++ b/syncapi/types/types.go @@ -16,6 +16,7 @@ package types import ( "encoding/json" + "errors" "fmt" "strconv" "strings" @@ -26,13 +27,10 @@ import ( ) var ( - // ErrInvalidSyncTokenType is returned when an attempt at creating a - // new instance of SyncToken with an invalid type (i.e. neither "s" - // nor "t"). - ErrInvalidSyncTokenType = fmt.Errorf("sync token has an unknown prefix (should be either s or t)") - // ErrInvalidSyncTokenLen is returned when the pagination token is an - // invalid length - ErrInvalidSyncTokenLen = fmt.Errorf("sync token has an invalid length") + // This error is returned when parsing sync tokens if the token is invalid. Callers can use this + // error to detect whether to 400 or 401 the client. It is recommended to 401 them to force a + // logout. + ErrMalformedSyncToken = errors.New("malformed sync token") ) type StateDelta struct { @@ -47,27 +45,6 @@ type StateDelta struct { // StreamPosition represents the offset in the sync stream a client is at. 
type StreamPosition int64 -// LogPosition represents the offset in a Kafka log a client is at. -type LogPosition struct { - Partition int32 - Offset int64 -} - -func (p *LogPosition) IsEmpty() bool { - return p.Offset == 0 -} - -// IsAfter returns true if this position is after `lp`. -func (p *LogPosition) IsAfter(lp *LogPosition) bool { - if lp == nil { - return false - } - if p.Partition != lp.Partition { - return false - } - return p.Offset > lp.Offset -} - // StreamEvent is the same as gomatrixserverlib.Event but also has the PDU stream position for this event. type StreamEvent struct { *gomatrixserverlib.HeaderedEvent @@ -124,7 +101,7 @@ type StreamingToken struct { SendToDevicePosition StreamPosition InvitePosition StreamPosition AccountDataPosition StreamPosition - DeviceListPosition LogPosition + DeviceListPosition StreamPosition } // This will be used as a fallback by json.Marshal. @@ -140,14 +117,11 @@ func (s *StreamingToken) UnmarshalText(text []byte) (err error) { func (t StreamingToken) String() string { posStr := fmt.Sprintf( - "s%d_%d_%d_%d_%d_%d", + "s%d_%d_%d_%d_%d_%d_%d", t.PDUPosition, t.TypingPosition, t.ReceiptPosition, t.SendToDevicePosition, - t.InvitePosition, t.AccountDataPosition, + t.InvitePosition, t.AccountDataPosition, t.DeviceListPosition, ) - if dl := t.DeviceListPosition; !dl.IsEmpty() { - posStr += fmt.Sprintf(".dl-%d-%d", dl.Partition, dl.Offset) - } return posStr } @@ -166,14 +140,14 @@ func (t *StreamingToken) IsAfter(other StreamingToken) bool { return true case t.AccountDataPosition > other.AccountDataPosition: return true - case t.DeviceListPosition.IsAfter(&other.DeviceListPosition): + case t.DeviceListPosition > other.DeviceListPosition: return true } return false } func (t *StreamingToken) IsEmpty() bool { - return t == nil || t.PDUPosition+t.TypingPosition+t.ReceiptPosition+t.SendToDevicePosition+t.InvitePosition+t.AccountDataPosition == 0 && t.DeviceListPosition.IsEmpty() + return t == nil || t.PDUPosition+t.TypingPosition+t.ReceiptPosition+t.SendToDevicePosition+t.InvitePosition+t.AccountDataPosition+t.DeviceListPosition == 0 } // WithUpdates returns a copy of the StreamingToken with updates applied from another StreamingToken. @@ -208,7 +182,7 @@ func (t *StreamingToken) ApplyUpdates(other StreamingToken) { if other.AccountDataPosition > t.AccountDataPosition { t.AccountDataPosition = other.AccountDataPosition } - if other.DeviceListPosition.IsAfter(&t.DeviceListPosition) { + if other.DeviceListPosition > t.DeviceListPosition { t.DeviceListPosition = other.DeviceListPosition } } @@ -292,16 +266,18 @@ func NewTopologyTokenFromString(tok string) (token TopologyToken, err error) { func NewStreamTokenFromString(tok string) (token StreamingToken, err error) { if len(tok) < 1 { - err = fmt.Errorf("empty stream token") + err = ErrMalformedSyncToken return } if tok[0] != SyncTokenTypeStream[0] { - err = fmt.Errorf("stream token must start with 's'") + err = ErrMalformedSyncToken return } - categories := strings.Split(tok[1:], ".") - parts := strings.Split(categories[0], "_") - var positions [6]StreamPosition + // Migration: Remove everything after and including '.' 
- we previously had tokens like: + // s478_0_0_0_0_13.dl-0-2 but we have now removed partitioned stream positions + tok = strings.Split(tok, ".")[0] + parts := strings.Split(tok[1:], "_") + var positions [7]StreamPosition for i, p := range parts { if i > len(positions) { break @@ -309,6 +285,7 @@ func NewStreamTokenFromString(tok string) (token StreamingToken, err error) { var pos int pos, err = strconv.Atoi(p) if err != nil { + err = ErrMalformedSyncToken return } positions[i] = StreamPosition(pos) @@ -320,31 +297,7 @@ func NewStreamTokenFromString(tok string) (token StreamingToken, err error) { SendToDevicePosition: positions[3], InvitePosition: positions[4], AccountDataPosition: positions[5], - } - // dl-0-1234 - // $log_name-$partition-$offset - for _, logStr := range categories[1:] { - segments := strings.Split(logStr, "-") - if len(segments) != 3 { - err = fmt.Errorf("invalid log position %q", logStr) - return - } - switch segments[0] { - case "dl": - // Device list syncing - var partition, offset int - if partition, err = strconv.Atoi(segments[1]); err != nil { - return - } - if offset, err = strconv.Atoi(segments[2]); err != nil { - return - } - token.DeviceListPosition.Partition = int32(partition) - token.DeviceListPosition.Offset = int64(offset) - default: - err = fmt.Errorf("unrecognised token type %q", segments[0]) - return - } + DeviceListPosition: positions[6], } return token, nil } diff --git a/syncapi/types/types_test.go b/syncapi/types/types_test.go index 3e5777888..cda178b37 100644 --- a/syncapi/types/types_test.go +++ b/syncapi/types/types_test.go @@ -2,50 +2,17 @@ package types import ( "encoding/json" - "reflect" "testing" "github.com/matrix-org/gomatrixserverlib" ) -func TestNewSyncTokenWithLogs(t *testing.T) { - tests := map[string]*StreamingToken{ - "s4_0_0_0_0_0": { - PDUPosition: 4, - }, - "s4_0_0_0_0_0.dl-0-123": { - PDUPosition: 4, - DeviceListPosition: LogPosition{ - Partition: 0, - Offset: 123, - }, - }, - } - for tok, want := range tests { - got, err := NewStreamTokenFromString(tok) - if err != nil { - if want == nil { - continue // error expected - } - t.Errorf("%s errored: %s", tok, err) - continue - } - if !reflect.DeepEqual(got, *want) { - t.Errorf("%s mismatch: got %v want %v", tok, got, want) - } - gotStr := got.String() - if gotStr != tok { - t.Errorf("%s reserialisation mismatch: got %s want %s", tok, gotStr, tok) - } - } -} - func TestSyncTokens(t *testing.T) { shouldPass := map[string]string{ - "s4_0_0_0_0_0": StreamingToken{4, 0, 0, 0, 0, 0, LogPosition{}}.String(), - "s3_1_0_0_0_0.dl-1-2": StreamingToken{3, 1, 0, 0, 0, 0, LogPosition{1, 2}}.String(), - "s3_1_2_3_5_0": StreamingToken{3, 1, 2, 3, 5, 0, LogPosition{}}.String(), - "t3_1": TopologyToken{3, 1}.String(), + "s4_0_0_0_0_0_0": StreamingToken{4, 0, 0, 0, 0, 0, 0}.String(), + "s3_1_0_0_0_0_2": StreamingToken{3, 1, 0, 0, 0, 0, 2}.String(), + "s3_1_2_3_5_0_0": StreamingToken{3, 1, 2, 3, 5, 0, 0}.String(), + "t3_1": TopologyToken{3, 1}.String(), } for a, b := range shouldPass { diff --git a/sytest-blacklist b/sytest-blacklist index 5e562845e..3e08f0cb4 100644 --- a/sytest-blacklist +++ b/sytest-blacklist @@ -25,7 +25,6 @@ Local device key changes get to remote servers with correct prev_id # Flakey Local device key changes appear in /keys/changes Device list doesn't change if remote server is down -If a device list update goes missing, the server resyncs on the next one # we don't support groups Remove group category @@ -33,4 +32,3 @@ Remove group role # See 
https://github.com/matrix-org/sytest/pull/1142 Device list doesn't change if remote server is down -If a device list update goes missing, the server resyncs on the next one diff --git a/sytest-whitelist b/sytest-whitelist index 558eb29a6..7d26c610e 100644 --- a/sytest-whitelist +++ b/sytest-whitelist @@ -588,3 +588,5 @@ User can invite remote user to room with version 9 Remote user can backfill in a room with version 9 Can reject invites over federation for rooms with version 9 Can receive redactions from regular users over federation in room version 9 +Forward extremities remain so even after the next events are populated as outliers +If a device list update goes missing, the server resyncs on the next one diff --git a/userapi/storage/accounts/interface.go b/userapi/storage/accounts/interface.go index 7af2f15f3..f03b3774c 100644 --- a/userapi/storage/accounts/interface.go +++ b/userapi/storage/accounts/interface.go @@ -20,12 +20,10 @@ import ( "errors" "github.com/matrix-org/dendrite/clientapi/auth/authtypes" - "github.com/matrix-org/dendrite/internal" "github.com/matrix-org/dendrite/userapi/api" ) type Database interface { - internal.PartitionStorer GetAccountByPassword(ctx context.Context, localpart, plaintextPassword string) (*api.Account, error) GetProfileByLocalpart(ctx context.Context, localpart string) (*authtypes.Profile, error) SetPassword(ctx context.Context, localpart string, plaintextPassword string) error
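Taken together, the consumers in this diff all follow the same JetStream pattern: prepare a JetStream context from the config, Subscribe with a per-consumer durable name, and acknowledge messages explicitly. Below is a minimal self-contained sketch of that pattern using the nats.go client; the subject and durable names are illustrative, not the exact values from Dendrite's configuration.

package main

import (
	"log"

	"github.com/nats-io/nats.go"
)

func main() {
	// Connect to NATS and obtain a JetStream context, analogous to what
	// jetstream.Prepare does for Dendrite's components.
	nc, err := nats.Connect(nats.DefaultURL)
	if err != nil {
		log.Fatal(err)
	}
	defer nc.Close()

	js, err := nc.JetStream()
	if err != nil {
		log.Fatal(err)
	}

	// Durable push subscription with manual acks, mirroring the
	// Subscribe(topic, onMessage, durable, nats.DeliverAll(), nats.ManualAck())
	// calls in the consumers above. Subject and durable names are made up.
	_, err = js.Subscribe("OutputRoomEvent", func(msg *nats.Msg) {
		log.Printf("received %d bytes", len(msg.Data))
		_ = msg.Ack() // acknowledge once the event has been processed
	}, nats.Durable("ExampleSyncAPIConsumer"), nats.DeliverAll(), nats.ManualAck())
	if err != nil {
		log.Fatal(err)
	}

	select {} // block so the subscription keeps receiving messages
}

Because the subscription is durable, a restarted consumer resumes from its last acknowledged message rather than reprocessing the whole stream, which is what allows this migration to drop the Kafka partition/offset bookkeeping seen elsewhere in the diff.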