Merge remote-tracking branch 'origin/master' into hs/as-transaction-send-if-in-room

commit 768bd9322f

.github/workflows/docker-hub.yml (vendored, new file, 71 lines added)
@@ -0,0 +1,71 @@
# Based on https://github.com/docker/build-push-action

name: "Docker Hub"

on:
  release:
    types: [published]

env:
  DOCKER_NAMESPACE: matrixdotorg
  DOCKER_HUB_USER: dendritegithub
  PLATFORMS: linux/amd64,linux/arm64,linux/arm/v7

jobs:
  Monolith:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Get release tag
        run: echo "RELEASE_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Login to Docker Hub
        uses: docker/login-action@v1
        with:
          username: ${{ env.DOCKER_HUB_USER }}
          password: ${{ secrets.DOCKER_TOKEN }}

      - name: Build monolith image
        id: docker_build_monolith
        uses: docker/build-push-action@v2
        with:
          context: .
          file: ./build/docker/Dockerfile.monolith
          platforms: ${{ env.PLATFORMS }}
          push: true
          tags: |
            ${{ env.DOCKER_NAMESPACE }}/dendrite-monolith:latest
            ${{ env.DOCKER_NAMESPACE }}/dendrite-monolith:${{ env.RELEASE_VERSION }}

  Polylith:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Get release tag
        run: echo "RELEASE_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Login to Docker Hub
        uses: docker/login-action@v1
        with:
          username: ${{ env.DOCKER_HUB_USER }}
          password: ${{ secrets.DOCKER_TOKEN }}

      - name: Build polylith image
        id: docker_build_polylith
        uses: docker/build-push-action@v2
        with:
          context: .
          file: ./build/docker/Dockerfile.polylith
          platforms: ${{ env.PLATFORMS }}
          push: true
          tags: |
            ${{ env.DOCKER_NAMESPACE }}/dendrite-polylith:latest
            ${{ env.DOCKER_NAMESPACE }}/dendrite-polylith:${{ env.RELEASE_VERSION }}

CHANGES.md (82 lines changed)

@@ -1,5 +1,87 @@
# Changelog

## Dendrite 0.3.10 (2021-02-17)

### Features

* In-memory caches will now gradually evict old entries, reducing idle memory usage
* Federation sender queues will now be fully unloaded when idle, reducing idle memory usage
* The `power_level_content_override` option is now supported in `/createRoom` (see the example request after this list)
* The `/send` endpoint will now attempt more servers in the room when trying to fetch missing events or state
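
To illustrate the new `power_level_content_override` option, a request along the following lines should exercise it. This is a hedged sketch: the homeserver URL, access token and the particular override fields are placeholders, not taken from this changelog.

```bash
# Hypothetical example: create a room where changing state requires power level 100
# and m.room.topic changes require level 50. Server URL and token are placeholders.
curl -X POST "https://localhost:8008/_matrix/client/r0/createRoom?access_token=ACCESS_TOKEN" \
  -H "Content-Type: application/json" \
  -d '{
        "name": "Example room",
        "preset": "private_chat",
        "power_level_content_override": {
          "state_default": 100,
          "events": { "m.room.topic": 50 }
        }
      }'
```

The handler merges the override by unmarshalling it on top of the default power-level content, so only the supplied fields differ from the defaults.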

### Fixes

* A panic in the membership updater has been fixed
* Events in the sync API that weren't excluded from sync can no longer be incorrectly excluded from sync by backfill
* Retrieving remote media now correctly respects the locally configured maximum file size, even when the `Content-Length` header is unavailable
* The `/send` endpoint will no longer hit the database more than once to find servers in the room

## Dendrite 0.3.9 (2021-02-04)

### Features

* Performance of initial/complete syncs has been improved dramatically
* State events that can't be authed are now dropped when joining a room rather than unexpectedly causing the room join to fail
* State events that already appear in the timeline will no longer be requested from the sync API database more than once, which may reduce memory usage in some cases

### Fixes

* A crash at startup due to a conflict in the sync API account data has been fixed
* A crash at startup due to mismatched event IDs in the federation sender has been fixed
* A redundant check which may cause the roomserver memberships table to get out of sync has been removed

## Dendrite 0.3.8 (2021-01-28)

### Fixes

* A well-known lookup regression in version 0.3.7 has been fixed

## Dendrite 0.3.7 (2021-01-26)

### Features

* Sync filtering support (for event types, senders and limits); see the example filter after this list
* In-process DNS caching support for deployments where a local DNS caching resolver is not available (disabled by default)
* Experimental support for MSC2444 (Peeking over Federation) has been merged
* Experimental federation support for MSC2946 (Spaces Summary) has been merged
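
For the sync filtering entry above, the Client-Server API allows a filter to be passed inline to `/sync` as a URL-encoded JSON object. A minimal sketch, with a placeholder server and token and illustrative filter fields only:

```bash
# Hypothetical example: only return m.room.message events, at most 10 per room,
# excluding one sender. Server URL and token are placeholders.
curl -G "https://localhost:8008/_matrix/client/r0/sync" \
  --data-urlencode "access_token=ACCESS_TOKEN" \
  --data-urlencode 'filter={"room":{"timeline":{"types":["m.room.message"],"not_senders":["@spam:example.com"],"limit":10}}}'
```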

### Fixes

* Dendrite will no longer load a given event more than once for state resolution, which may help to reduce memory usage and database I/O slightly in some cases
* Large well-known responses will no longer use significant amounts of memory

## Dendrite 0.3.6 (2021-01-18)

### Features

* Experimental support for MSC2946 (Spaces Summary) has been merged
* Send-to-device messages have been refactored and now take advantage of having their own stream position, making delivery more reliable
* Unstable features and MSCs are now listed in `/versions` (contributed by [sumitks866](https://github.com/sumitks866))
* Well-known and DNS SRV record results for federated servers are now cached properly, improving outbound federation performance and reducing traffic

### Fixes

* Updating forward extremities will no longer result in so many unnecessary state snapshots, reducing on-going disk usage in the roomserver database
* Pagination tokens for `/messages` have been fixed, which should improve the reliability of scrollback/pagination
* Dendrite now avoids returning `null`s in fields of the `/sync` response, and omitting some fields altogether when not needed, which should fix sync issues with Element Android
* Requests for user device lists now time out quicker, which prevents federated `/send` requests from also timing out in many cases
* Empty push rules are no longer sent over and over again in `/sync`
* An integer overflow in the device list updater which could result in panics on 32-bit platforms has been fixed (contributed by [Lesterpig](https://github.com/Lesterpig))
* Event IDs are now logged properly in federation sender and sync API consumer errors

## Dendrite 0.3.5 (2021-01-11)

### Features

* All `/sync` streams are now logically separate after a refactoring exercise

### Fixes

* Event references are now deeply checked properly when calculating forward extremities, reducing the amount of forward extremities in most cases, which improves RAM utilisation and reduces the work done by state resolution
* Sync no longer sends incorrect `next_batch` tokens with old stream positions, reducing flashbacks of old messages in clients
* The federation `/send` endpoint no longer uses the request context, which could result in some events failing to be persisted if the sending server gave up the HTTP connection
* Appservices can now auth as users in their namespaces properly

## Dendrite 0.3.4 (2020-12-18)

### Features
@ -89,7 +89,7 @@ func NewInternalAPI(
|
|||
// We can't add ASes at runtime so this is safe to do.
|
||||
if len(workerStates) > 0 {
|
||||
consumer := consumers.NewOutputRoomEventConsumer(
|
||||
base.Cfg, consumer, appserviceDB,
|
||||
base.ProcessContext, base.Cfg, consumer, appserviceDB,
|
||||
rsAPI, workerStates,
|
||||
)
|
||||
if err := consumer.Start(); err != nil {
|
||||
|
|
|
|||
|
|
@ -23,6 +23,7 @@ import (
|
|||
"github.com/matrix-org/dendrite/internal"
|
||||
"github.com/matrix-org/dendrite/roomserver/api"
|
||||
"github.com/matrix-org/dendrite/setup/config"
|
||||
"github.com/matrix-org/dendrite/setup/process"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
|
||||
"github.com/Shopify/sarama"
|
||||
|
|
@ -41,6 +42,7 @@ type OutputRoomEventConsumer struct {
|
|||
// NewOutputRoomEventConsumer creates a new OutputRoomEventConsumer. Call
|
||||
// Start() to begin consuming from room servers.
|
||||
func NewOutputRoomEventConsumer(
|
||||
process *process.ProcessContext,
|
||||
cfg *config.Dendrite,
|
||||
kafkaConsumer sarama.Consumer,
|
||||
appserviceDB storage.Database,
|
||||
|
|
@ -48,6 +50,7 @@ func NewOutputRoomEventConsumer(
|
|||
workerStates []types.ApplicationServiceWorkerState,
|
||||
) *OutputRoomEventConsumer {
|
||||
consumer := internal.ContinualConsumer{
|
||||
Process: process,
|
||||
ComponentName: "appservice/roomserver",
|
||||
Topic: cfg.Global.Kafka.TopicFor(config.TopicOutputRoomEvent),
|
||||
Consumer: kafkaConsumer,
|
||||
|
|
|
|||
build.sh (6 lines changed)

@@ -17,6 +17,8 @@ else
export FLAGS=""
fi

go install -trimpath -ldflags "$FLAGS" -v $PWD/`dirname $0`/cmd/...
mkdir -p bin

GOOS=js GOARCH=wasm go build -trimpath -ldflags "$FLAGS" -o bin/main.wasm ./cmd/dendritejs
CGO_ENABLED=1 go build -trimpath -ldflags "$FLAGS" -v -o "bin/" ./cmd/...

CGO_ENABLED=0 GOOS=js GOARCH=wasm go build -trimpath -ldflags "$FLAGS" -o bin/main.wasm ./cmd/dendritejs
|
|
|
|||
|
|
@ -23,13 +23,13 @@ RUN apt-get update && apt-get -y install python
|
|||
WORKDIR /build
|
||||
ADD https://github.com/matrix-org/go-http-js-libp2p/archive/master.tar.gz /build/libp2p.tar.gz
|
||||
RUN tar xvfz libp2p.tar.gz
|
||||
ADD https://github.com/vector-im/riot-web/archive/matthew/p2p.tar.gz /build/p2p.tar.gz
|
||||
ADD https://github.com/vector-im/element-web/archive/matthew/p2p.tar.gz /build/p2p.tar.gz
|
||||
RUN tar xvfz p2p.tar.gz
|
||||
|
||||
# Install deps for riot-web, symlink in libp2p repo and build that too
|
||||
WORKDIR /build/riot-web-matthew-p2p
|
||||
# Install deps for element-web, symlink in libp2p repo and build that too
|
||||
WORKDIR /build/element-web-matthew-p2p
|
||||
RUN yarn install
|
||||
RUN ln -s /build/go-http-js-libp2p-master /build/riot-web-matthew-p2p/node_modules/go-http-js-libp2p
|
||||
RUN ln -s /build/go-http-js-libp2p-master /build/element-web-matthew-p2p/node_modules/go-http-js-libp2p
|
||||
RUN (cd node_modules/go-http-js-libp2p && yarn install)
|
||||
COPY --from=gobuild /build/dendrite-master/main.wasm ./src/vector/dendrite.wasm
|
||||
# build it all
|
||||
|
|
@ -108,4 +108,4 @@ server { \n\
|
|||
} \n\
|
||||
}' > /etc/nginx/conf.d/default.conf
|
||||
RUN sed -i 's/}/ application\/wasm wasm;\n}/g' /etc/nginx/mime.types
|
||||
COPY --from=jsbuild /build/riot-web-matthew-p2p/webapp /usr/share/nginx/html
|
||||
COPY --from=jsbuild /build/element-web-matthew-p2p/webapp /usr/share/nginx/html
|
||||
|
|
|
|||
|
|
@ -1,10 +0,0 @@
|
|||
FROM docker.io/golang:1.15-alpine AS builder
|
||||
|
||||
RUN apk --update --no-cache add bash build-base
|
||||
|
||||
WORKDIR /build
|
||||
|
||||
COPY . /build
|
||||
|
||||
RUN mkdir -p bin
|
||||
RUN sh ./build.sh
|
||||
|
|
@ -1,11 +1,20 @@
|
|||
FROM matrixdotorg/dendrite:latest AS base
|
||||
FROM docker.io/golang:1.15-alpine AS base
|
||||
|
||||
RUN apk --update --no-cache add bash build-base
|
||||
|
||||
WORKDIR /build
|
||||
|
||||
COPY . /build
|
||||
|
||||
RUN mkdir -p bin
|
||||
RUN go build -trimpath -o bin/ ./cmd/dendrite-monolith-server
|
||||
RUN go build -trimpath -o bin/ ./cmd/goose
|
||||
RUN go build -trimpath -o bin/ ./cmd/create-account
|
||||
RUN go build -trimpath -o bin/ ./cmd/generate-keys
|
||||
|
||||
FROM alpine:latest
|
||||
|
||||
COPY --from=base /build/bin/dendrite-monolith-server /usr/bin
|
||||
COPY --from=base /build/bin/goose /usr/bin
|
||||
COPY --from=base /build/bin/create-account /usr/bin
|
||||
COPY --from=base /build/bin/generate-keys /usr/bin
|
||||
COPY --from=base /build/bin/* /usr/bin
|
||||
|
||||
VOLUME /etc/dendrite
|
||||
WORKDIR /etc/dendrite
|
||||
|
|
|
|||
|
|
@ -1,11 +1,20 @@
|
|||
FROM matrixdotorg/dendrite:latest AS base
|
||||
FROM docker.io/golang:1.15-alpine AS base
|
||||
|
||||
RUN apk --update --no-cache add bash build-base
|
||||
|
||||
WORKDIR /build
|
||||
|
||||
COPY . /build
|
||||
|
||||
RUN mkdir -p bin
|
||||
RUN go build -trimpath -o bin/ ./cmd/dendrite-polylith-multi
|
||||
RUN go build -trimpath -o bin/ ./cmd/goose
|
||||
RUN go build -trimpath -o bin/ ./cmd/create-account
|
||||
RUN go build -trimpath -o bin/ ./cmd/generate-keys
|
||||
|
||||
FROM alpine:latest
|
||||
|
||||
COPY --from=base /build/bin/dendrite-polylith-multi /usr/bin
|
||||
COPY --from=base /build/bin/goose /usr/bin
|
||||
COPY --from=base /build/bin/create-account /usr/bin
|
||||
COPY --from=base /build/bin/generate-keys /usr/bin
|
||||
COPY --from=base /build/bin/* /usr/bin
|
||||
|
||||
VOLUME /etc/dendrite
|
||||
WORKDIR /etc/dendrite
|
||||
|
|
|
|||
|
|
@@ -4,8 +4,8 @@ These are Docker images for Dendrite!

They can be found on Docker Hub:

- [matrixdotorg/dendrite-monolith](https://hub.docker.com/repository/docker/matrixdotorg/dendrite-monolith) for monolith deployments
- [matrixdotorg/dendrite-polylith](https://hub.docker.com/repository/docker/matrixdotorg/dendrite-polylith) for polylith deployments
- [matrixdotorg/dendrite-monolith](https://hub.docker.com/r/matrixdotorg/dendrite-monolith) for monolith deployments
- [matrixdotorg/dendrite-polylith](https://hub.docker.com/r/matrixdotorg/dendrite-polylith) for polylith deployments
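
For example, pulling and running the published monolith image might look like the sketch below. The config path, the port mapping and the reliance on the image's default entrypoint are assumptions, not taken from this page.

```bash
# Pull the published monolith image and run it with a local config directory
# mounted at /etc/dendrite (the volume declared in the Dockerfile).
docker pull matrixdotorg/dendrite-monolith:latest
docker run --rm -v /path/to/config:/etc/dendrite -p 8008:8008 \
  matrixdotorg/dendrite-monolith:latest
```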
## Dockerfiles
|
||||
|
||||
|
|
|
|||
|
|
@ -91,6 +91,17 @@ global:
|
|||
username: metrics
|
||||
password: metrics
|
||||
|
||||
# DNS cache options. The DNS cache may reduce the load on DNS servers
|
||||
# if there is no local caching resolver available for use.
|
||||
dns_cache:
|
||||
# Whether or not the DNS cache is enabled.
|
||||
enabled: false
|
||||
|
||||
# Maximum number of entries to hold in the DNS cache, and
|
||||
# for how long those items should be considered valid in seconds.
|
||||
cache_size: 256
|
||||
cache_lifetime: 300
|
||||
|
||||
# Configuration for the Appservice API.
|
||||
app_service_api:
|
||||
internal_api:
|
||||
|
|
@ -298,12 +309,12 @@ user_api:
|
|||
listen: http://0.0.0.0:7781
|
||||
connect: http://user_api:7781
|
||||
account_database:
|
||||
connection_string: postgresql://dendrite:itsasecret@postgres/dendrite_account?sslmode=disable
|
||||
connection_string: postgresql://dendrite:itsasecret@postgres/dendrite_userapi_accounts?sslmode=disable
|
||||
max_open_conns: 10
|
||||
max_idle_conns: 2
|
||||
conn_max_lifetime: -1
|
||||
device_database:
|
||||
connection_string: postgresql://dendrite:itsasecret@postgres/dendrite_device?sslmode=disable
|
||||
connection_string: postgresql://dendrite:itsasecret@postgres/dendrite_userapi_devices?sslmode=disable
|
||||
max_open_conns: 10
|
||||
max_idle_conns: 2
|
||||
conn_max_lifetime: -1
|
||||
|
|
|
|||
|
|
@ -70,7 +70,7 @@ services:
|
|||
volumes:
|
||||
- ./config:/etc/dendrite
|
||||
networks:
|
||||
- internal
|
||||
- internal
|
||||
|
||||
signing_key_server:
|
||||
hostname: signing_key_server
|
||||
|
|
@ -86,9 +86,9 @@ services:
|
|||
image: matrixdotorg/dendrite-polylith:latest
|
||||
command: userapi
|
||||
volumes:
|
||||
- ./config:/etc/dendrite
|
||||
- ./config:/etc/dendrite
|
||||
networks:
|
||||
- internal
|
||||
- internal
|
||||
|
||||
appservice_api:
|
||||
hostname: appservice_api
|
||||
|
|
|
|||
|
|
@ -6,7 +6,5 @@ TAG=${1:-latest}
|
|||
|
||||
echo "Building tag '${TAG}'"
|
||||
|
||||
docker build -f build/docker/Dockerfile -t matrixdotorg/dendrite:${TAG} .
|
||||
|
||||
docker build -t matrixdotorg/dendrite-monolith:${TAG} -f build/docker/Dockerfile.monolith .
|
||||
docker build -t matrixdotorg/dendrite-polylith:${TAG} -f build/docker/Dockerfile.polylith .
|
||||
|
|
@@ -1,5 +1,5 @@
#!/bin/sh

for db in account device mediaapi syncapi roomserver signingkeyserver keyserver federationsender appservice naffka; do
for db in userapi_accounts userapi_devices mediaapi syncapi roomserver signingkeyserver keyserver federationsender appservice naffka; do
  createdb -U dendrite -O dendrite dendrite_$db
done
|
|
|
|||
|
|
@ -166,6 +166,7 @@ func (m *DendriteMonolith) Start() {
|
|||
),
|
||||
}
|
||||
monolith.AddAllPublicRoutes(
|
||||
base.ProcessContext,
|
||||
base.PublicClientAPIMux,
|
||||
base.PublicFederationAPIMux,
|
||||
base.PublicKeyAPIMux,
|
||||
|
|
|
|||
|
|
@ -46,6 +46,7 @@ func AddPublicRoutes(
|
|||
userAPI userapi.UserInternalAPI,
|
||||
keyAPI keyserverAPI.KeyInternalAPI,
|
||||
extRoomsProvider api.ExtraPublicRoomsProvider,
|
||||
mscCfg *config.MSCs,
|
||||
) {
|
||||
_, producer := kafka.SetupConsumerProducer(&cfg.Matrix.Kafka)
|
||||
|
||||
|
|
@ -57,6 +58,6 @@ func AddPublicRoutes(
|
|||
routing.Setup(
|
||||
router, cfg, eduInputAPI, rsAPI, asAPI,
|
||||
accountsDB, userAPI, federation,
|
||||
syncProducer, transactionsCache, fsAPI, keyAPI, extRoomsProvider,
|
||||
syncProducer, transactionsCache, fsAPI, keyAPI, extRoomsProvider, mscCfg,
|
||||
)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -38,16 +38,17 @@ import (
|
|||
|
||||
// https://matrix.org/docs/spec/client_server/r0.2.0.html#post-matrix-client-r0-createroom
|
||||
type createRoomRequest struct {
|
||||
Invite []string `json:"invite"`
|
||||
Name string `json:"name"`
|
||||
Visibility string `json:"visibility"`
|
||||
Topic string `json:"topic"`
|
||||
Preset string `json:"preset"`
|
||||
CreationContent map[string]interface{} `json:"creation_content"`
|
||||
InitialState []fledglingEvent `json:"initial_state"`
|
||||
RoomAliasName string `json:"room_alias_name"`
|
||||
GuestCanJoin bool `json:"guest_can_join"`
|
||||
RoomVersion gomatrixserverlib.RoomVersion `json:"room_version"`
|
||||
Invite []string `json:"invite"`
|
||||
Name string `json:"name"`
|
||||
Visibility string `json:"visibility"`
|
||||
Topic string `json:"topic"`
|
||||
Preset string `json:"preset"`
|
||||
CreationContent map[string]interface{} `json:"creation_content"`
|
||||
InitialState []fledglingEvent `json:"initial_state"`
|
||||
RoomAliasName string `json:"room_alias_name"`
|
||||
GuestCanJoin bool `json:"guest_can_join"`
|
||||
RoomVersion gomatrixserverlib.RoomVersion `json:"room_version"`
|
||||
PowerLevelContentOverride json.RawMessage `json:"power_level_content_override"`
|
||||
}
|
||||
|
||||
const (
|
||||
|
|
@ -257,6 +258,18 @@ func createRoom(
|
|||
|
||||
var builtEvents []*gomatrixserverlib.HeaderedEvent
|
||||
|
||||
powerLevelContent := eventutil.InitialPowerLevelsContent(userID)
|
||||
if r.PowerLevelContentOverride != nil {
|
||||
// Merge powerLevelContentOverride fields by unmarshalling it atop the defaults
|
||||
err = json.Unmarshal(r.PowerLevelContentOverride, &powerLevelContent)
|
||||
if err != nil {
|
||||
return util.JSONResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
JSON: jsonerror.BadJSON("malformed power_level_content_override"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// send events into the room in order of:
|
||||
// 1- m.room.create
|
||||
// 2- room creator join member
|
||||
|
|
@ -278,7 +291,7 @@ func createRoom(
|
|||
eventsToMake := []fledglingEvent{
|
||||
{"m.room.create", "", r.CreationContent},
|
||||
{"m.room.member", userID, membershipContent},
|
||||
{"m.room.power_levels", "", eventutil.InitialPowerLevelsContent(userID)},
|
||||
{"m.room.power_levels", "", powerLevelContent},
|
||||
{"m.room.join_rules", "", gomatrixserverlib.JoinRuleContent{JoinRule: joinRules}},
|
||||
{"m.room.history_visibility", "", eventutil.HistoryVisibilityContent{HistoryVisibility: historyVisibility}},
|
||||
}
|
||||
|
|
|
|||
|
|
@ -58,17 +58,24 @@ func Setup(
|
|||
federationSender federationSenderAPI.FederationSenderInternalAPI,
|
||||
keyAPI keyserverAPI.KeyInternalAPI,
|
||||
extRoomsProvider api.ExtraPublicRoomsProvider,
|
||||
mscCfg *config.MSCs,
|
||||
) {
|
||||
rateLimits := newRateLimits(&cfg.RateLimiting)
|
||||
userInteractiveAuth := auth.NewUserInteractive(accountDB.GetAccountByPassword, cfg)
|
||||
|
||||
unstableFeatures := make(map[string]bool)
|
||||
for _, msc := range cfg.MSCs.MSCs {
|
||||
unstableFeatures["org.matrix."+msc] = true
|
||||
}
|
||||
|
||||
publicAPIMux.Handle("/versions",
|
||||
httputil.MakeExternalAPI("versions", func(req *http.Request) util.JSONResponse {
|
||||
return util.JSONResponse{
|
||||
Code: http.StatusOK,
|
||||
JSON: struct {
|
||||
Versions []string `json:"versions"`
|
||||
}{[]string{
|
||||
Versions []string `json:"versions"`
|
||||
UnstableFeatures map[string]bool `json:"unstable_features"`
|
||||
}{Versions: []string{
|
||||
"r0.0.1",
|
||||
"r0.1.0",
|
||||
"r0.2.0",
|
||||
|
|
@ -76,7 +83,7 @@ func Setup(
|
|||
"r0.4.0",
|
||||
"r0.5.0",
|
||||
"r0.6.1",
|
||||
}},
|
||||
}, UnstableFeatures: unstableFeatures},
|
||||
}
|
||||
}),
|
||||
).Methods(http.MethodGet, http.MethodOptions)
|
||||
|
|
@ -104,20 +111,23 @@ func Setup(
|
|||
)
|
||||
}),
|
||||
).Methods(http.MethodPost, http.MethodOptions)
|
||||
r0mux.Handle("/peek/{roomIDOrAlias}",
|
||||
httputil.MakeAuthAPI(gomatrixserverlib.Peek, userAPI, func(req *http.Request, device *userapi.Device) util.JSONResponse {
|
||||
if r := rateLimits.rateLimit(req); r != nil {
|
||||
return *r
|
||||
}
|
||||
vars, err := httputil.URLDecodeMapValues(mux.Vars(req))
|
||||
if err != nil {
|
||||
return util.ErrorResponse(err)
|
||||
}
|
||||
return PeekRoomByIDOrAlias(
|
||||
req, device, rsAPI, accountDB, vars["roomIDOrAlias"],
|
||||
)
|
||||
}),
|
||||
).Methods(http.MethodPost, http.MethodOptions)
|
||||
|
||||
if mscCfg.Enabled("msc2753") {
|
||||
r0mux.Handle("/peek/{roomIDOrAlias}",
|
||||
httputil.MakeAuthAPI(gomatrixserverlib.Peek, userAPI, func(req *http.Request, device *userapi.Device) util.JSONResponse {
|
||||
if r := rateLimits.rateLimit(req); r != nil {
|
||||
return *r
|
||||
}
|
||||
vars, err := httputil.URLDecodeMapValues(mux.Vars(req))
|
||||
if err != nil {
|
||||
return util.ErrorResponse(err)
|
||||
}
|
||||
return PeekRoomByIDOrAlias(
|
||||
req, device, rsAPI, accountDB, vars["roomIDOrAlias"],
|
||||
)
|
||||
}),
|
||||
).Methods(http.MethodPost, http.MethodOptions)
|
||||
}
|
||||
r0mux.Handle("/joined_rooms",
|
||||
httputil.MakeAuthAPI("joined_rooms", userAPI, func(req *http.Request, device *userapi.Device) util.JSONResponse {
|
||||
return GetJoinedRooms(req, device, rsAPI)
|
||||
|
|
|
|||
|
|
@ -76,9 +76,10 @@ func createFederationClient(
|
|||
"matrix",
|
||||
p2phttp.NewTransport(base.LibP2P, p2phttp.ProtocolOption("/matrix")),
|
||||
)
|
||||
return gomatrixserverlib.NewFederationClientWithTransport(
|
||||
return gomatrixserverlib.NewFederationClient(
|
||||
base.Base.Cfg.Global.ServerName, base.Base.Cfg.Global.KeyID,
|
||||
base.Base.Cfg.Global.PrivateKey, true, tr,
|
||||
base.Base.Cfg.Global.PrivateKey,
|
||||
gomatrixserverlib.WithTransport(tr),
|
||||
)
|
||||
}
|
||||
|
||||
|
|
@ -90,7 +91,9 @@ func createClient(
|
|||
"matrix",
|
||||
p2phttp.NewTransport(base.LibP2P, p2phttp.ProtocolOption("/matrix")),
|
||||
)
|
||||
return gomatrixserverlib.NewClientWithTransport(tr)
|
||||
return gomatrixserverlib.NewClient(
|
||||
gomatrixserverlib.WithTransport(tr),
|
||||
)
|
||||
}
|
||||
|
||||
func main() {
|
||||
|
|
@ -189,6 +192,7 @@ func main() {
|
|||
ExtPublicRoomsProvider: provider,
|
||||
}
|
||||
monolith.AddAllPublicRoutes(
|
||||
base.Base.ProcessContext,
|
||||
base.Base.PublicClientAPIMux,
|
||||
base.Base.PublicFederationAPIMux,
|
||||
base.Base.PublicKeyAPIMux,
|
||||
|
|
@ -231,5 +235,5 @@ func main() {
|
|||
}
|
||||
|
||||
// We want to block forever to let the HTTP and HTTPS handler serve the APIs
|
||||
select {}
|
||||
base.Base.WaitForShutdown()
|
||||
}
|
||||
|
|
|
|||
|
|
@ -150,6 +150,7 @@ func main() {
|
|||
),
|
||||
}
|
||||
monolith.AddAllPublicRoutes(
|
||||
base.ProcessContext,
|
||||
base.PublicClientAPIMux,
|
||||
base.PublicFederationAPIMux,
|
||||
base.PublicKeyAPIMux,
|
||||
|
|
@ -200,5 +201,6 @@ func main() {
|
|||
}
|
||||
}()
|
||||
|
||||
select {}
|
||||
// We want to block forever to let the HTTP and HTTPS handler serve the APIs
|
||||
base.WaitForShutdown()
|
||||
}
|
||||
|
|
|
|||
|
|
@ -33,7 +33,9 @@ func (n *Node) CreateClient(
|
|||
},
|
||||
},
|
||||
)
|
||||
return gomatrixserverlib.NewClientWithTransport(tr)
|
||||
return gomatrixserverlib.NewClient(
|
||||
gomatrixserverlib.WithTransport(tr),
|
||||
)
|
||||
}
|
||||
|
||||
func (n *Node) CreateFederationClient(
|
||||
|
|
@ -53,8 +55,9 @@ func (n *Node) CreateFederationClient(
|
|||
},
|
||||
},
|
||||
)
|
||||
return gomatrixserverlib.NewFederationClientWithTransport(
|
||||
return gomatrixserverlib.NewFederationClient(
|
||||
base.Cfg.Global.ServerName, base.Cfg.Global.KeyID,
|
||||
base.Cfg.Global.PrivateKey, true, tr,
|
||||
base.Cfg.Global.PrivateKey,
|
||||
gomatrixserverlib.WithTransport(tr),
|
||||
)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -144,6 +144,7 @@ func main() {
|
|||
KeyAPI: keyAPI,
|
||||
}
|
||||
monolith.AddAllPublicRoutes(
|
||||
base.ProcessContext,
|
||||
base.PublicClientAPIMux,
|
||||
base.PublicFederationAPIMux,
|
||||
base.PublicKeyAPIMux,
|
||||
|
|
@ -176,5 +177,5 @@ func main() {
|
|||
}
|
||||
|
||||
// We want to block forever to let the HTTP and HTTPS handler serve the APIs
|
||||
select {}
|
||||
base.WaitForShutdown()
|
||||
}
|
||||
|
|
|
|||
|
|
@ -74,5 +74,6 @@ func main() {
|
|||
base := setup.NewBaseDendrite(cfg, component, false) // TODO
|
||||
defer base.Close() // nolint: errcheck
|
||||
|
||||
start(base, cfg)
|
||||
go start(base, cfg)
|
||||
base.WaitForShutdown()
|
||||
}
|
||||
|
|
|
|||
|
|
@ -35,6 +35,7 @@ func ClientAPI(base *setup.BaseDendrite, cfg *config.Dendrite) {
|
|||
clientapi.AddPublicRoutes(
|
||||
base.PublicClientAPIMux, &base.Cfg.ClientAPI, accountDB, federation,
|
||||
rsAPI, eduInputAPI, asQuery, transactions.New(), fsAPI, userAPI, keyAPI, nil,
|
||||
&cfg.MSCs,
|
||||
)
|
||||
|
||||
base.SetupAndServeHTTP(
|
||||
|
|
|
|||
|
|
@ -33,6 +33,7 @@ func FederationAPI(base *setup.BaseDendrite, cfg *config.Dendrite) {
|
|||
base.PublicFederationAPIMux, base.PublicKeyAPIMux,
|
||||
&base.Cfg.FederationAPI, userAPI, federation, keyRing,
|
||||
rsAPI, fsAPI, base.EDUServerClient(), keyAPI,
|
||||
&base.Cfg.MSCs,
|
||||
)
|
||||
|
||||
base.SetupAndServeHTTP(
|
||||
|
|
|
|||
|
|
@ -27,6 +27,7 @@ func SyncAPI(base *setup.BaseDendrite, cfg *config.Dendrite) {
|
|||
rsAPI := base.RoomserverHTTPClient()
|
||||
|
||||
syncapi.AddPublicRoutes(
|
||||
base.ProcessContext,
|
||||
base.PublicClientAPIMux, userAPI, rsAPI,
|
||||
base.KeyServerHTTPClient(),
|
||||
federation, &cfg.SyncAPI,
|
||||
|
|
|
|||
|
|
@ -139,16 +139,18 @@ func createFederationClient(cfg *config.Dendrite, node *go_http_js_libp2p.P2pLoc
|
|||
tr := go_http_js_libp2p.NewP2pTransport(node)
|
||||
|
||||
fed := gomatrixserverlib.NewFederationClient(
|
||||
cfg.Global.ServerName, cfg.Global.KeyID, cfg.Global.PrivateKey, true,
|
||||
cfg.Global.ServerName, cfg.Global.KeyID, cfg.Global.PrivateKey,
|
||||
gomatrixserverlib.WithTransport(tr),
|
||||
)
|
||||
fed.Client = *gomatrixserverlib.NewClientWithTransport(tr)
|
||||
|
||||
return fed
|
||||
}
|
||||
|
||||
func createClient(node *go_http_js_libp2p.P2pLocalNode) *gomatrixserverlib.Client {
|
||||
tr := go_http_js_libp2p.NewP2pTransport(node)
|
||||
return gomatrixserverlib.NewClientWithTransport(tr)
|
||||
return gomatrixserverlib.NewClient(
|
||||
gomatrixserverlib.WithTransport(tr),
|
||||
)
|
||||
}
|
||||
|
||||
func createP2PNode(privKey ed25519.PrivateKey) (serverName string, node *go_http_js_libp2p.P2pLocalNode) {
|
||||
|
|
@ -229,6 +231,7 @@ func main() {
|
|||
ExtPublicRoomsProvider: p2pPublicRoomProvider,
|
||||
}
|
||||
monolith.AddAllPublicRoutes(
|
||||
base.ProcessContext,
|
||||
base.PublicClientAPIMux,
|
||||
base.PublicFederationAPIMux,
|
||||
base.PublicKeyAPIMux,
|
||||
|
|
|
|||
|
|
@ -54,7 +54,6 @@ func main() {
|
|||
gomatrixserverlib.ServerName(*requestFrom),
|
||||
gomatrixserverlib.KeyID(keyBlock.Headers["Key-ID"]),
|
||||
privateKey,
|
||||
false,
|
||||
)
|
||||
|
||||
u, err := url.Parse(flag.Arg(0))
|
||||
|
|
|
|||
|
|
@ -63,7 +63,7 @@ func main() {
|
|||
if *defaultsForCI {
|
||||
cfg.ClientAPI.RateLimiting.Enabled = false
|
||||
cfg.FederationSender.DisableTLSValidation = true
|
||||
cfg.MSCs.MSCs = []string{"msc2836"}
|
||||
cfg.MSCs.MSCs = []string{"msc2836", "msc2946", "msc2444", "msc2753"}
|
||||
cfg.Logging[0].Level = "trace"
|
||||
// don't hit matrix.org when running tests!!!
|
||||
cfg.SigningKeyServer.KeyPerspectives = config.KeyPerspectives{}
|
||||
|
|
|
|||
|
|
@ -8,7 +8,6 @@ import (
|
|||
"strconv"
|
||||
|
||||
"github.com/matrix-org/dendrite/internal/caching"
|
||||
"github.com/matrix-org/dendrite/roomserver/state"
|
||||
"github.com/matrix-org/dendrite/roomserver/storage"
|
||||
"github.com/matrix-org/dendrite/roomserver/types"
|
||||
"github.com/matrix-org/dendrite/setup"
|
||||
|
|
@ -105,7 +104,7 @@ func main() {
|
|||
}
|
||||
|
||||
fmt.Println("Resolving state")
|
||||
resolved, err := state.ResolveConflictsAdhoc(
|
||||
resolved, err := gomatrixserverlib.ResolveConflicts(
|
||||
gomatrixserverlib.RoomVersion(*roomVersion),
|
||||
events,
|
||||
authEvents,
|
||||
|
|
|
|||
|
|
@ -103,6 +103,17 @@ global:
|
|||
username: metrics
|
||||
password: metrics
|
||||
|
||||
# DNS cache options. The DNS cache may reduce the load on DNS servers
|
||||
# if there is no local caching resolver available for use.
|
||||
dns_cache:
|
||||
# Whether or not the DNS cache is enabled.
|
||||
enabled: false
|
||||
|
||||
# Maximum number of entries to hold in the DNS cache, and
|
||||
# for how long those items should be considered valid in seconds.
|
||||
cache_size: 256
|
||||
cache_lifetime: 300
|
||||
|
||||
# Configuration for the Appservice API.
|
||||
app_service_api:
|
||||
internal_api:
|
||||
|
|
@ -253,6 +264,19 @@ media_api:
|
|||
height: 480
|
||||
method: scale
|
||||
|
||||
# Configuration for experimental MSCs
|
||||
mscs:
|
||||
# A list of enabled MSCs
|
||||
# Currently valid values are:
|
||||
# - msc2836 (Threading, see https://github.com/matrix-org/matrix-doc/pull/2836)
|
||||
# - msc2946 (Spaces Summary, see https://github.com/matrix-org/matrix-doc/pull/2946)
|
||||
mscs: []
|
||||
database:
|
||||
connection_string: file:mscs.db
|
||||
max_open_conns: 10
|
||||
max_idle_conns: 2
|
||||
conn_max_lifetime: -1
|
||||
|
||||
# Configuration for the Room Server.
|
||||
room_server:
|
||||
internal_api:
|
||||
|
|
|
|||
|
|
@@ -109,7 +109,7 @@ On macOS, omit `sudo -u postgres` from the below commands.
* If you want to run each Dendrite component with its own database:

  ```bash
  for i in mediaapi syncapi roomserver signingkeyserver federationsender appservice keyserver userapi_account userapi_device naffka; do
  for i in mediaapi syncapi roomserver signingkeyserver federationsender appservice keyserver userapi_accounts userapi_devices naffka; do
      sudo -u postgres createdb -O dendrite dendrite_$i
  done
  ```
|
|
|
|||
|
|
@@ -1,19 +1,26 @@
## Peeking

Peeking is implemented as per [MSC2753](https://github.com/matrix-org/matrix-doc/pull/2753).
Local peeking is implemented as per [MSC2753](https://github.com/matrix-org/matrix-doc/pull/2753).

Implementation-wise, this means:
* Users call `/peek` and `/unpeek` on the clientapi from a given device.
* The clientapi delegates these via HTTP to the roomserver, which coordinates peeking in general for a given room
* The roomserver writes a NewPeek event into the kafka log headed to the syncserver
* The syncserver tracks the existence of the local peek in its DB, and then starts waking up the peeking devices for the room in question, putting it in the `peek` section of the /sync response.
* The syncserver tracks the existence of the local peek in the syncapi_peeks table in its DB, and then starts waking up the peeking devices for the room in question, putting it in the `peek` section of the /sync response.

Questions (given this is [my](https://github.com/ara4n) first time hacking on Dendrite):
* The whole clientapi -> roomserver -> syncapi flow to initiate a peek seems very indirect. Is there a reason not to just let syncapi itself host the implementation of `/peek`?
Peeking over federation is implemented as per [MSC2444](https://github.com/matrix-org/matrix-doc/pull/2444).

In future, peeking over federation will be added as per [MSC2444](https://github.com/matrix-org/matrix-doc/pull/2444).
* The `roomserver` will kick the `federationsender` much as it does for a federated `/join` in order to trigger a federated `/peek`
* The `federationsender` tracks the existence of the remote peek in question

For requests to peek our rooms ("inbound peeks"):
* Remote servers call `/peek` on federationapi
* The federationapi queries the federationsender to check if this is renewing an inbound peek or not.
* If not, it hits the PerformInboundPeek on the roomserver to ask it for the current state of the room.
* The roomserver atomically (in theory) adds a NewInboundPeek to its kafka stream to tell the federationsender to start peeking.
* The federationsender receives the event, tracks the inbound peek in the federationsender_inbound_peeks table, and starts sending events to the peeking server.
* The federationsender evicts stale inbound peeks which haven't been renewed.

For peeking into other servers' rooms ("outbound peeks"):
* The `roomserver` will kick the `federationsender` much as it does for a federated `/join` in order to trigger a federated outbound `/peek`
* The `federationsender` tracks the existence of the outbound peek in its federationsender_outbound_peeks table.
* The `federationsender` regularly renews the remote peek as long as there are still peeking devices syncing for it.
* TBD: how do we tell if there are no devices currently syncing for a given peeked room? The syncserver needs to tell the roomserver somehow, who then needs to warn the federationsender.
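
The clientapi route for local peeks added elsewhere in this commit is registered behind the `msc2753` flag. Assuming the usual `/_matrix/client/r0` prefix and an empty JSON body, kicking off a peek might look like this hedged sketch (server URL, access token and room alias are placeholders):

```bash
# Hypothetical example: start peeking a public room without joining it.
# Requires "msc2753" to be listed under mscs in the Dendrite config.
curl -X POST \
  "https://localhost:8008/_matrix/client/r0/peek/%23somewhere:example.com?access_token=ACCESS_TOKEN" \
  -H "Content-Type: application/json" -d '{}'
```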
@ -5,6 +5,7 @@ After=network.target
|
|||
After=postgresql.service
|
||||
|
||||
[Service]
|
||||
Environment=GODEBUG=madvdontneed=1
|
||||
RestartSec=2s
|
||||
Type=simple
|
||||
User=dendrite
|
||||
|
|
|
|||
13
eduserver/cache/cache.go
vendored
13
eduserver/cache/cache.go
vendored
|
|
@ -113,19 +113,6 @@ func (t *EDUCache) AddTypingUser(
|
|||
return t.GetLatestSyncPosition()
|
||||
}
|
||||
|
||||
// AddSendToDeviceMessage increases the sync position for
|
||||
// send-to-device updates.
|
||||
// Returns the sync position before update, as the caller
|
||||
// will use this to record the current stream position
|
||||
// at the time that the send-to-device message was sent.
|
||||
func (t *EDUCache) AddSendToDeviceMessage() int64 {
|
||||
t.Lock()
|
||||
defer t.Unlock()
|
||||
latestSyncPosition := t.latestSyncPosition
|
||||
t.latestSyncPosition++
|
||||
return latestSyncPosition
|
||||
}
|
||||
|
||||
// addUser with mutex lock & replace the previous timer.
|
||||
// Returns the latest typing sync position after update.
|
||||
func (t *EDUCache) addUser(
|
||||
|
|
|
|||
|
|
@ -38,10 +38,11 @@ func AddPublicRoutes(
|
|||
federationSenderAPI federationSenderAPI.FederationSenderInternalAPI,
|
||||
eduAPI eduserverAPI.EDUServerInputAPI,
|
||||
keyAPI keyserverAPI.KeyInternalAPI,
|
||||
mscCfg *config.MSCs,
|
||||
) {
|
||||
routing.Setup(
|
||||
fedRouter, keyRouter, cfg, rsAPI,
|
||||
eduAPI, federationSenderAPI, keyRing,
|
||||
federation, userAPI, keyAPI,
|
||||
federation, userAPI, keyAPI, mscCfg,
|
||||
)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -31,12 +31,15 @@ func TestRoomsV3URLEscapeDoNot404(t *testing.T) {
|
|||
fsAPI := base.FederationSenderHTTPClient()
|
||||
// TODO: This is pretty fragile, as if anything calls anything on these nils this test will break.
|
||||
// Unfortunately, it makes little sense to instantiate these dependencies when we just want to test routing.
|
||||
federationapi.AddPublicRoutes(base.PublicFederationAPIMux, base.PublicKeyAPIMux, &cfg.FederationAPI, nil, nil, keyRing, nil, fsAPI, nil, nil)
|
||||
federationapi.AddPublicRoutes(base.PublicFederationAPIMux, base.PublicKeyAPIMux, &cfg.FederationAPI, nil, nil, keyRing, nil, fsAPI, nil, nil, &cfg.MSCs)
|
||||
baseURL, cancel := test.ListenAndServe(t, base.PublicFederationAPIMux, true)
|
||||
defer cancel()
|
||||
serverName := gomatrixserverlib.ServerName(strings.TrimPrefix(baseURL, "https://"))
|
||||
|
||||
fedCli := gomatrixserverlib.NewFederationClient(serverName, cfg.Global.KeyID, cfg.Global.PrivateKey, true)
|
||||
fedCli := gomatrixserverlib.NewFederationClient(
|
||||
serverName, cfg.Global.KeyID, cfg.Global.PrivateKey,
|
||||
gomatrixserverlib.WithSkipVerify(true),
|
||||
)
|
||||
|
||||
testCases := []struct {
|
||||
roomVer gomatrixserverlib.RoomVersion
|
||||
|
|
|
|||
102
federationapi/routing/peek.go
Normal file
102
federationapi/routing/peek.go
Normal file
|
|
@ -0,0 +1,102 @@
|
|||
// Copyright 2020 New Vector Ltd
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package routing
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/matrix-org/dendrite/clientapi/jsonerror"
|
||||
"github.com/matrix-org/dendrite/roomserver/api"
|
||||
"github.com/matrix-org/dendrite/setup/config"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
"github.com/matrix-org/util"
|
||||
)
|
||||
|
||||
// Peek implements the SS /peek API, handling inbound peeks
|
||||
func Peek(
|
||||
httpReq *http.Request,
|
||||
request *gomatrixserverlib.FederationRequest,
|
||||
cfg *config.FederationAPI,
|
||||
rsAPI api.RoomserverInternalAPI,
|
||||
roomID, peekID string,
|
||||
remoteVersions []gomatrixserverlib.RoomVersion,
|
||||
) util.JSONResponse {
|
||||
// TODO: check if we're just refreshing an existing peek by querying the federationsender
|
||||
|
||||
verReq := api.QueryRoomVersionForRoomRequest{RoomID: roomID}
|
||||
verRes := api.QueryRoomVersionForRoomResponse{}
|
||||
if err := rsAPI.QueryRoomVersionForRoom(httpReq.Context(), &verReq, &verRes); err != nil {
|
||||
return util.JSONResponse{
|
||||
Code: http.StatusInternalServerError,
|
||||
JSON: jsonerror.InternalServerError(),
|
||||
}
|
||||
}
|
||||
|
||||
// Check that the room that the peeking server is trying to peek is actually
|
||||
// one of the room versions that they listed in their supported ?ver= in
|
||||
// the peek URL.
|
||||
remoteSupportsVersion := false
|
||||
for _, v := range remoteVersions {
|
||||
if v == verRes.RoomVersion {
|
||||
remoteSupportsVersion = true
|
||||
break
|
||||
}
|
||||
}
|
||||
// If it isn't, stop trying to peek the room.
|
||||
if !remoteSupportsVersion {
|
||||
return util.JSONResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
JSON: jsonerror.IncompatibleRoomVersion(verRes.RoomVersion),
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: Check history visibility
|
||||
|
||||
// tell the peeking server to renew every hour
|
||||
renewalInterval := int64(60 * 60 * 1000 * 1000)
|
||||
|
||||
var response api.PerformInboundPeekResponse
|
||||
err := rsAPI.PerformInboundPeek(
|
||||
httpReq.Context(),
|
||||
&api.PerformInboundPeekRequest{
|
||||
RoomID: roomID,
|
||||
PeekID: peekID,
|
||||
ServerName: request.Origin(),
|
||||
RenewalInterval: renewalInterval,
|
||||
},
|
||||
&response,
|
||||
)
|
||||
if err != nil {
|
||||
resErr := util.ErrorResponse(err)
|
||||
return resErr
|
||||
}
|
||||
|
||||
if !response.RoomExists {
|
||||
return util.JSONResponse{Code: http.StatusNotFound, JSON: nil}
|
||||
}
|
||||
|
||||
respPeek := gomatrixserverlib.RespPeek{
|
||||
StateEvents: gomatrixserverlib.UnwrapEventHeaders(response.StateEvents),
|
||||
AuthEvents: gomatrixserverlib.UnwrapEventHeaders(response.AuthChainEvents),
|
||||
RoomVersion: response.RoomVersion,
|
||||
LatestEvent: response.LatestEvent.Unwrap(),
|
||||
RenewalInterval: renewalInterval,
|
||||
}
|
||||
|
||||
return util.JSONResponse{
|
||||
Code: http.StatusOK,
|
||||
JSON: respPeek,
|
||||
}
|
||||
}
|
||||
|
|
@ -48,6 +48,7 @@ func Setup(
|
|||
federation *gomatrixserverlib.FederationClient,
|
||||
userAPI userapi.UserInternalAPI,
|
||||
keyAPI keyserverAPI.KeyInternalAPI,
|
||||
mscCfg *config.MSCs,
|
||||
) {
|
||||
v2keysmux := keyMux.PathPrefix("/v2").Subrouter()
|
||||
v1fedmux := fedMux.PathPrefix("/v1").Subrouter()
|
||||
|
|
@ -229,7 +230,39 @@ func Setup(
|
|||
},
|
||||
)).Methods(http.MethodGet)
|
||||
|
||||
v1fedmux.Handle("/make_join/{roomID}/{eventID}", httputil.MakeFedAPI(
|
||||
if mscCfg.Enabled("msc2444") {
|
||||
v1fedmux.Handle("/peek/{roomID}/{peekID}", httputil.MakeFedAPI(
|
||||
"federation_peek", cfg.Matrix.ServerName, keys, wakeup,
|
||||
func(httpReq *http.Request, request *gomatrixserverlib.FederationRequest, vars map[string]string) util.JSONResponse {
|
||||
if roomserverAPI.IsServerBannedFromRoom(httpReq.Context(), rsAPI, vars["roomID"], request.Origin()) {
|
||||
return util.JSONResponse{
|
||||
Code: http.StatusForbidden,
|
||||
JSON: jsonerror.Forbidden("Forbidden by server ACLs"),
|
||||
}
|
||||
}
|
||||
roomID := vars["roomID"]
|
||||
peekID := vars["peekID"]
|
||||
queryVars := httpReq.URL.Query()
|
||||
remoteVersions := []gomatrixserverlib.RoomVersion{}
|
||||
if vers, ok := queryVars["ver"]; ok {
|
||||
// The remote side supplied a ?ver= so use that to build up the list
|
||||
// of supported room versions
|
||||
for _, v := range vers {
|
||||
remoteVersions = append(remoteVersions, gomatrixserverlib.RoomVersion(v))
|
||||
}
|
||||
} else {
|
||||
// The remote side didn't supply a ?ver= so just assume that they only
|
||||
// support room version 1
|
||||
remoteVersions = append(remoteVersions, gomatrixserverlib.RoomVersionV1)
|
||||
}
|
||||
return Peek(
|
||||
httpReq, request, cfg, rsAPI, roomID, peekID, remoteVersions,
|
||||
)
|
||||
},
|
||||
)).Methods(http.MethodPut, http.MethodDelete)
|
||||
}
|
||||
|
||||
v1fedmux.Handle("/make_join/{roomID}/{userID}", httputil.MakeFedAPI(
|
||||
"federation_make_join", cfg.Matrix.ServerName, keys, wakeup,
|
||||
func(httpReq *http.Request, request *gomatrixserverlib.FederationRequest, vars map[string]string) util.JSONResponse {
|
||||
if roomserverAPI.IsServerBannedFromRoom(httpReq.Context(), rsAPI, vars["roomID"], request.Origin()) {
|
||||
|
|
@ -239,11 +272,11 @@ func Setup(
|
|||
}
|
||||
}
|
||||
roomID := vars["roomID"]
|
||||
eventID := vars["eventID"]
|
||||
userID := vars["userID"]
|
||||
queryVars := httpReq.URL.Query()
|
||||
remoteVersions := []gomatrixserverlib.RoomVersion{}
|
||||
if vers, ok := queryVars["ver"]; ok {
|
||||
// The remote side supplied a ?=ver so use that to build up the list
|
||||
// The remote side supplied a ?ver= so use that to build up the list
|
||||
// of supported room versions
|
||||
for _, v := range vers {
|
||||
remoteVersions = append(remoteVersions, gomatrixserverlib.RoomVersion(v))
|
||||
|
|
@ -255,7 +288,7 @@ func Setup(
|
|||
remoteVersions = append(remoteVersions, gomatrixserverlib.RoomVersionV1)
|
||||
}
|
||||
return MakeJoin(
|
||||
httpReq, request, cfg, rsAPI, roomID, eventID, remoteVersions,
|
||||
httpReq, request, cfg, rsAPI, roomID, userID, remoteVersions,
|
||||
)
|
||||
},
|
||||
)).Methods(http.MethodGet)
|
||||
|
|
|
|||
|
|
@ -102,11 +102,13 @@ func Send(
|
|||
|
||||
type txnReq struct {
|
||||
gomatrixserverlib.Transaction
|
||||
rsAPI api.RoomserverInternalAPI
|
||||
eduAPI eduserverAPI.EDUServerInputAPI
|
||||
keyAPI keyapi.KeyInternalAPI
|
||||
keys gomatrixserverlib.JSONVerifier
|
||||
federation txnFederationClient
|
||||
rsAPI api.RoomserverInternalAPI
|
||||
eduAPI eduserverAPI.EDUServerInputAPI
|
||||
keyAPI keyapi.KeyInternalAPI
|
||||
keys gomatrixserverlib.JSONVerifier
|
||||
federation txnFederationClient
|
||||
servers []gomatrixserverlib.ServerName
|
||||
serversMutex sync.RWMutex
|
||||
// local cache of events for auth checks, etc - this may include events
|
||||
// which the roomserver is unaware of.
|
||||
haveEvents map[string]*gomatrixserverlib.HeaderedEvent
|
||||
|
|
@ -404,16 +406,21 @@ func (t *txnReq) processDeviceListUpdate(ctx context.Context, e gomatrixserverli
|
|||
}
|
||||
|
||||
func (t *txnReq) getServers(ctx context.Context, roomID string) []gomatrixserverlib.ServerName {
|
||||
servers := []gomatrixserverlib.ServerName{t.Origin}
|
||||
t.serversMutex.Lock()
|
||||
defer t.serversMutex.Unlock()
|
||||
if t.servers != nil {
|
||||
return t.servers
|
||||
}
|
||||
t.servers = []gomatrixserverlib.ServerName{t.Origin}
|
||||
serverReq := &api.QueryServerJoinedToRoomRequest{
|
||||
RoomID: roomID,
|
||||
}
|
||||
serverRes := &api.QueryServerJoinedToRoomResponse{}
|
||||
if err := t.rsAPI.QueryServerJoinedToRoom(ctx, serverReq, serverRes); err == nil {
|
||||
servers = append(servers, serverRes.ServerNames...)
|
||||
util.GetLogger(ctx).Infof("Found %d server(s) to query for missing events in %q", len(servers), roomID)
|
||||
t.servers = append(t.servers, serverRes.ServerNames...)
|
||||
util.GetLogger(ctx).Infof("Found %d server(s) to query for missing events in %q", len(t.servers), roomID)
|
||||
}
|
||||
return servers
|
||||
return t.servers
|
||||
}
|
||||
|
||||
func (t *txnReq) processEvent(ctx context.Context, e *gomatrixserverlib.Event) error {
|
||||
|
|
@ -482,14 +489,10 @@ func (t *txnReq) retrieveMissingAuthEvents(
|
|||
missingAuthEvents[missingAuthEventID] = struct{}{}
|
||||
}
|
||||
|
||||
servers := t.getServers(ctx, e.RoomID())
|
||||
if len(servers) > 5 {
|
||||
servers = servers[:5]
|
||||
}
|
||||
withNextEvent:
|
||||
for missingAuthEventID := range missingAuthEvents {
|
||||
withNextServer:
|
||||
for _, server := range servers {
|
||||
for _, server := range t.getServers(ctx, e.RoomID()) {
|
||||
logger.Infof("Retrieving missing auth event %q from %q", missingAuthEventID, server)
|
||||
tx, err := t.federation.GetEvent(ctx, server, missingAuthEventID)
|
||||
if err != nil {
|
||||
|
|
@ -692,13 +695,8 @@ func (t *txnReq) lookupStateAfterEvent(ctx context.Context, roomVersion gomatrix
|
|||
return nil, false, fmt.Errorf("t.lookupStateBeforeEvent: %w", err)
|
||||
}
|
||||
|
||||
servers := t.getServers(ctx, roomID)
|
||||
if len(servers) > 5 {
|
||||
servers = servers[:5]
|
||||
}
|
||||
|
||||
// fetch the event we're missing and add it to the pile
|
||||
h, err := t.lookupEvent(ctx, roomVersion, eventID, false, servers)
|
||||
h, err := t.lookupEvent(ctx, roomVersion, roomID, eventID, false)
|
||||
switch err.(type) {
|
||||
case verifySigError:
|
||||
return respState, false, nil
|
||||
|
|
@ -740,11 +738,10 @@ func (t *txnReq) lookupStateAfterEventLocally(ctx context.Context, roomID, event
|
|||
t.haveEvents[ev.EventID()] = res.StateEvents[i]
|
||||
}
|
||||
var authEvents []*gomatrixserverlib.Event
|
||||
missingAuthEvents := make(map[string]bool)
|
||||
missingAuthEvents := map[string]bool{}
|
||||
for _, ev := range res.StateEvents {
|
||||
for _, ae := range ev.AuthEventIDs() {
|
||||
aev, ok := t.haveEvents[ae]
|
||||
if ok {
|
||||
if aev, ok := t.haveEvents[ae]; ok {
|
||||
authEvents = append(authEvents, aev.Unwrap())
|
||||
} else {
|
||||
missingAuthEvents[ae] = true
|
||||
|
|
@ -753,27 +750,28 @@ func (t *txnReq) lookupStateAfterEventLocally(ctx context.Context, roomID, event
|
|||
}
|
||||
// QueryStateAfterEvents does not return the auth events, so fetch them now. We know the roomserver has them else it wouldn't
|
||||
// have stored the event.
|
||||
var missingEventList []string
|
||||
for evID := range missingAuthEvents {
|
||||
missingEventList = append(missingEventList, evID)
|
||||
}
|
||||
queryReq := api.QueryEventsByIDRequest{
|
||||
EventIDs: missingEventList,
|
||||
}
|
||||
util.GetLogger(ctx).Infof("Fetching missing auth events: %v", missingEventList)
|
||||
var queryRes api.QueryEventsByIDResponse
|
||||
if err = t.rsAPI.QueryEventsByID(ctx, &queryReq, &queryRes); err != nil {
|
||||
return nil
|
||||
}
|
||||
for i := range queryRes.Events {
|
||||
evID := queryRes.Events[i].EventID()
|
||||
t.haveEvents[evID] = queryRes.Events[i]
|
||||
authEvents = append(authEvents, queryRes.Events[i].Unwrap())
|
||||
if len(missingAuthEvents) > 0 {
|
||||
var missingEventList []string
|
||||
for evID := range missingAuthEvents {
|
||||
missingEventList = append(missingEventList, evID)
|
||||
}
|
||||
queryReq := api.QueryEventsByIDRequest{
|
||||
EventIDs: missingEventList,
|
||||
}
|
||||
util.GetLogger(ctx).Infof("Fetching missing auth events: %v", missingEventList)
|
||||
var queryRes api.QueryEventsByIDResponse
|
||||
if err = t.rsAPI.QueryEventsByID(ctx, &queryReq, &queryRes); err != nil {
|
||||
return nil
|
||||
}
|
||||
for i := range queryRes.Events {
|
||||
evID := queryRes.Events[i].EventID()
|
||||
t.haveEvents[evID] = queryRes.Events[i]
|
||||
authEvents = append(authEvents, queryRes.Events[i].Unwrap())
|
||||
}
|
||||
}
|
||||
|
||||
evs := gomatrixserverlib.UnwrapEventHeaders(res.StateEvents)
|
||||
return &gomatrixserverlib.RespState{
|
||||
StateEvents: evs,
|
||||
StateEvents: gomatrixserverlib.UnwrapEventHeaders(res.StateEvents),
|
||||
AuthEvents: authEvents,
|
||||
}
|
||||
}
|
||||
|
|
@ -805,11 +803,7 @@ retryAllowedState:
|
|||
if err = checkAllowedByState(backwardsExtremity, resolvedStateEvents); err != nil {
|
||||
switch missing := err.(type) {
|
||||
case gomatrixserverlib.MissingAuthEventError:
|
||||
servers := t.getServers(ctx, backwardsExtremity.RoomID())
|
||||
if len(servers) > 5 {
|
||||
servers = servers[:5]
|
||||
}
|
||||
h, err2 := t.lookupEvent(ctx, roomVersion, missing.AuthEventID, true, servers)
|
||||
h, err2 := t.lookupEvent(ctx, roomVersion, backwardsExtremity.RoomID(), missing.AuthEventID, true)
|
||||
switch err2.(type) {
|
||||
case verifySigError:
|
||||
return &gomatrixserverlib.RespState{
|
||||
|
|
@ -857,17 +851,8 @@ func (t *txnReq) getMissingEvents(ctx context.Context, e *gomatrixserverlib.Even
|
|||
latestEvents[i] = res.LatestEvents[i].EventID
|
||||
}
|
||||
|
||||
servers := []gomatrixserverlib.ServerName{t.Origin}
|
||||
serverReq := &api.QueryServerJoinedToRoomRequest{
|
||||
RoomID: e.RoomID(),
|
||||
}
|
||||
serverRes := &api.QueryServerJoinedToRoomResponse{}
|
||||
if err = t.rsAPI.QueryServerJoinedToRoom(ctx, serverReq, serverRes); err == nil {
|
||||
servers = append(servers, serverRes.ServerNames...)
|
||||
logger.Infof("Found %d server(s) to query for missing events", len(servers))
|
||||
}
|
||||
|
||||
var missingResp *gomatrixserverlib.RespMissingEvents
|
||||
servers := t.getServers(ctx, e.RoomID())
|
||||
for _, server := range servers {
|
||||
var m gomatrixserverlib.RespMissingEvents
|
||||
if m, err = t.federation.LookupMissingEvents(ctx, server, e.RoomID(), gomatrixserverlib.MissingEvents{
|
||||
|
|
@ -1015,12 +1000,6 @@ func (t *txnReq) lookupMissingStateViaStateIDs(ctx context.Context, roomID, even
|
|||
"concurrent_requests": concurrentRequests,
|
||||
}).Info("Fetching missing state at event")
|
||||
|
||||
// Get a list of servers to fetch from.
|
||||
servers := t.getServers(ctx, roomID)
|
||||
if len(servers) > 5 {
|
||||
servers = servers[:5]
|
||||
}
|
||||
|
||||
// Create a queue containing all of the missing event IDs that we want
|
||||
// to retrieve.
|
||||
pending := make(chan string, missingCount)
|
||||
|
|
@ -1046,7 +1025,7 @@ func (t *txnReq) lookupMissingStateViaStateIDs(ctx context.Context, roomID, even
|
|||
// Define what we'll do in order to fetch the missing event ID.
|
||||
fetch := func(missingEventID string) {
|
||||
var h *gomatrixserverlib.HeaderedEvent
|
||||
h, err = t.lookupEvent(ctx, roomVersion, missingEventID, false, servers)
|
||||
h, err = t.lookupEvent(ctx, roomVersion, roomID, missingEventID, false)
|
||||
switch err.(type) {
|
||||
case verifySigError:
|
||||
return
|
||||
|
|
@ -1112,7 +1091,7 @@ func (t *txnReq) createRespStateFromStateIDs(stateIDs gomatrixserverlib.RespStat
|
|||
return &respState, nil
|
||||
}
|
||||
|
||||
func (t *txnReq) lookupEvent(ctx context.Context, roomVersion gomatrixserverlib.RoomVersion, missingEventID string, localFirst bool, servers []gomatrixserverlib.ServerName) (*gomatrixserverlib.HeaderedEvent, error) {
|
||||
func (t *txnReq) lookupEvent(ctx context.Context, roomVersion gomatrixserverlib.RoomVersion, roomID, missingEventID string, localFirst bool) (*gomatrixserverlib.HeaderedEvent, error) {
|
||||
if localFirst {
|
||||
// fetch from the roomserver
|
||||
queryReq := api.QueryEventsByIDRequest{
|
||||
|
|
@ -1127,6 +1106,7 @@ func (t *txnReq) lookupEvent(ctx context.Context, roomVersion gomatrixserverlib.
|
|||
}
|
||||
var event *gomatrixserverlib.Event
|
||||
found := false
|
||||
servers := t.getServers(ctx, roomID)
|
||||
for _, serverName := range servers {
|
||||
txn, err := t.federation.GetEvent(ctx, serverName, missingEventID)
|
||||
if err != nil || len(txn.PDUs) == 0 {
|
||||
|
|
|
|||
|
|
@ -22,6 +22,7 @@ type FederationClient interface {
|
|||
GetEvent(ctx context.Context, s gomatrixserverlib.ServerName, eventID string) (res gomatrixserverlib.Transaction, err error)
|
||||
GetServerKeys(ctx context.Context, matrixServer gomatrixserverlib.ServerName) (gomatrixserverlib.ServerKeys, error)
|
||||
MSC2836EventRelationships(ctx context.Context, dst gomatrixserverlib.ServerName, r gomatrixserverlib.MSC2836EventRelationshipsRequest, roomVersion gomatrixserverlib.RoomVersion) (res gomatrixserverlib.MSC2836EventRelationshipsResponse, err error)
|
||||
MSC2946Spaces(ctx context.Context, dst gomatrixserverlib.ServerName, roomID string, r gomatrixserverlib.MSC2946SpacesRequest) (res gomatrixserverlib.MSC2946SpacesResponse, err error)
|
||||
LookupServerKeys(ctx context.Context, s gomatrixserverlib.ServerName, keyRequests map[gomatrixserverlib.PublicKeyLookupRequest]gomatrixserverlib.Timestamp) ([]gomatrixserverlib.ServerKeys, error)
|
||||
}
|
||||
|
||||
|
|
@ -61,6 +62,12 @@ type FederationSenderInternalAPI interface {
|
|||
request *PerformJoinRequest,
|
||||
response *PerformJoinResponse,
|
||||
)
|
||||
// Handle an instruction to peek a room on a remote server.
|
||||
PerformOutboundPeek(
|
||||
ctx context.Context,
|
||||
request *PerformOutboundPeekRequest,
|
||||
response *PerformOutboundPeekResponse,
|
||||
) error
|
||||
// Handle an instruction to make_leave & send_leave with a remote server.
|
||||
PerformLeave(
|
||||
ctx context.Context,
|
||||
|
|
@ -110,6 +117,16 @@ type PerformJoinResponse struct {
|
|||
LastError *gomatrix.HTTPError
|
||||
}
|
||||
|
||||
type PerformOutboundPeekRequest struct {
|
||||
RoomID string `json:"room_id"`
|
||||
// The sorted list of servers to try. Servers will be tried sequentially, after de-duplication.
|
||||
ServerNames types.ServerNames `json:"server_names"`
|
||||
}
|
||||
|
||||
type PerformOutboundPeekResponse struct {
|
||||
LastError *gomatrix.HTTPError
|
||||
}
|
||||
|
||||
type PerformLeaveRequest struct {
|
||||
RoomID string `json:"room_id"`
|
||||
UserID string `json:"user_id"`
|
||||
|
|
|
|||
|
|
@ -25,6 +25,7 @@ import (
|
|||
"github.com/matrix-org/dendrite/federationsender/storage"
|
||||
"github.com/matrix-org/dendrite/internal"
|
||||
"github.com/matrix-org/dendrite/setup/config"
|
||||
"github.com/matrix-org/dendrite/setup/process"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
"github.com/matrix-org/util"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
|
@ -44,6 +45,7 @@ type OutputEDUConsumer struct {
|
|||
|
||||
// NewOutputEDUConsumer creates a new OutputEDUConsumer. Call Start() to begin consuming from EDU servers.
|
||||
func NewOutputEDUConsumer(
|
||||
process *process.ProcessContext,
|
||||
cfg *config.FederationSender,
|
||||
kafkaConsumer sarama.Consumer,
|
||||
queues *queue.OutgoingQueues,
|
||||
|
|
@ -51,18 +53,21 @@ func NewOutputEDUConsumer(
|
|||
) *OutputEDUConsumer {
|
||||
c := &OutputEDUConsumer{
|
||||
typingConsumer: &internal.ContinualConsumer{
|
||||
Process: process,
|
||||
ComponentName: "eduserver/typing",
|
||||
Topic: cfg.Matrix.Kafka.TopicFor(config.TopicOutputTypingEvent),
|
||||
Consumer: kafkaConsumer,
|
||||
PartitionStore: store,
|
||||
},
|
||||
sendToDeviceConsumer: &internal.ContinualConsumer{
|
||||
Process: process,
|
||||
ComponentName: "eduserver/sendtodevice",
|
||||
Topic: cfg.Matrix.Kafka.TopicFor(config.TopicOutputSendToDeviceEvent),
|
||||
Consumer: kafkaConsumer,
|
||||
PartitionStore: store,
|
||||
},
|
||||
receiptConsumer: &internal.ContinualConsumer{
|
||||
Process: process,
|
||||
ComponentName: "eduserver/receipt",
|
||||
Topic: cfg.Matrix.Kafka.TopicFor(config.TopicOutputReceiptEvent),
|
||||
Consumer: kafkaConsumer,
|
||||
|
|
|
|||
|
|
@ -26,6 +26,7 @@ import (
|
|||
"github.com/matrix-org/dendrite/keyserver/api"
|
||||
roomserverAPI "github.com/matrix-org/dendrite/roomserver/api"
|
||||
"github.com/matrix-org/dendrite/setup/config"
|
||||
"github.com/matrix-org/dendrite/setup/process"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
|
@ -41,6 +42,7 @@ type KeyChangeConsumer struct {
|
|||
|
||||
// NewKeyChangeConsumer creates a new KeyChangeConsumer. Call Start() to begin consuming from key servers.
|
||||
func NewKeyChangeConsumer(
|
||||
process *process.ProcessContext,
|
||||
cfg *config.KeyServer,
|
||||
kafkaConsumer sarama.Consumer,
|
||||
queues *queue.OutgoingQueues,
|
||||
|
|
@ -49,6 +51,7 @@ func NewKeyChangeConsumer(
|
|||
) *KeyChangeConsumer {
|
||||
c := &KeyChangeConsumer{
|
||||
consumer: &internal.ContinualConsumer{
|
||||
Process: process,
|
||||
ComponentName: "federationsender/keychange",
|
||||
Topic: string(cfg.Matrix.Kafka.TopicFor(config.TopicOutputKeyChangeEvent)),
|
||||
Consumer: kafkaConsumer,
|
||||
|
|
|
|||
|
|
@ -26,6 +26,7 @@ import (
|
|||
"github.com/matrix-org/dendrite/internal"
|
||||
"github.com/matrix-org/dendrite/roomserver/api"
|
||||
"github.com/matrix-org/dendrite/setup/config"
|
||||
"github.com/matrix-org/dendrite/setup/process"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
|
@ -41,6 +42,7 @@ type OutputRoomEventConsumer struct {
|
|||
|
||||
// NewOutputRoomEventConsumer creates a new OutputRoomEventConsumer. Call Start() to begin consuming from room servers.
|
||||
func NewOutputRoomEventConsumer(
|
||||
process *process.ProcessContext,
|
||||
cfg *config.FederationSender,
|
||||
kafkaConsumer sarama.Consumer,
|
||||
queues *queue.OutgoingQueues,
|
||||
|
|
@ -48,6 +50,7 @@ func NewOutputRoomEventConsumer(
|
|||
rsAPI api.RoomserverInternalAPI,
|
||||
) *OutputRoomEventConsumer {
|
||||
consumer := internal.ContinualConsumer{
|
||||
Process: process,
|
||||
ComponentName: "federationsender/roomserver",
|
||||
Topic: string(cfg.Matrix.Kafka.TopicFor(config.TopicOutputRoomEvent)),
|
||||
Consumer: kafkaConsumer,
|
||||
|
|
@ -102,6 +105,7 @@ func (s *OutputRoomEventConsumer) onMessage(msg *sarama.ConsumerMessage) error {
|
|||
default:
|
||||
// panic rather than continue with an inconsistent database
|
||||
log.WithFields(log.Fields{
|
||||
"event_id": ev.EventID(),
|
||||
"event": string(ev.JSON()),
|
||||
"add": output.NewRoomEvent.AddsStateEventIDs,
|
||||
"del": output.NewRoomEvent.RemovesStateEventIDs,
|
||||
|
|
@ -110,6 +114,14 @@ func (s *OutputRoomEventConsumer) onMessage(msg *sarama.ConsumerMessage) error {
|
|||
}
|
||||
return nil
|
||||
}
|
||||
case api.OutputTypeNewInboundPeek:
|
||||
if err := s.processInboundPeek(*output.NewInboundPeek); err != nil {
|
||||
log.WithFields(log.Fields{
|
||||
"event": output.NewInboundPeek,
|
||||
log.ErrorKey: err,
|
||||
}).Panicf("roomserver output log: remote peek event failure")
|
||||
return nil
|
||||
}
|
||||
default:
|
||||
log.WithField("type", output.Type).Debug(
|
||||
"roomserver output log: ignoring unknown output type",
|
||||
|
|
@ -120,6 +132,23 @@ func (s *OutputRoomEventConsumer) onMessage(msg *sarama.ConsumerMessage) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// processInboundPeek starts tracking a new federated inbound peek (replacing the existing one if any)
// causing the federationsender to start sending messages to the peeking server
func (s *OutputRoomEventConsumer) processInboundPeek(orp api.OutputNewInboundPeek) error {

// FIXME: there's a race here - we should start /sending new peeked events
// atomically after the orp.LatestEventID to ensure there are no gaps between
// the peek beginning and the send stream beginning.
//
// We probably need to track orp.LatestEventID on the inbound peek, but it's
// unclear how we then use that to prevent the race when we start the send
// stream.
//
// This is making the tests flakey.

return s.db.AddInboundPeek(context.TODO(), orp.ServerName, orp.RoomID, orp.PeekID, orp.RenewalInterval)
}

// processMessage updates the list of currently joined hosts in the room
// and then sends the event to the hosts that were joined before the event.
func (s *OutputRoomEventConsumer) processMessage(ore api.OutputNewRoomEvent) error {
|
||||
|
|
@ -163,6 +192,10 @@ func (s *OutputRoomEventConsumer) processMessage(ore api.OutputNewRoomEvent) err
|
|||
return err
|
||||
}
|
||||
|
||||
// TODO: do housekeeping to evict unrenewed peeking hosts
|
||||
|
||||
// TODO: implement query to let the fedapi check whether a given peek is live or not
|
||||
|
||||
// Send the event.
|
||||
return s.queues.SendEvent(
|
||||
ore.Event, gomatrixserverlib.ServerName(ore.SendAsServer), joinedHostsAtEvent,
|
||||
|
|
@ -170,7 +203,7 @@ func (s *OutputRoomEventConsumer) processMessage(ore api.OutputNewRoomEvent) err
|
|||
}
|
||||
|
||||
// joinedHostsAtEvent works out a list of matrix servers that were joined to
// the room at the event.
// the room at the event (including peeking ones)
// It is important to use the state at the event for sending messages because:
// 1) We shouldn't send messages to servers that weren't in the room.
// 2) If a server is kicked from the rooms it should still be told about the
@@ -221,6 +254,15 @@ func (s *OutputRoomEventConsumer) joinedHostsAtEvent(
joined[joinedHost.ServerName] = true
}

// handle peeking hosts
inboundPeeks, err := s.db.GetInboundPeeks(context.TODO(), ore.Event.Event.RoomID())
if err != nil {
return nil, err
}
for _, inboundPeek := range inboundPeeks {
joined[inboundPeek.ServerName] = true
}

var result []gomatrixserverlib.ServerName
for serverName, include := range joined {
if include {
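The hunk above extends joinedHostsAtEvent so that servers with a live inbound peek also receive outgoing events. A standalone sketch of that set-building step, using simplified stand-in types rather than the real federationsender types (illustration only, not part of this commit):

// Sketch: combine servers joined at the event with servers holding a live
// inbound peek into one de-duplicated destination set.
package example

type serverName string

type inboundPeek struct {
	ServerName serverName
}

func destinationsFor(joinedAtEvent []serverName, peeks []inboundPeek) []serverName {
	include := map[serverName]bool{}
	for _, s := range joinedAtEvent {
		include[s] = true
	}
	for _, p := range peeks {
		include[p.ServerName] = true // peeking servers also receive outgoing events
	}
	result := make([]serverName, 0, len(include))
	for s := range include {
		result = append(result, s)
	}
	return result
}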
@ -59,7 +59,8 @@ func NewInternalAPI(
|
|||
consumer, _ := kafka.SetupConsumerProducer(&cfg.Matrix.Kafka)
|
||||
|
||||
queues := queue.NewOutgoingQueues(
|
||||
federationSenderDB, cfg.Matrix.DisableFederation,
|
||||
federationSenderDB, base.ProcessContext,
|
||||
cfg.Matrix.DisableFederation,
|
||||
cfg.Matrix.ServerName, federation, rsAPI, stats,
|
||||
&queue.SigningInfo{
|
||||
KeyID: cfg.Matrix.KeyID,
|
||||
|
|
@ -69,7 +70,7 @@ func NewInternalAPI(
|
|||
)
|
||||
|
||||
rsConsumer := consumers.NewOutputRoomEventConsumer(
|
||||
cfg, consumer, queues,
|
||||
base.ProcessContext, cfg, consumer, queues,
|
||||
federationSenderDB, rsAPI,
|
||||
)
|
||||
if err = rsConsumer.Start(); err != nil {
|
||||
|
|
@ -77,13 +78,13 @@ func NewInternalAPI(
|
|||
}
|
||||
|
||||
tsConsumer := consumers.NewOutputEDUConsumer(
|
||||
cfg, consumer, queues, federationSenderDB,
|
||||
base.ProcessContext, cfg, consumer, queues, federationSenderDB,
|
||||
)
|
||||
if err := tsConsumer.Start(); err != nil {
|
||||
logrus.WithError(err).Panic("failed to start typing server consumer")
|
||||
}
|
||||
keyConsumer := consumers.NewKeyChangeConsumer(
|
||||
&base.Cfg.KeyServer, consumer, queues, federationSenderDB, rsAPI,
|
||||
base.ProcessContext, &base.Cfg.KeyServer, consumer, queues, federationSenderDB, rsAPI,
|
||||
)
|
||||
if err := keyConsumer.Start(); err != nil {
|
||||
logrus.WithError(err).Panic("failed to start key server consumer")
|
||||
|
|
|
|||
|
|
@ -244,3 +244,17 @@ func (a *FederationSenderInternalAPI) MSC2836EventRelationships(
|
|||
}
|
||||
return ires.(gomatrixserverlib.MSC2836EventRelationshipsResponse), nil
|
||||
}
|
||||
|
||||
func (a *FederationSenderInternalAPI) MSC2946Spaces(
|
||||
ctx context.Context, s gomatrixserverlib.ServerName, roomID string, r gomatrixserverlib.MSC2946SpacesRequest,
|
||||
) (res gomatrixserverlib.MSC2946SpacesResponse, err error) {
|
||||
ctx, cancel := context.WithTimeout(ctx, time.Minute)
|
||||
defer cancel()
|
||||
ires, err := a.doRequest(s, func() (interface{}, error) {
|
||||
return a.federation.MSC2946Spaces(ctx, s, roomID, r)
|
||||
})
|
||||
if err != nil {
|
||||
return res, err
|
||||
}
|
||||
return ires.(gomatrixserverlib.MSC2946SpacesResponse), nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -8,7 +8,6 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/matrix-org/dendrite/federationsender/api"
|
||||
"github.com/matrix-org/dendrite/federationsender/internal/perform"
|
||||
roomserverAPI "github.com/matrix-org/dendrite/roomserver/api"
|
||||
"github.com/matrix-org/dendrite/roomserver/version"
|
||||
"github.com/matrix-org/gomatrix"
|
||||
|
|
@ -218,9 +217,9 @@ func (r *FederationSenderInternalAPI) performJoinUsingServer(
|
|||
|
||||
// Sanity-check the join response to ensure that it has a create
|
||||
// event, that the room version is known, etc.
|
||||
if err := sanityCheckSendJoinResponse(respSendJoin); err != nil {
|
||||
if err := sanityCheckAuthChain(respSendJoin.AuthEvents); err != nil {
|
||||
cancel()
|
||||
return fmt.Errorf("sanityCheckSendJoinResponse: %w", err)
|
||||
return fmt.Errorf("sanityCheckAuthChain: %w", err)
|
||||
}
|
||||
|
||||
// Process the join response in a goroutine. The idea here is
|
||||
|
|
@ -231,11 +230,9 @@ func (r *FederationSenderInternalAPI) performJoinUsingServer(
|
|||
go func() {
|
||||
defer cancel()
|
||||
|
||||
// Check that the send_join response was valid.
|
||||
joinCtx := perform.JoinContext(r.federation, r.keyRing)
|
||||
respState, err := joinCtx.CheckSendJoinResponse(
|
||||
ctx, event, serverName, respMakeJoin, respSendJoin,
|
||||
)
|
||||
// TODO: Can we expand Check here to return a list of missing auth
|
||||
// events rather than failing one at a time?
|
||||
respState, err := respSendJoin.Check(ctx, r.keyRing, event, federatedAuthProvider(ctx, r.federation, r.keyRing, serverName))
|
||||
if err != nil {
|
||||
logrus.WithFields(logrus.Fields{
|
||||
"room_id": roomID,
|
||||
|
|
@ -266,6 +263,181 @@ func (r *FederationSenderInternalAPI) performJoinUsingServer(
|
|||
return nil
|
||||
}
|
||||
|
||||
// PerformOutboundPeekRequest implements api.FederationSenderInternalAPI
|
||||
func (r *FederationSenderInternalAPI) PerformOutboundPeek(
|
||||
ctx context.Context,
|
||||
request *api.PerformOutboundPeekRequest,
|
||||
response *api.PerformOutboundPeekResponse,
|
||||
) error {
|
||||
// Look up the supported room versions.
|
||||
var supportedVersions []gomatrixserverlib.RoomVersion
|
||||
for version := range version.SupportedRoomVersions() {
|
||||
supportedVersions = append(supportedVersions, version)
|
||||
}
|
||||
|
||||
// Deduplicate the server names we were provided but keep the ordering
// as this encodes useful information about which servers are most likely
// to respond.
seenSet := make(map[gomatrixserverlib.ServerName]bool)
var uniqueList []gomatrixserverlib.ServerName
for _, srv := range request.ServerNames {
if seenSet[srv] {
continue
}
seenSet[srv] = true
uniqueList = append(uniqueList, srv)
}
request.ServerNames = uniqueList
|
||||
|
||||
// See if there's an existing outbound peek for this room ID with
|
||||
// one of the specified servers.
|
||||
if peeks, err := r.db.GetOutboundPeeks(ctx, request.RoomID); err == nil {
|
||||
for _, peek := range peeks {
|
||||
if _, ok := seenSet[peek.ServerName]; ok {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Try each server that we were provided until we land on one that
|
||||
// successfully completes the peek
|
||||
var lastErr error
|
||||
for _, serverName := range request.ServerNames {
|
||||
if err := r.performOutboundPeekUsingServer(
|
||||
ctx,
|
||||
request.RoomID,
|
||||
serverName,
|
||||
supportedVersions,
|
||||
); err != nil {
|
||||
logrus.WithError(err).WithFields(logrus.Fields{
|
||||
"server_name": serverName,
|
||||
"room_id": request.RoomID,
|
||||
}).Warnf("Failed to peek room through server")
|
||||
lastErr = err
|
||||
continue
|
||||
}
|
||||
|
||||
// We're all good.
|
||||
return nil
|
||||
}
|
||||
|
||||
// If we reach here then we didn't complete a peek for some reason.
|
||||
var httpErr gomatrix.HTTPError
|
||||
if ok := errors.As(lastErr, &httpErr); ok {
|
||||
httpErr.Message = string(httpErr.Contents)
|
||||
// Clear the wrapped error, else serialising to JSON (in polylith mode) will fail
|
||||
httpErr.WrappedError = nil
|
||||
response.LastError = &httpErr
|
||||
} else {
|
||||
response.LastError = &gomatrix.HTTPError{
|
||||
Code: 0,
|
||||
WrappedError: nil,
|
||||
Message: lastErr.Error(),
|
||||
}
|
||||
}
|
||||
|
||||
logrus.Errorf(
|
||||
"failed to peek room %q through %d server(s): last error %s",
|
||||
request.RoomID, len(request.ServerNames), lastErr,
|
||||
)
|
||||
|
||||
return lastErr
|
||||
}
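PerformOutboundPeek above walks the de-duplicated server list in order, returning on the first success and keeping the last error for the response. A minimal sketch of that pattern (illustration only; the attempt callback stands in for performOutboundPeekUsingServer):

// Sketch: attempt an operation against an ordered, de-duplicated server list,
// stopping at the first success and reporting the last error otherwise.
package example

import (
	"context"
	"fmt"
)

type serverName string

func tryServersInOrder(ctx context.Context, servers []serverName,
	attempt func(context.Context, serverName) error) error {
	if len(servers) == 0 {
		return fmt.Errorf("no servers to try")
	}
	var lastErr error
	for _, server := range servers {
		if err := attempt(ctx, server); err != nil {
			lastErr = err // keep the most recent failure for reporting
			continue
		}
		return nil // the first successful server completes the operation
	}
	return fmt.Errorf("all %d server(s) failed: %w", len(servers), lastErr)
}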
|
||||
|
||||
func (r *FederationSenderInternalAPI) performOutboundPeekUsingServer(
|
||||
ctx context.Context,
|
||||
roomID string,
|
||||
serverName gomatrixserverlib.ServerName,
|
||||
supportedVersions []gomatrixserverlib.RoomVersion,
|
||||
) error {
|
||||
// create a unique ID for this peek.
|
||||
// for now we just use the room ID again. In future, if we ever
|
||||
// support concurrent peeks to the same room with different filters
|
||||
// then we would need to disambiguate further.
|
||||
peekID := roomID
|
||||
|
||||
// check whether we're peeking already to try to avoid needlessly
|
||||
// re-peeking on the server. we don't need a transaction for this,
|
||||
// given this is a nice-to-have.
|
||||
outboundPeek, err := r.db.GetOutboundPeek(ctx, serverName, roomID, peekID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
renewing := false
|
||||
if outboundPeek != nil {
|
||||
nowMilli := time.Now().UnixNano() / int64(time.Millisecond)
|
||||
if nowMilli > outboundPeek.RenewedTimestamp+outboundPeek.RenewalInterval {
|
||||
logrus.Infof("stale outbound peek to %s for %s already exists; renewing", serverName, roomID)
|
||||
renewing = true
|
||||
} else {
|
||||
logrus.Infof("live outbound peek to %s for %s already exists", serverName, roomID)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Try to perform an outbound /peek using the information supplied in the
|
||||
// request.
|
||||
respPeek, err := r.federation.Peek(
|
||||
ctx,
|
||||
serverName,
|
||||
roomID,
|
||||
peekID,
|
||||
supportedVersions,
|
||||
)
|
||||
if err != nil {
|
||||
r.statistics.ForServer(serverName).Failure()
|
||||
return fmt.Errorf("r.federation.Peek: %w", err)
|
||||
}
|
||||
r.statistics.ForServer(serverName).Success()
|
||||
|
||||
// Work out if we support the room version that has been supplied in
|
||||
// the peek response.
|
||||
if respPeek.RoomVersion == "" {
|
||||
respPeek.RoomVersion = gomatrixserverlib.RoomVersionV1
|
||||
}
|
||||
if _, err = respPeek.RoomVersion.EventFormat(); err != nil {
|
||||
return fmt.Errorf("respPeek.RoomVersion.EventFormat: %w", err)
|
||||
}
|
||||
|
||||
// we have the peek state now so let's process regardless of whether upstream gives up
|
||||
ctx = context.Background()
|
||||
|
||||
respState := respPeek.ToRespState()
|
||||
// authenticate the state returned (check its auth events etc)
|
||||
// the equivalent of CheckSendJoinResponse()
|
||||
if err = sanityCheckAuthChain(respState.AuthEvents); err != nil {
|
||||
return fmt.Errorf("sanityCheckAuthChain: %w", err)
|
||||
}
|
||||
if err = respState.Check(ctx, r.keyRing, federatedAuthProvider(ctx, r.federation, r.keyRing, serverName)); err != nil {
|
||||
return fmt.Errorf("Error checking state returned from peeking: %w", err)
|
||||
}
|
||||
|
||||
// If we've got this far, the remote server is peeking.
|
||||
if renewing {
|
||||
if err = r.db.RenewOutboundPeek(ctx, serverName, roomID, peekID, respPeek.RenewalInterval); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err = r.db.AddOutboundPeek(ctx, serverName, roomID, peekID, respPeek.RenewalInterval); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// logrus.Warnf("got respPeek %#v", respPeek)
|
||||
// Send the newly returned state to the roomserver to update our local view.
|
||||
if err = roomserverAPI.SendEventWithState(
|
||||
ctx, r.rsAPI,
|
||||
roomserverAPI.KindNew,
|
||||
&respState,
|
||||
respPeek.LatestEvent.Headered(respPeek.RoomVersion),
|
||||
nil,
|
||||
); err != nil {
|
||||
return fmt.Errorf("r.producer.SendEventWithState: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
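performOutboundPeekUsingServer above treats an existing outbound peek as live until renewed_ts + renewal_interval (milliseconds) has passed, and only then renews it. A small sketch of that staleness test (illustration only, with a simplified peek struct):

// Sketch: decide whether an existing outbound peek is still live or needs renewing.
// Timestamps and intervals are milliseconds, as in the peek tables.
package example

import "time"

type outboundPeek struct {
	RenewedTimestamp int64 // ms since the Unix epoch
	RenewalInterval  int64 // ms
}

// needsRenewal reports whether the peek has passed its renewal deadline.
func needsRenewal(peek *outboundPeek, now time.Time) bool {
	if peek == nil {
		return true // no peek recorded yet, so one must be created
	}
	nowMilli := now.UnixNano() / int64(time.Millisecond)
	return nowMilli > peek.RenewedTimestamp+peek.RenewalInterval
}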
|
||||
|
||||
// PerformLeaveRequest implements api.FederationSenderInternalAPI
|
||||
func (r *FederationSenderInternalAPI) PerformLeave(
|
||||
ctx context.Context,
|
||||
|
|
@ -441,9 +613,9 @@ func (r *FederationSenderInternalAPI) PerformBroadcastEDU(
|
|||
return nil
|
||||
}
|
||||
|
||||
func sanityCheckSendJoinResponse(respSendJoin gomatrixserverlib.RespSendJoin) error {
|
||||
func sanityCheckAuthChain(authChain []*gomatrixserverlib.Event) error {
|
||||
// sanity check we have a create event and it has a known room version
|
||||
for _, ev := range respSendJoin.AuthEvents {
|
||||
for _, ev := range authChain {
|
||||
if ev.Type() == gomatrixserverlib.MRoomCreate && ev.StateKeyEquals("") {
|
||||
// make sure the room version is known
|
||||
content := ev.Content()
|
||||
|
|
@ -461,12 +633,12 @@ func sanityCheckSendJoinResponse(respSendJoin gomatrixserverlib.RespSendJoin) er
|
|||
}
|
||||
knownVersions := gomatrixserverlib.RoomVersions()
|
||||
if _, ok := knownVersions[gomatrixserverlib.RoomVersion(verBody.Version)]; !ok {
|
||||
return fmt.Errorf("send_join m.room.create event has an unknown room version: %s", verBody.Version)
|
||||
return fmt.Errorf("auth chain m.room.create event has an unknown room version: %s", verBody.Version)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("send_join response is missing m.room.create event")
|
||||
return fmt.Errorf("auth chain response is missing m.room.create event")
|
||||
}
|
||||
|
||||
func setDefaultRoomVersionFromJoinEvent(joinEvent gomatrixserverlib.EventBuilder) gomatrixserverlib.RoomVersion {
|
||||
|
|
@ -490,3 +662,71 @@ func setDefaultRoomVersionFromJoinEvent(joinEvent gomatrixserverlib.EventBuilder
|
|||
}
|
||||
return gomatrixserverlib.RoomVersionV4
|
||||
}
|
||||
|
||||
// FederatedAuthProvider is an auth chain provider which fetches events from the server provided
|
||||
func federatedAuthProvider(
|
||||
ctx context.Context, federation *gomatrixserverlib.FederationClient,
|
||||
keyRing gomatrixserverlib.JSONVerifier, server gomatrixserverlib.ServerName,
|
||||
) gomatrixserverlib.AuthChainProvider {
|
||||
// A list of events that we have retried, if they were not included in
|
||||
// the auth events supplied in the send_join.
|
||||
retries := map[string][]*gomatrixserverlib.Event{}
|
||||
|
||||
// Define a function which we can pass to Check to retrieve missing
|
||||
// auth events inline. This greatly increases our chances of not having
|
||||
// to repeat the entire set of checks just for a missing event or two.
|
||||
return func(roomVersion gomatrixserverlib.RoomVersion, eventIDs []string) ([]*gomatrixserverlib.Event, error) {
|
||||
returning := []*gomatrixserverlib.Event{}
|
||||
|
||||
// See if we have retry entries for each of the supplied event IDs.
|
||||
for _, eventID := range eventIDs {
|
||||
// If we've already satisfied a request for this event ID before then
|
||||
// just append the results. We won't retry the request.
|
||||
if retry, ok := retries[eventID]; ok {
|
||||
if retry == nil {
|
||||
return nil, fmt.Errorf("missingAuth: not retrying failed event ID %q", eventID)
|
||||
}
|
||||
returning = append(returning, retry...)
|
||||
continue
|
||||
}
|
||||
|
||||
// Make a note of the fact that we tried to do something with this
|
||||
// event ID, even if we don't succeed.
|
||||
retries[eventID] = nil
|
||||
|
||||
// Try to retrieve the event from the server that sent us the send
|
||||
// join response.
|
||||
tx, txerr := federation.GetEvent(ctx, server, eventID)
|
||||
if txerr != nil {
|
||||
return nil, fmt.Errorf("missingAuth r.federation.GetEvent: %w", txerr)
|
||||
}
|
||||
|
||||
// For each event returned, add it to the set of return events. We
|
||||
// also will populate the retries, in case someone asks for this
|
||||
// event ID again.
|
||||
for _, pdu := range tx.PDUs {
|
||||
// Try to parse the event.
|
||||
ev, everr := gomatrixserverlib.NewEventFromUntrustedJSON(pdu, roomVersion)
|
||||
if everr != nil {
|
||||
return nil, fmt.Errorf("missingAuth gomatrixserverlib.NewEventFromUntrustedJSON: %w", everr)
|
||||
}
|
||||
|
||||
// Check the signatures of the event.
|
||||
if res, err := gomatrixserverlib.VerifyEventSignatures(ctx, []*gomatrixserverlib.Event{ev}, keyRing); err != nil {
|
||||
return nil, fmt.Errorf("missingAuth VerifyEventSignatures: %w", err)
|
||||
} else {
|
||||
for _, err := range res {
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("missingAuth VerifyEventSignatures: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If the event is OK then add it to the results and the retry map.
|
||||
returning = append(returning, ev)
|
||||
retries[ev.EventID()] = append(retries[ev.EventID()], ev)
|
||||
}
|
||||
}
|
||||
return returning, nil
|
||||
}
|
||||
}
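federatedAuthProvider above memoises every event ID it has attempted, so a second request for the same auth event is served from memory or fails fast. The same memoisation shape in isolation (illustration only; fetch stands in for the federation /event request):

// Sketch: memoise per-event fetch results inside a closure so repeated
// requests for the same auth event ID never hit the network twice.
package example

import "fmt"

func newCachingFetcher(fetch func(eventID string) ([]byte, error)) func(eventID string) ([]byte, error) {
	retries := map[string][]byte{} // a nil value records a failed attempt
	return func(eventID string) ([]byte, error) {
		if cached, ok := retries[eventID]; ok {
			if cached == nil {
				return nil, fmt.Errorf("not retrying failed event ID %q", eventID)
			}
			return cached, nil
		}
		retries[eventID] = nil // note the attempt before making it
		ev, err := fetch(eventID)
		if err != nil {
			return nil, err
		}
		retries[eventID] = ev
		return ev, nil
	}
}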
|
||||
|
|
|
|||
|
|
@ -1,105 +0,0 @@
|
|||
package perform
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
)
|
||||
|
||||
// This file contains helpers for the PerformJoin function.
|
||||
|
||||
type joinContext struct {
|
||||
federation *gomatrixserverlib.FederationClient
|
||||
keyRing *gomatrixserverlib.KeyRing
|
||||
}
|
||||
|
||||
// Returns a new join context.
|
||||
func JoinContext(f *gomatrixserverlib.FederationClient, k *gomatrixserverlib.KeyRing) *joinContext {
|
||||
return &joinContext{
|
||||
federation: f,
|
||||
keyRing: k,
|
||||
}
|
||||
}
|
||||
|
||||
// checkSendJoinResponse checks that all of the signatures are correct
|
||||
// and that the join is allowed by the supplied state.
|
||||
func (r joinContext) CheckSendJoinResponse(
|
||||
ctx context.Context,
|
||||
event *gomatrixserverlib.Event,
|
||||
server gomatrixserverlib.ServerName,
|
||||
respMakeJoin gomatrixserverlib.RespMakeJoin,
|
||||
respSendJoin gomatrixserverlib.RespSendJoin,
|
||||
) (*gomatrixserverlib.RespState, error) {
|
||||
// A list of events that we have retried, if they were not included in
|
||||
// the auth events supplied in the send_join.
|
||||
retries := map[string][]*gomatrixserverlib.Event{}
|
||||
|
||||
// Define a function which we can pass to Check to retrieve missing
|
||||
// auth events inline. This greatly increases our chances of not having
|
||||
// to repeat the entire set of checks just for a missing event or two.
|
||||
missingAuth := func(roomVersion gomatrixserverlib.RoomVersion, eventIDs []string) ([]*gomatrixserverlib.Event, error) {
|
||||
returning := []*gomatrixserverlib.Event{}
|
||||
|
||||
// See if we have retry entries for each of the supplied event IDs.
|
||||
for _, eventID := range eventIDs {
|
||||
// If we've already satisfied a request for this event ID before then
|
||||
// just append the results. We won't retry the request.
|
||||
if retry, ok := retries[eventID]; ok {
|
||||
if retry == nil {
|
||||
return nil, fmt.Errorf("missingAuth: not retrying failed event ID %q", eventID)
|
||||
}
|
||||
returning = append(returning, retry...)
|
||||
continue
|
||||
}
|
||||
|
||||
// Make a note of the fact that we tried to do something with this
|
||||
// event ID, even if we don't succeed.
|
||||
retries[event.EventID()] = nil
|
||||
|
||||
// Try to retrieve the event from the server that sent us the send
|
||||
// join response.
|
||||
tx, txerr := r.federation.GetEvent(ctx, server, eventID)
|
||||
if txerr != nil {
|
||||
return nil, fmt.Errorf("missingAuth r.federation.GetEvent: %w", txerr)
|
||||
}
|
||||
|
||||
// For each event returned, add it to the set of return events. We
|
||||
// also will populate the retries, in case someone asks for this
|
||||
// event ID again.
|
||||
for _, pdu := range tx.PDUs {
|
||||
// Try to parse the event.
|
||||
ev, everr := gomatrixserverlib.NewEventFromUntrustedJSON(pdu, roomVersion)
|
||||
if everr != nil {
|
||||
return nil, fmt.Errorf("missingAuth gomatrixserverlib.NewEventFromUntrustedJSON: %w", everr)
|
||||
}
|
||||
|
||||
// Check the signatures of the event.
|
||||
if res, err := gomatrixserverlib.VerifyEventSignatures(ctx, []*gomatrixserverlib.Event{ev}, r.keyRing); err != nil {
|
||||
return nil, fmt.Errorf("missingAuth VerifyEventSignatures: %w", err)
|
||||
} else {
|
||||
for _, err := range res {
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("missingAuth VerifyEventSignatures: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If the event is OK then add it to the results and the retry map.
|
||||
returning = append(returning, ev)
|
||||
retries[event.EventID()] = append(retries[event.EventID()], ev)
|
||||
retries[ev.EventID()] = append(retries[ev.EventID()], ev)
|
||||
}
|
||||
}
|
||||
|
||||
return returning, nil
|
||||
}
|
||||
|
||||
// TODO: Can we expand Check here to return a list of missing auth
|
||||
// events rather than failing one at a time?
|
||||
rs, err := respSendJoin.Check(ctx, r.keyRing, event, missingAuth)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("respSendJoin: %w", err)
|
||||
}
|
||||
return rs, nil
|
||||
}
|
||||
|
|
@ -20,6 +20,7 @@ const (
|
|||
FederationSenderPerformJoinRequestPath = "/federationsender/performJoinRequest"
|
||||
FederationSenderPerformLeaveRequestPath = "/federationsender/performLeaveRequest"
|
||||
FederationSenderPerformInviteRequestPath = "/federationsender/performInviteRequest"
|
||||
FederationSenderPerformOutboundPeekRequestPath = "/federationsender/performOutboundPeekRequest"
|
||||
FederationSenderPerformServersAlivePath = "/federationsender/performServersAlive"
|
||||
FederationSenderPerformBroadcastEDUPath = "/federationsender/performBroadcastEDU"
|
||||
|
||||
|
|
@ -33,6 +34,7 @@ const (
|
|||
FederationSenderGetServerKeysPath = "/federationsender/client/getServerKeys"
|
||||
FederationSenderLookupServerKeysPath = "/federationsender/client/lookupServerKeys"
|
||||
FederationSenderEventRelationshipsPath = "/federationsender/client/msc2836eventRelationships"
|
||||
FederationSenderSpacesSummaryPath = "/federationsender/client/msc2946spacesSummary"
|
||||
)
|
||||
|
||||
// NewFederationSenderClient creates a FederationSenderInternalAPI implemented by talking to a HTTP POST API.
|
||||
|
|
@ -75,6 +77,19 @@ func (h *httpFederationSenderInternalAPI) PerformInvite(
|
|||
return httputil.PostJSON(ctx, span, h.httpClient, apiURL, request, response)
|
||||
}
|
||||
|
||||
// Handle starting a peek on a remote server.
|
||||
func (h *httpFederationSenderInternalAPI) PerformOutboundPeek(
|
||||
ctx context.Context,
|
||||
request *api.PerformOutboundPeekRequest,
|
||||
response *api.PerformOutboundPeekResponse,
|
||||
) error {
|
||||
span, ctx := opentracing.StartSpanFromContext(ctx, "PerformOutboundPeekRequest")
|
||||
defer span.Finish()
|
||||
|
||||
apiURL := h.federationSenderURL + FederationSenderPerformOutboundPeekRequestPath
|
||||
return httputil.PostJSON(ctx, span, h.httpClient, apiURL, request, response)
|
||||
}
|
||||
|
||||
func (h *httpFederationSenderInternalAPI) PerformServersAlive(
|
||||
ctx context.Context,
|
||||
request *api.PerformServersAliveRequest,
|
||||
|
|
@ -449,3 +464,34 @@ func (h *httpFederationSenderInternalAPI) MSC2836EventRelationships(
|
|||
}
|
||||
return response.Res, nil
|
||||
}
|
||||
|
||||
type spacesReq struct {
|
||||
S gomatrixserverlib.ServerName
|
||||
Req gomatrixserverlib.MSC2946SpacesRequest
|
||||
RoomID string
|
||||
Res gomatrixserverlib.MSC2946SpacesResponse
|
||||
Err *api.FederationClientError
|
||||
}
|
||||
|
||||
func (h *httpFederationSenderInternalAPI) MSC2946Spaces(
|
||||
ctx context.Context, dst gomatrixserverlib.ServerName, roomID string, r gomatrixserverlib.MSC2946SpacesRequest,
|
||||
) (res gomatrixserverlib.MSC2946SpacesResponse, err error) {
|
||||
span, ctx := opentracing.StartSpanFromContext(ctx, "MSC2946Spaces")
|
||||
defer span.Finish()
|
||||
|
||||
request := spacesReq{
|
||||
S: dst,
|
||||
Req: r,
|
||||
RoomID: roomID,
|
||||
}
|
||||
var response spacesReq
|
||||
apiURL := h.federationSenderURL + FederationSenderSpacesSummaryPath
|
||||
err = httputil.PostJSON(ctx, span, h.httpClient, apiURL, &request, &response)
|
||||
if err != nil {
|
||||
return res, err
|
||||
}
|
||||
if response.Err != nil {
|
||||
return res, response.Err
|
||||
}
|
||||
return response.Res, nil
|
||||
}
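MSC2946Spaces above uses the polylith internal-API convention: a single struct carries the arguments, the result and an error field across an HTTP POST, and the callee echoes it back. A generic sketch of that transport step (illustration only; postJSON stands in for httputil.PostJSON):

// Sketch: POST a request struct as JSON and decode the echoed response struct.
package example

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"net/http"
)

func postJSON(ctx context.Context, client *http.Client, url string, request, response interface{}) error {
	body, err := json.Marshal(request)
	if err != nil {
		return err
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(body))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/json")
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("internal API returned HTTP %d", resp.StatusCode)
	}
	return json.NewDecoder(resp.Body).Decode(response)
}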
|
||||
|
|
|
|||
|
|
@ -329,4 +329,26 @@ func AddRoutes(intAPI api.FederationSenderInternalAPI, internalAPIMux *mux.Route
|
|||
return util.JSONResponse{Code: http.StatusOK, JSON: request}
|
||||
}),
|
||||
)
|
||||
internalAPIMux.Handle(
|
||||
FederationSenderSpacesSummaryPath,
|
||||
httputil.MakeInternalAPI("MSC2946SpacesSummary", func(req *http.Request) util.JSONResponse {
|
||||
var request spacesReq
|
||||
if err := json.NewDecoder(req.Body).Decode(&request); err != nil {
|
||||
return util.MessageResponse(http.StatusBadRequest, err.Error())
|
||||
}
|
||||
res, err := intAPI.MSC2946Spaces(req.Context(), request.S, request.RoomID, request.Req)
|
||||
if err != nil {
|
||||
ferr, ok := err.(*api.FederationClientError)
|
||||
if ok {
|
||||
request.Err = ferr
|
||||
} else {
|
||||
request.Err = &api.FederationClientError{
|
||||
Err: err.Error(),
|
||||
}
|
||||
}
|
||||
}
|
||||
request.Res = res
|
||||
return util.JSONResponse{Code: http.StatusOK, JSON: request}
|
||||
}),
|
||||
)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -25,6 +25,7 @@ import (
|
|||
"github.com/matrix-org/dendrite/federationsender/storage"
|
||||
"github.com/matrix-org/dendrite/federationsender/storage/shared"
|
||||
"github.com/matrix-org/dendrite/roomserver/api"
|
||||
"github.com/matrix-org/dendrite/setup/process"
|
||||
"github.com/matrix-org/gomatrix"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
|
@ -45,7 +46,9 @@ const (
|
|||
// ensures that only one request is in flight to a given destination
|
||||
// at a time.
|
||||
type destinationQueue struct {
|
||||
queues *OutgoingQueues
|
||||
db storage.Database
|
||||
process *process.ProcessContext
|
||||
signing *SigningInfo
|
||||
rsAPI api.RoomserverInternalAPI
|
||||
client *gomatrixserverlib.FederationClient // federation client
|
||||
|
|
@ -244,6 +247,7 @@ func (oq *destinationQueue) backgroundSend() {
|
|||
}
|
||||
destinationQueueRunning.Inc()
|
||||
defer destinationQueueRunning.Dec()
|
||||
defer oq.queues.clearQueue(oq)
|
||||
defer oq.running.Store(false)
|
||||
|
||||
// Mark the queue as overflowed, so we will consult the database
|
||||
|
|
@ -411,7 +415,7 @@ func (oq *destinationQueue) nextTransaction(
|
|||
// TODO: we should check for 500-ish fails vs 400-ish here,
|
||||
// since we shouldn't queue things indefinitely in response
|
||||
// to a 400-ish error
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Minute*5)
|
||||
ctx, cancel := context.WithTimeout(oq.process.Context(), time.Minute*5)
|
||||
defer cancel()
|
||||
_, err := oq.client.SendTransaction(ctx, t)
|
||||
switch err.(type) {
|
||||
|
|
@ -442,7 +446,7 @@ func (oq *destinationQueue) nextTransaction(
|
|||
log.WithFields(log.Fields{
|
||||
"destination": oq.destination,
|
||||
log.ErrorKey: err,
|
||||
}).Infof("Failed to send transaction %q", t.TransactionID)
|
||||
}).Debugf("Failed to send transaction %q", t.TransactionID)
|
||||
return false, 0, 0, err
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -26,6 +26,7 @@ import (
|
|||
"github.com/matrix-org/dendrite/federationsender/storage"
|
||||
"github.com/matrix-org/dendrite/federationsender/storage/shared"
|
||||
"github.com/matrix-org/dendrite/roomserver/api"
|
||||
"github.com/matrix-org/dendrite/setup/process"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
|
@ -36,6 +37,7 @@ import (
|
|||
// matrix servers
|
||||
type OutgoingQueues struct {
|
||||
db storage.Database
|
||||
process *process.ProcessContext
|
||||
disabled bool
|
||||
rsAPI api.RoomserverInternalAPI
|
||||
origin gomatrixserverlib.ServerName
|
||||
|
|
@ -80,6 +82,7 @@ var destinationQueueBackingOff = prometheus.NewGauge(
|
|||
// NewOutgoingQueues makes a new OutgoingQueues
|
||||
func NewOutgoingQueues(
|
||||
db storage.Database,
|
||||
process *process.ProcessContext,
|
||||
disabled bool,
|
||||
origin gomatrixserverlib.ServerName,
|
||||
client *gomatrixserverlib.FederationClient,
|
||||
|
|
@ -89,6 +92,7 @@ func NewOutgoingQueues(
|
|||
) *OutgoingQueues {
|
||||
queues := &OutgoingQueues{
|
||||
disabled: disabled,
|
||||
process: process,
|
||||
db: db,
|
||||
rsAPI: rsAPI,
|
||||
origin: origin,
|
||||
|
|
@ -116,7 +120,7 @@ func NewOutgoingQueues(
|
|||
log.WithError(err).Error("Failed to get EDU server names for destination queue hydration")
|
||||
}
|
||||
for serverName := range serverNames {
|
||||
if queue := queues.getQueue(serverName); !queue.statistics.Blacklisted() {
|
||||
if queue := queues.getQueue(serverName); queue != nil {
|
||||
queue.wakeQueueIfNeeded()
|
||||
}
|
||||
}
|
||||
|
|
@ -144,13 +148,18 @@ type queuedEDU struct {
|
|||
}
|
||||
|
||||
func (oqs *OutgoingQueues) getQueue(destination gomatrixserverlib.ServerName) *destinationQueue {
if oqs.statistics.ForServer(destination).Blacklisted() {
return nil
}
oqs.queuesMutex.Lock()
defer oqs.queuesMutex.Unlock()
oq := oqs.queues[destination]
if oq == nil {
oq, ok := oqs.queues[destination]
if !ok || oq == nil {
destinationQueueTotal.Inc()
oq = &destinationQueue{
queues: oqs,
db: oqs.db,
process: oqs.process,
rsAPI: oqs.rsAPI,
origin: oqs.origin,
destination: destination,
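Because getQueue now returns nil for blacklisted destinations, every caller guards the returned queue, as the SendEvent, SendEDU and RetryServer hunks below show. The shape of that guard in isolation (illustration only, simplified types):

// Sketch: skip destinations for which no queue is returned.
package example

type destination string

type destinationQueue struct{}

func (q *destinationQueue) sendEvent(eventNID int64) {
	// enqueue the event for this destination (omitted in this sketch)
	_ = eventNID
}

func sendToDestinations(destinations []destination, getQueue func(destination) *destinationQueue, eventNID int64) {
	for _, d := range destinations {
		// getQueue returns nil for destinations we refuse to talk to
		// (e.g. blacklisted servers), so callers guard the call.
		if queue := getQueue(d); queue != nil {
			queue.sendEvent(eventNID)
		}
	}
}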
@ -165,6 +174,14 @@ func (oqs *OutgoingQueues) getQueue(destination gomatrixserverlib.ServerName) *d
|
|||
return oq
|
||||
}
|
||||
|
||||
func (oqs *OutgoingQueues) clearQueue(oq *destinationQueue) {
|
||||
oqs.queuesMutex.Lock()
|
||||
defer oqs.queuesMutex.Unlock()
|
||||
|
||||
delete(oqs.queues, oq.destination)
|
||||
destinationQueueTotal.Dec()
|
||||
}
|
||||
|
||||
type ErrorFederationDisabled struct {
|
||||
Message string
|
||||
}
|
||||
|
|
@ -231,7 +248,9 @@ func (oqs *OutgoingQueues) SendEvent(
|
|||
}
|
||||
|
||||
for destination := range destmap {
|
||||
oqs.getQueue(destination).sendEvent(ev, nid)
|
||||
if queue := oqs.getQueue(destination); queue != nil {
|
||||
queue.sendEvent(ev, nid)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
@ -301,7 +320,9 @@ func (oqs *OutgoingQueues) SendEDU(
|
|||
}
|
||||
|
||||
for destination := range destmap {
|
||||
oqs.getQueue(destination).sendEDU(e, nid)
|
||||
if queue := oqs.getQueue(destination); queue != nil {
|
||||
queue.sendEDU(e, nid)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
@ -312,9 +333,7 @@ func (oqs *OutgoingQueues) RetryServer(srv gomatrixserverlib.ServerName) {
|
|||
if oqs.disabled {
|
||||
return
|
||||
}
|
||||
q := oqs.getQueue(srv)
|
||||
if q == nil {
|
||||
return
|
||||
if queue := oqs.getQueue(srv); queue != nil {
|
||||
queue.wakeQueueIfNeeded()
|
||||
}
|
||||
q.wakeQueueIfNeeded()
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -51,7 +51,18 @@ type Database interface {
GetPendingPDUServerNames(ctx context.Context) ([]gomatrixserverlib.ServerName, error)
GetPendingEDUServerNames(ctx context.Context) ([]gomatrixserverlib.ServerName, error)

// these don't have contexts passed in as we want things to happen regardless of the request context
AddServerToBlacklist(serverName gomatrixserverlib.ServerName) error
RemoveServerFromBlacklist(serverName gomatrixserverlib.ServerName) error
IsServerBlacklisted(serverName gomatrixserverlib.ServerName) (bool, error)

AddOutboundPeek(ctx context.Context, serverName gomatrixserverlib.ServerName, roomID, peekID string, renewalInterval int64) error
RenewOutboundPeek(ctx context.Context, serverName gomatrixserverlib.ServerName, roomID, peekID string, renewalInterval int64) error
GetOutboundPeek(ctx context.Context, serverName gomatrixserverlib.ServerName, roomID, peekID string) (*types.OutboundPeek, error)
GetOutboundPeeks(ctx context.Context, roomID string) ([]types.OutboundPeek, error)

AddInboundPeek(ctx context.Context, serverName gomatrixserverlib.ServerName, roomID, peekID string, renewalInterval int64) error
RenewInboundPeek(ctx context.Context, serverName gomatrixserverlib.ServerName, roomID, peekID string, renewalInterval int64) error
GetInboundPeek(ctx context.Context, serverName gomatrixserverlib.ServerName, roomID, peekID string) (*types.InboundPeek, error)
GetInboundPeeks(ctx context.Context, roomID string) ([]types.InboundPeek, error)
}
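The new peek methods above are keyed on (server name, room ID, peek ID) with a renewal interval in milliseconds. A sketch of how a caller might use them, against a narrowed local copy of the interface (illustration only; the real code uses gomatrixserverlib.ServerName and types.InboundPeek):

// Sketch: record an inbound peek the first time and renew it afterwards.
package example

import "context"

type inboundPeekStore interface {
	AddInboundPeek(ctx context.Context, serverName, roomID, peekID string, renewalInterval int64) error
	RenewInboundPeek(ctx context.Context, serverName, roomID, peekID string, renewalInterval int64) error
}

func trackInboundPeek(ctx context.Context, db inboundPeekStore, serverName, roomID, peekID string, renewalIntervalMS int64, alreadyKnown bool) error {
	if alreadyKnown {
		return db.RenewInboundPeek(ctx, serverName, roomID, peekID, renewalIntervalMS)
	}
	return db.AddInboundPeek(ctx, serverName, roomID, peekID, renewalIntervalMS)
}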
|
||||
|
|
|
|||
|
|
@ -0,0 +1,46 @@
|
|||
// Copyright 2021 The Matrix.org Foundation C.I.C.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package deltas
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
|
||||
"github.com/matrix-org/dendrite/internal/sqlutil"
|
||||
"github.com/pressly/goose"
|
||||
)
|
||||
|
||||
func LoadFromGoose() {
|
||||
goose.AddMigration(UpRemoveRoomsTable, DownRemoveRoomsTable)
|
||||
}
|
||||
|
||||
func LoadRemoveRoomsTable(m *sqlutil.Migrations) {
|
||||
m.AddMigration(UpRemoveRoomsTable, DownRemoveRoomsTable)
|
||||
}
|
||||
|
||||
func UpRemoveRoomsTable(tx *sql.Tx) error {
|
||||
_, err := tx.Exec(`
|
||||
DROP TABLE IF EXISTS federationsender_rooms;
|
||||
`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to execute upgrade: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func DownRemoveRoomsTable(tx *sql.Tx) error {
|
||||
// We can't reverse this.
|
||||
return nil
|
||||
}
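UpRemoveRoomsTable and DownRemoveRoomsTable above follow the usual up/down migration shape; the Postgres storage setup later in this commit registers them via sqlutil.NewMigrations, LoadRemoveRoomsTable and RunDeltas. A toy runner showing that shape (illustration only, not dendrite's sqlutil):

// Sketch: run each "up" migration in its own transaction, rolling back on failure.
package example

import (
	"database/sql"
	"fmt"
)

type migration struct {
	Up   func(tx *sql.Tx) error
	Down func(tx *sql.Tx) error
}

func runUp(db *sql.DB, migrations []migration) error {
	for i, m := range migrations {
		tx, err := db.Begin()
		if err != nil {
			return err
		}
		if err := m.Up(tx); err != nil {
			_ = tx.Rollback() // keep the database unchanged if the step fails
			return fmt.Errorf("migration %d failed: %w", i, err)
		}
		if err := tx.Commit(); err != nil {
			return err
		}
	}
	return nil
}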
|
||||
176
federationsender/storage/postgres/inbound_peeks_table.go
Normal file
|
|
@ -0,0 +1,176 @@
|
|||
// Copyright 2020 The Matrix.org Foundation C.I.C.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package postgres
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"time"
|
||||
|
||||
"github.com/matrix-org/dendrite/federationsender/types"
|
||||
"github.com/matrix-org/dendrite/internal"
|
||||
"github.com/matrix-org/dendrite/internal/sqlutil"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
)
|
||||
|
||||
const inboundPeeksSchema = `
|
||||
CREATE TABLE IF NOT EXISTS federationsender_inbound_peeks (
|
||||
room_id TEXT NOT NULL,
|
||||
server_name TEXT NOT NULL,
|
||||
peek_id TEXT NOT NULL,
|
||||
creation_ts BIGINT NOT NULL,
|
||||
renewed_ts BIGINT NOT NULL,
|
||||
renewal_interval BIGINT NOT NULL,
|
||||
UNIQUE (room_id, server_name, peek_id)
|
||||
);
|
||||
`
|
||||
|
||||
const insertInboundPeekSQL = "" +
|
||||
"INSERT INTO federationsender_inbound_peeks (room_id, server_name, peek_id, creation_ts, renewed_ts, renewal_interval) VALUES ($1, $2, $3, $4, $5, $6)"
|
||||
|
||||
const selectInboundPeekSQL = "" +
|
||||
"SELECT room_id, server_name, peek_id, creation_ts, renewed_ts, renewal_interval FROM federationsender_inbound_peeks WHERE room_id = $1 and server_name = $2 and peek_id = $3"
|
||||
|
||||
const selectInboundPeeksSQL = "" +
|
||||
"SELECT room_id, server_name, peek_id, creation_ts, renewed_ts, renewal_interval FROM federationsender_inbound_peeks WHERE room_id = $1"
|
||||
|
||||
const renewInboundPeekSQL = "" +
|
||||
"UPDATE federationsender_inbound_peeks SET renewed_ts=$1, renewal_interval=$2 WHERE room_id = $3 and server_name = $4 and peek_id = $5"
|
||||
|
||||
const deleteInboundPeekSQL = "" +
|
||||
"DELETE FROM federationsender_inbound_peeks WHERE room_id = $1 and server_name = $2"
|
||||
|
||||
const deleteInboundPeeksSQL = "" +
|
||||
"DELETE FROM federationsender_inbound_peeks WHERE room_id = $1"
|
||||
|
||||
type inboundPeeksStatements struct {
|
||||
db *sql.DB
|
||||
insertInboundPeekStmt *sql.Stmt
|
||||
selectInboundPeekStmt *sql.Stmt
|
||||
selectInboundPeeksStmt *sql.Stmt
|
||||
renewInboundPeekStmt *sql.Stmt
|
||||
deleteInboundPeekStmt *sql.Stmt
|
||||
deleteInboundPeeksStmt *sql.Stmt
|
||||
}
|
||||
|
||||
func NewPostgresInboundPeeksTable(db *sql.DB) (s *inboundPeeksStatements, err error) {
|
||||
s = &inboundPeeksStatements{
|
||||
db: db,
|
||||
}
|
||||
_, err = db.Exec(inboundPeeksSchema)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if s.insertInboundPeekStmt, err = db.Prepare(insertInboundPeekSQL); err != nil {
|
||||
return
|
||||
}
|
||||
if s.selectInboundPeekStmt, err = db.Prepare(selectInboundPeekSQL); err != nil {
|
||||
return
|
||||
}
|
||||
if s.selectInboundPeeksStmt, err = db.Prepare(selectInboundPeeksSQL); err != nil {
|
||||
return
|
||||
}
|
||||
if s.renewInboundPeekStmt, err = db.Prepare(renewInboundPeekSQL); err != nil {
|
||||
return
|
||||
}
|
||||
if s.deleteInboundPeeksStmt, err = db.Prepare(deleteInboundPeeksSQL); err != nil {
|
||||
return
|
||||
}
|
||||
if s.deleteInboundPeekStmt, err = db.Prepare(deleteInboundPeekSQL); err != nil {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (s *inboundPeeksStatements) InsertInboundPeek(
|
||||
ctx context.Context, txn *sql.Tx, serverName gomatrixserverlib.ServerName, roomID, peekID string, renewalInterval int64,
|
||||
) (err error) {
|
||||
nowMilli := time.Now().UnixNano() / int64(time.Millisecond)
|
||||
stmt := sqlutil.TxStmt(txn, s.insertInboundPeekStmt)
|
||||
_, err = stmt.ExecContext(ctx, roomID, serverName, peekID, nowMilli, nowMilli, renewalInterval)
|
||||
return
|
||||
}
|
||||
|
||||
func (s *inboundPeeksStatements) RenewInboundPeek(
|
||||
ctx context.Context, txn *sql.Tx, serverName gomatrixserverlib.ServerName, roomID, peekID string, renewalInterval int64,
|
||||
) (err error) {
|
||||
nowMilli := time.Now().UnixNano() / int64(time.Millisecond)
|
||||
_, err = sqlutil.TxStmt(txn, s.renewInboundPeekStmt).ExecContext(ctx, nowMilli, renewalInterval, roomID, serverName, peekID)
|
||||
return
|
||||
}
|
||||
|
||||
func (s *inboundPeeksStatements) SelectInboundPeek(
|
||||
ctx context.Context, txn *sql.Tx, serverName gomatrixserverlib.ServerName, roomID, peekID string,
|
||||
) (*types.InboundPeek, error) {
|
||||
row := sqlutil.TxStmt(txn, s.selectInboundPeeksStmt).QueryRowContext(ctx, roomID)
|
||||
inboundPeek := types.InboundPeek{}
|
||||
err := row.Scan(
|
||||
&inboundPeek.RoomID,
|
||||
&inboundPeek.ServerName,
|
||||
&inboundPeek.PeekID,
|
||||
&inboundPeek.CreationTimestamp,
|
||||
&inboundPeek.RenewedTimestamp,
|
||||
&inboundPeek.RenewalInterval,
|
||||
)
|
||||
if err == sql.ErrNoRows {
|
||||
return nil, nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &inboundPeek, nil
|
||||
}
|
||||
|
||||
func (s *inboundPeeksStatements) SelectInboundPeeks(
|
||||
ctx context.Context, txn *sql.Tx, roomID string,
|
||||
) (inboundPeeks []types.InboundPeek, err error) {
|
||||
rows, err := sqlutil.TxStmt(txn, s.selectInboundPeeksStmt).QueryContext(ctx, roomID)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer internal.CloseAndLogIfError(ctx, rows, "SelectInboundPeeks: rows.close() failed")
|
||||
|
||||
for rows.Next() {
|
||||
inboundPeek := types.InboundPeek{}
|
||||
if err = rows.Scan(
|
||||
&inboundPeek.RoomID,
|
||||
&inboundPeek.ServerName,
|
||||
&inboundPeek.PeekID,
|
||||
&inboundPeek.CreationTimestamp,
|
||||
&inboundPeek.RenewedTimestamp,
|
||||
&inboundPeek.RenewalInterval,
|
||||
); err != nil {
|
||||
return
|
||||
}
|
||||
inboundPeeks = append(inboundPeeks, inboundPeek)
|
||||
}
|
||||
|
||||
return inboundPeeks, rows.Err()
|
||||
}
|
||||
|
||||
func (s *inboundPeeksStatements) DeleteInboundPeek(
|
||||
ctx context.Context, txn *sql.Tx, serverName gomatrixserverlib.ServerName, roomID, peekID string,
|
||||
) (err error) {
|
||||
_, err = sqlutil.TxStmt(txn, s.deleteInboundPeekStmt).ExecContext(ctx, roomID, serverName, peekID)
|
||||
return
|
||||
}
|
||||
|
||||
func (s *inboundPeeksStatements) DeleteInboundPeeks(
|
||||
ctx context.Context, txn *sql.Tx, roomID string,
|
||||
) (err error) {
|
||||
_, err = sqlutil.TxStmt(txn, s.deleteInboundPeeksStmt).ExecContext(ctx, roomID)
|
||||
return
|
||||
}
|
||||
176
federationsender/storage/postgres/outbound_peeks_table.go
Normal file
|
|
@ -0,0 +1,176 @@
|
|||
// Copyright 2020 The Matrix.org Foundation C.I.C.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package postgres
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"time"
|
||||
|
||||
"github.com/matrix-org/dendrite/federationsender/types"
|
||||
"github.com/matrix-org/dendrite/internal"
|
||||
"github.com/matrix-org/dendrite/internal/sqlutil"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
)
|
||||
|
||||
const outboundPeeksSchema = `
|
||||
CREATE TABLE IF NOT EXISTS federationsender_outbound_peeks (
|
||||
room_id TEXT NOT NULL,
|
||||
server_name TEXT NOT NULL,
|
||||
peek_id TEXT NOT NULL,
|
||||
creation_ts BIGINT NOT NULL,
|
||||
renewed_ts BIGINT NOT NULL,
|
||||
renewal_interval BIGINT NOT NULL,
|
||||
UNIQUE (room_id, server_name, peek_id)
|
||||
);
|
||||
`
|
||||
|
||||
const insertOutboundPeekSQL = "" +
|
||||
"INSERT INTO federationsender_outbound_peeks (room_id, server_name, peek_id, creation_ts, renewed_ts, renewal_interval) VALUES ($1, $2, $3, $4, $5, $6)"
|
||||
|
||||
const selectOutboundPeekSQL = "" +
|
||||
"SELECT room_id, server_name, peek_id, creation_ts, renewed_ts, renewal_interval FROM federationsender_outbound_peeks WHERE room_id = $1 and server_name = $2 and peek_id = $3"
|
||||
|
||||
const selectOutboundPeeksSQL = "" +
|
||||
"SELECT room_id, server_name, peek_id, creation_ts, renewed_ts, renewal_interval FROM federationsender_outbound_peeks WHERE room_id = $1"
|
||||
|
||||
const renewOutboundPeekSQL = "" +
|
||||
"UPDATE federationsender_outbound_peeks SET renewed_ts=$1, renewal_interval=$2 WHERE room_id = $3 and server_name = $4 and peek_id = $5"
|
||||
|
||||
const deleteOutboundPeekSQL = "" +
|
||||
"DELETE FROM federationsender_outbound_peeks WHERE room_id = $1 and server_name = $2"
|
||||
|
||||
const deleteOutboundPeeksSQL = "" +
|
||||
"DELETE FROM federationsender_outbound_peeks WHERE room_id = $1"
|
||||
|
||||
type outboundPeeksStatements struct {
|
||||
db *sql.DB
|
||||
insertOutboundPeekStmt *sql.Stmt
|
||||
selectOutboundPeekStmt *sql.Stmt
|
||||
selectOutboundPeeksStmt *sql.Stmt
|
||||
renewOutboundPeekStmt *sql.Stmt
|
||||
deleteOutboundPeekStmt *sql.Stmt
|
||||
deleteOutboundPeeksStmt *sql.Stmt
|
||||
}
|
||||
|
||||
func NewPostgresOutboundPeeksTable(db *sql.DB) (s *outboundPeeksStatements, err error) {
|
||||
s = &outboundPeeksStatements{
|
||||
db: db,
|
||||
}
|
||||
_, err = db.Exec(outboundPeeksSchema)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if s.insertOutboundPeekStmt, err = db.Prepare(insertOutboundPeekSQL); err != nil {
|
||||
return
|
||||
}
|
||||
if s.selectOutboundPeekStmt, err = db.Prepare(selectOutboundPeekSQL); err != nil {
|
||||
return
|
||||
}
|
||||
if s.selectOutboundPeeksStmt, err = db.Prepare(selectOutboundPeeksSQL); err != nil {
|
||||
return
|
||||
}
|
||||
if s.renewOutboundPeekStmt, err = db.Prepare(renewOutboundPeekSQL); err != nil {
|
||||
return
|
||||
}
|
||||
if s.deleteOutboundPeeksStmt, err = db.Prepare(deleteOutboundPeeksSQL); err != nil {
|
||||
return
|
||||
}
|
||||
if s.deleteOutboundPeekStmt, err = db.Prepare(deleteOutboundPeekSQL); err != nil {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (s *outboundPeeksStatements) InsertOutboundPeek(
|
||||
ctx context.Context, txn *sql.Tx, serverName gomatrixserverlib.ServerName, roomID, peekID string, renewalInterval int64,
|
||||
) (err error) {
|
||||
nowMilli := time.Now().UnixNano() / int64(time.Millisecond)
|
||||
stmt := sqlutil.TxStmt(txn, s.insertOutboundPeekStmt)
|
||||
_, err = stmt.ExecContext(ctx, roomID, serverName, peekID, nowMilli, nowMilli, renewalInterval)
|
||||
return
|
||||
}
|
||||
|
||||
func (s *outboundPeeksStatements) RenewOutboundPeek(
|
||||
ctx context.Context, txn *sql.Tx, serverName gomatrixserverlib.ServerName, roomID, peekID string, renewalInterval int64,
|
||||
) (err error) {
|
||||
nowMilli := time.Now().UnixNano() / int64(time.Millisecond)
|
||||
_, err = sqlutil.TxStmt(txn, s.renewOutboundPeekStmt).ExecContext(ctx, nowMilli, renewalInterval, roomID, serverName, peekID)
|
||||
return
|
||||
}
|
||||
|
||||
func (s *outboundPeeksStatements) SelectOutboundPeek(
|
||||
ctx context.Context, txn *sql.Tx, serverName gomatrixserverlib.ServerName, roomID, peekID string,
|
||||
) (*types.OutboundPeek, error) {
|
||||
row := sqlutil.TxStmt(txn, s.selectOutboundPeeksStmt).QueryRowContext(ctx, roomID)
|
||||
outboundPeek := types.OutboundPeek{}
|
||||
err := row.Scan(
|
||||
&outboundPeek.RoomID,
|
||||
&outboundPeek.ServerName,
|
||||
&outboundPeek.PeekID,
|
||||
&outboundPeek.CreationTimestamp,
|
||||
&outboundPeek.RenewedTimestamp,
|
||||
&outboundPeek.RenewalInterval,
|
||||
)
|
||||
if err == sql.ErrNoRows {
|
||||
return nil, nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &outboundPeek, nil
|
||||
}
|
||||
|
||||
func (s *outboundPeeksStatements) SelectOutboundPeeks(
|
||||
ctx context.Context, txn *sql.Tx, roomID string,
|
||||
) (outboundPeeks []types.OutboundPeek, err error) {
|
||||
rows, err := sqlutil.TxStmt(txn, s.selectOutboundPeeksStmt).QueryContext(ctx, roomID)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer internal.CloseAndLogIfError(ctx, rows, "SelectOutboundPeeks: rows.close() failed")
|
||||
|
||||
for rows.Next() {
|
||||
outboundPeek := types.OutboundPeek{}
|
||||
if err = rows.Scan(
|
||||
&outboundPeek.RoomID,
|
||||
&outboundPeek.ServerName,
|
||||
&outboundPeek.PeekID,
|
||||
&outboundPeek.CreationTimestamp,
|
||||
&outboundPeek.RenewedTimestamp,
|
||||
&outboundPeek.RenewalInterval,
|
||||
); err != nil {
|
||||
return
|
||||
}
|
||||
outboundPeeks = append(outboundPeeks, outboundPeek)
|
||||
}
|
||||
|
||||
return outboundPeeks, rows.Err()
|
||||
}
|
||||
|
||||
func (s *outboundPeeksStatements) DeleteOutboundPeek(
|
||||
ctx context.Context, txn *sql.Tx, serverName gomatrixserverlib.ServerName, roomID, peekID string,
|
||||
) (err error) {
|
||||
_, err = sqlutil.TxStmt(txn, s.deleteOutboundPeekStmt).ExecContext(ctx, roomID, serverName, peekID)
|
||||
return
|
||||
}
|
||||
|
||||
func (s *outboundPeeksStatements) DeleteOutboundPeeks(
|
||||
ctx context.Context, txn *sql.Tx, roomID string,
|
||||
) (err error) {
|
||||
_, err = sqlutil.TxStmt(txn, s.deleteOutboundPeeksStmt).ExecContext(ctx, roomID)
|
||||
return
|
||||
}
|
||||
|
|
@@ -1,104 +0,0 @@
// Copyright 2017-2018 New Vector Ltd
// Copyright 2019-2020 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package postgres

import (
    "context"
    "database/sql"

    "github.com/matrix-org/dendrite/internal/sqlutil"
)

const roomSchema = `
CREATE TABLE IF NOT EXISTS federationsender_rooms (
    -- The string ID of the room
    room_id TEXT PRIMARY KEY,
    -- The most recent event state by the room server.
    -- We can use this to tell if our view of the room state has become
    -- desynchronised.
    last_event_id TEXT NOT NULL
);`

const insertRoomSQL = "" +
    "INSERT INTO federationsender_rooms (room_id, last_event_id) VALUES ($1, '')" +
    " ON CONFLICT DO NOTHING"

const selectRoomForUpdateSQL = "" +
    "SELECT last_event_id FROM federationsender_rooms WHERE room_id = $1 FOR UPDATE"

const updateRoomSQL = "" +
    "UPDATE federationsender_rooms SET last_event_id = $2 WHERE room_id = $1"

type roomStatements struct {
    db                      *sql.DB
    insertRoomStmt          *sql.Stmt
    selectRoomForUpdateStmt *sql.Stmt
    updateRoomStmt          *sql.Stmt
}

func NewPostgresRoomsTable(db *sql.DB) (s *roomStatements, err error) {
    s = &roomStatements{
        db: db,
    }
    _, err = s.db.Exec(roomSchema)
    if err != nil {
        return
    }
    if s.insertRoomStmt, err = s.db.Prepare(insertRoomSQL); err != nil {
        return
    }
    if s.selectRoomForUpdateStmt, err = s.db.Prepare(selectRoomForUpdateSQL); err != nil {
        return
    }
    if s.updateRoomStmt, err = s.db.Prepare(updateRoomSQL); err != nil {
        return
    }
    return
}

// insertRoom inserts the room if it didn't already exist.
// If the room didn't exist then last_event_id is set to the empty string.
func (s *roomStatements) InsertRoom(
    ctx context.Context, txn *sql.Tx, roomID string,
) error {
    _, err := sqlutil.TxStmt(txn, s.insertRoomStmt).ExecContext(ctx, roomID)
    return err
}

// selectRoomForUpdate locks the row for the room and returns the last_event_id.
// The row must already exist in the table. Callers can ensure that the row
// exists by calling insertRoom first.
func (s *roomStatements) SelectRoomForUpdate(
    ctx context.Context, txn *sql.Tx, roomID string,
) (string, error) {
    var lastEventID string
    stmt := sqlutil.TxStmt(txn, s.selectRoomForUpdateStmt)
    err := stmt.QueryRowContext(ctx, roomID).Scan(&lastEventID)
    if err != nil {
        return "", err
    }
    return lastEventID, nil
}

// updateRoom updates the last_event_id for the room. selectRoomForUpdate should
// have already been called earlier within the transaction.
func (s *roomStatements) UpdateRoom(
    ctx context.Context, txn *sql.Tx, roomID, lastEventID string,
) error {
    stmt := sqlutil.TxStmt(txn, s.updateRoomStmt)
    _, err := stmt.ExecContext(ctx, roomID, lastEventID)
    return err
}
@@ -18,6 +18,7 @@ package postgres
import (
    "database/sql"

    "github.com/matrix-org/dendrite/federationsender/storage/postgres/deltas"
    "github.com/matrix-org/dendrite/federationsender/storage/shared"
    "github.com/matrix-org/dendrite/internal/caching"
    "github.com/matrix-org/dendrite/internal/sqlutil"

@@ -56,24 +57,34 @@ func NewDatabase(dbProperties *config.DatabaseOptions, cache caching.FederationS
    if err != nil {
        return nil, err
    }
    rooms, err := NewPostgresRoomsTable(d.db)
    if err != nil {
        return nil, err
    }
    blacklist, err := NewPostgresBlacklistTable(d.db)
    if err != nil {
        return nil, err
    }
    inboundPeeks, err := NewPostgresInboundPeeksTable(d.db)
    if err != nil {
        return nil, err
    }
    outboundPeeks, err := NewPostgresOutboundPeeksTable(d.db)
    if err != nil {
        return nil, err
    }
    m := sqlutil.NewMigrations()
    deltas.LoadRemoveRoomsTable(m)
    if err = m.RunDeltas(d.db, dbProperties); err != nil {
        return nil, err
    }
    d.Database = shared.Database{
        DB:                          d.db,
        Cache:                       cache,
        Writer:                      d.writer,
        FederationSenderJoinedHosts: joinedHosts,
        FederationSenderQueuePDUs:   queuePDUs,
        FederationSenderQueueEDUs:   queueEDUs,
        FederationSenderQueueJSON:   queueJSON,
        FederationSenderRooms:       rooms,
        FederationSenderBlacklist:   blacklist,
        DB:                            d.db,
        Cache:                         cache,
        Writer:                        d.writer,
        FederationSenderJoinedHosts:   joinedHosts,
        FederationSenderQueuePDUs:     queuePDUs,
        FederationSenderQueueEDUs:     queueEDUs,
        FederationSenderQueueJSON:     queueJSON,
        FederationSenderBlacklist:     blacklist,
        FederationSenderInboundPeeks:  inboundPeeks,
        FederationSenderOutboundPeeks: outboundPeeks,
    }
    if err = d.PartitionOffsetStatements.Prepare(d.db, d.writer, "federationsender"); err != nil {
        return nil, err
@@ -27,15 +27,16 @@ import (
)

type Database struct {
    DB                          *sql.DB
    Cache                       caching.FederationSenderCache
    Writer                      sqlutil.Writer
    FederationSenderQueuePDUs   tables.FederationSenderQueuePDUs
    FederationSenderQueueEDUs   tables.FederationSenderQueueEDUs
    FederationSenderQueueJSON   tables.FederationSenderQueueJSON
    FederationSenderJoinedHosts tables.FederationSenderJoinedHosts
    FederationSenderRooms       tables.FederationSenderRooms
    FederationSenderBlacklist   tables.FederationSenderBlacklist
    DB                            *sql.DB
    Cache                         caching.FederationSenderCache
    Writer                        sqlutil.Writer
    FederationSenderQueuePDUs     tables.FederationSenderQueuePDUs
    FederationSenderQueueEDUs     tables.FederationSenderQueueEDUs
    FederationSenderQueueJSON     tables.FederationSenderQueueJSON
    FederationSenderJoinedHosts   tables.FederationSenderJoinedHosts
    FederationSenderBlacklist    tables.FederationSenderBlacklist
    FederationSenderOutboundPeeks tables.FederationSenderOutboundPeeks
    FederationSenderInboundPeeks  tables.FederationSenderInboundPeeks
}

// An Receipt contains the NIDs of a call to GetNextTransactionPDUs/EDUs.

@@ -62,29 +63,6 @@ func (d *Database) UpdateRoom(
    removeHosts []string,
) (joinedHosts []types.JoinedHost, err error) {
    err = d.Writer.Do(d.DB, nil, func(txn *sql.Tx) error {
        err = d.FederationSenderRooms.InsertRoom(ctx, txn, roomID)
        if err != nil {
            return err
        }

        lastSentEventID, err := d.FederationSenderRooms.SelectRoomForUpdate(ctx, txn, roomID)
        if err != nil {
            return err
        }

        if lastSentEventID == newEventID {
            // We've handled this message before, so let's just ignore it.
            // We can only get a duplicate for the last message we processed,
            // so its enough just to compare the newEventID with lastSentEventID
            return nil
        }

        if lastSentEventID != "" && lastSentEventID != oldEventID {
            return types.EventIDMismatchError{
                DatabaseID: lastSentEventID, RoomServerID: oldEventID,
            }
        }

        joinedHosts, err = d.FederationSenderJoinedHosts.SelectJoinedHostsWithTx(ctx, txn, roomID)
        if err != nil {
            return err

@@ -99,7 +77,7 @@ func (d *Database) UpdateRoom(
        if err = d.FederationSenderJoinedHosts.DeleteJoinedHosts(ctx, txn, removeHosts); err != nil {
            return err
        }
        return d.FederationSenderRooms.UpdateRoom(ctx, txn, roomID, newEventID)
        return nil
    })
    return
}

@@ -173,3 +151,43 @@ func (d *Database) RemoveServerFromBlacklist(serverName gomatrixserverlib.Server
func (d *Database) IsServerBlacklisted(serverName gomatrixserverlib.ServerName) (bool, error) {
    return d.FederationSenderBlacklist.SelectBlacklist(context.TODO(), nil, serverName)
}

func (d *Database) AddOutboundPeek(ctx context.Context, serverName gomatrixserverlib.ServerName, roomID, peekID string, renewalInterval int64) error {
    return d.Writer.Do(d.DB, nil, func(txn *sql.Tx) error {
        return d.FederationSenderOutboundPeeks.InsertOutboundPeek(ctx, txn, serverName, roomID, peekID, renewalInterval)
    })
}

func (d *Database) RenewOutboundPeek(ctx context.Context, serverName gomatrixserverlib.ServerName, roomID, peekID string, renewalInterval int64) error {
    return d.Writer.Do(d.DB, nil, func(txn *sql.Tx) error {
        return d.FederationSenderOutboundPeeks.RenewOutboundPeek(ctx, txn, serverName, roomID, peekID, renewalInterval)
    })
}

func (d *Database) GetOutboundPeek(ctx context.Context, serverName gomatrixserverlib.ServerName, roomID, peekID string) (*types.OutboundPeek, error) {
    return d.FederationSenderOutboundPeeks.SelectOutboundPeek(ctx, nil, serverName, roomID, peekID)
}

func (d *Database) GetOutboundPeeks(ctx context.Context, roomID string) ([]types.OutboundPeek, error) {
    return d.FederationSenderOutboundPeeks.SelectOutboundPeeks(ctx, nil, roomID)
}

func (d *Database) AddInboundPeek(ctx context.Context, serverName gomatrixserverlib.ServerName, roomID, peekID string, renewalInterval int64) error {
    return d.Writer.Do(d.DB, nil, func(txn *sql.Tx) error {
        return d.FederationSenderInboundPeeks.InsertInboundPeek(ctx, txn, serverName, roomID, peekID, renewalInterval)
    })
}

func (d *Database) RenewInboundPeek(ctx context.Context, serverName gomatrixserverlib.ServerName, roomID, peekID string, renewalInterval int64) error {
    return d.Writer.Do(d.DB, nil, func(txn *sql.Tx) error {
        return d.FederationSenderInboundPeeks.RenewInboundPeek(ctx, txn, serverName, roomID, peekID, renewalInterval)
    })
}

func (d *Database) GetInboundPeek(ctx context.Context, serverName gomatrixserverlib.ServerName, roomID, peekID string) (*types.InboundPeek, error) {
    return d.FederationSenderInboundPeeks.SelectInboundPeek(ctx, nil, serverName, roomID, peekID)
}

func (d *Database) GetInboundPeeks(ctx context.Context, roomID string) ([]types.InboundPeek, error) {
    return d.FederationSenderInboundPeeks.SelectInboundPeeks(ctx, nil, roomID)
}
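For orientation, a rough sketch of how a consumer might drive the new outbound peek methods on this Database. The surrounding function, the chosen renewal interval, and the assumption that intervals are expressed in milliseconds (matching the nowMilli values the table code writes) are all illustrative rather than part of this commit.

package main

import (
    "context"
    "fmt"
    "time"

    "github.com/matrix-org/dendrite/federationsender/storage/shared"
    "github.com/matrix-org/gomatrixserverlib"
)

// trackOutboundPeek records a peek we are performing on a remote server and
// then lists everything we are currently peeking in that room.
func trackOutboundPeek(ctx context.Context, db *shared.Database, server gomatrixserverlib.ServerName, roomID, peekID string) error {
    renewalInterval := int64(time.Hour / time.Millisecond) // assumed to be milliseconds
    if err := db.AddOutboundPeek(ctx, server, roomID, peekID, renewalInterval); err != nil {
        return err
    }
    peeks, err := db.GetOutboundPeeks(ctx, roomID)
    if err != nil {
        return err
    }
    fmt.Println("outbound peeks in room:", len(peeks))
    return nil
}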
@@ -0,0 +1,46 @@
// Copyright 2021 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package deltas

import (
    "database/sql"
    "fmt"

    "github.com/matrix-org/dendrite/internal/sqlutil"
    "github.com/pressly/goose"
)

func LoadFromGoose() {
    goose.AddMigration(UpRemoveRoomsTable, DownRemoveRoomsTable)
}

func LoadRemoveRoomsTable(m *sqlutil.Migrations) {
    m.AddMigration(UpRemoveRoomsTable, DownRemoveRoomsTable)
}

func UpRemoveRoomsTable(tx *sql.Tx) error {
    _, err := tx.Exec(`
    DROP TABLE IF EXISTS federationsender_rooms;
    `)
    if err != nil {
        return fmt.Errorf("failed to execute upgrade: %w", err)
    }
    return nil
}

func DownRemoveRoomsTable(tx *sql.Tx) error {
    // We can't reverse this.
    return nil
}
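This delta is registered through the sqlutil migration helpers; the Postgres and SQLite NewDatabase functions later in this diff make exactly these calls. A condensed sketch of that wiring, where the wrapper function and its package are illustrative only:

package storage

import (
    "database/sql"

    "github.com/matrix-org/dendrite/federationsender/storage/postgres/deltas"
    "github.com/matrix-org/dendrite/internal/sqlutil"
    "github.com/matrix-org/dendrite/setup/config"
)

// runFederationSenderDeltas mirrors the calls made in NewDatabase below:
// register the remove-rooms delta, then apply any outstanding migrations.
func runFederationSenderDeltas(db *sql.DB, dbProperties *config.DatabaseOptions) error {
    m := sqlutil.NewMigrations()
    deltas.LoadRemoveRoomsTable(m)
    return m.RunDeltas(db, dbProperties)
}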
176
federationsender/storage/sqlite3/inbound_peeks_table.go
Normal file
@@ -0,0 +1,176 @@
// Copyright 2020 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package sqlite3

import (
    "context"
    "database/sql"
    "time"

    "github.com/matrix-org/dendrite/federationsender/types"
    "github.com/matrix-org/dendrite/internal"
    "github.com/matrix-org/dendrite/internal/sqlutil"
    "github.com/matrix-org/gomatrixserverlib"
)

const inboundPeeksSchema = `
CREATE TABLE IF NOT EXISTS federationsender_inbound_peeks (
    room_id TEXT NOT NULL,
    server_name TEXT NOT NULL,
    peek_id TEXT NOT NULL,
    creation_ts INTEGER NOT NULL,
    renewed_ts INTEGER NOT NULL,
    renewal_interval INTEGER NOT NULL,
    UNIQUE (room_id, server_name, peek_id)
);
`

const insertInboundPeekSQL = "" +
    "INSERT INTO federationsender_inbound_peeks (room_id, server_name, peek_id, creation_ts, renewed_ts, renewal_interval) VALUES ($1, $2, $3, $4, $5, $6)"

const selectInboundPeekSQL = "" +
    "SELECT room_id, server_name, peek_id, creation_ts, renewed_ts, renewal_interval FROM federationsender_inbound_peeks WHERE room_id = $1 and server_name = $2 and peek_id = $3"

const selectInboundPeeksSQL = "" +
    "SELECT room_id, server_name, peek_id, creation_ts, renewed_ts, renewal_interval FROM federationsender_inbound_peeks WHERE room_id = $1"

const renewInboundPeekSQL = "" +
    "UPDATE federationsender_inbound_peeks SET renewed_ts=$1, renewal_interval=$2 WHERE room_id = $3 and server_name = $4 and peek_id = $5"

const deleteInboundPeekSQL = "" +
    "DELETE FROM federationsender_inbound_peeks WHERE room_id = $1 and server_name = $2"

const deleteInboundPeeksSQL = "" +
    "DELETE FROM federationsender_inbound_peeks WHERE room_id = $1"

type inboundPeeksStatements struct {
    db                     *sql.DB
    insertInboundPeekStmt  *sql.Stmt
    selectInboundPeekStmt  *sql.Stmt
    selectInboundPeeksStmt *sql.Stmt
    renewInboundPeekStmt   *sql.Stmt
    deleteInboundPeekStmt  *sql.Stmt
    deleteInboundPeeksStmt *sql.Stmt
}

func NewSQLiteInboundPeeksTable(db *sql.DB) (s *inboundPeeksStatements, err error) {
    s = &inboundPeeksStatements{
        db: db,
    }
    _, err = db.Exec(inboundPeeksSchema)
    if err != nil {
        return
    }

    if s.insertInboundPeekStmt, err = db.Prepare(insertInboundPeekSQL); err != nil {
        return
    }
    if s.selectInboundPeekStmt, err = db.Prepare(selectInboundPeekSQL); err != nil {
        return
    }
    if s.selectInboundPeeksStmt, err = db.Prepare(selectInboundPeeksSQL); err != nil {
        return
    }
    if s.renewInboundPeekStmt, err = db.Prepare(renewInboundPeekSQL); err != nil {
        return
    }
    if s.deleteInboundPeeksStmt, err = db.Prepare(deleteInboundPeeksSQL); err != nil {
        return
    }
    if s.deleteInboundPeekStmt, err = db.Prepare(deleteInboundPeekSQL); err != nil {
        return
    }
    return
}

func (s *inboundPeeksStatements) InsertInboundPeek(
    ctx context.Context, txn *sql.Tx, serverName gomatrixserverlib.ServerName, roomID, peekID string, renewalInterval int64,
) (err error) {
    nowMilli := time.Now().UnixNano() / int64(time.Millisecond)
    stmt := sqlutil.TxStmt(txn, s.insertInboundPeekStmt)
    _, err = stmt.ExecContext(ctx, roomID, serverName, peekID, nowMilli, nowMilli, renewalInterval)
    return
}

func (s *inboundPeeksStatements) RenewInboundPeek(
    ctx context.Context, txn *sql.Tx, serverName gomatrixserverlib.ServerName, roomID, peekID string, renewalInterval int64,
) (err error) {
    nowMilli := time.Now().UnixNano() / int64(time.Millisecond)
    _, err = sqlutil.TxStmt(txn, s.renewInboundPeekStmt).ExecContext(ctx, nowMilli, renewalInterval, roomID, serverName, peekID)
    return
}

func (s *inboundPeeksStatements) SelectInboundPeek(
    ctx context.Context, txn *sql.Tx, serverName gomatrixserverlib.ServerName, roomID, peekID string,
) (*types.InboundPeek, error) {
    row := sqlutil.TxStmt(txn, s.selectInboundPeeksStmt).QueryRowContext(ctx, roomID)
    inboundPeek := types.InboundPeek{}
    err := row.Scan(
        &inboundPeek.RoomID,
        &inboundPeek.ServerName,
        &inboundPeek.PeekID,
        &inboundPeek.CreationTimestamp,
        &inboundPeek.RenewedTimestamp,
        &inboundPeek.RenewalInterval,
    )
    if err == sql.ErrNoRows {
        return nil, nil
    }
    if err != nil {
        return nil, err
    }
    return &inboundPeek, nil
}

func (s *inboundPeeksStatements) SelectInboundPeeks(
    ctx context.Context, txn *sql.Tx, roomID string,
) (inboundPeeks []types.InboundPeek, err error) {
    rows, err := sqlutil.TxStmt(txn, s.selectInboundPeeksStmt).QueryContext(ctx, roomID)
    if err != nil {
        return
    }
    defer internal.CloseAndLogIfError(ctx, rows, "SelectInboundPeeks: rows.close() failed")

    for rows.Next() {
        inboundPeek := types.InboundPeek{}
        if err = rows.Scan(
            &inboundPeek.RoomID,
            &inboundPeek.ServerName,
            &inboundPeek.PeekID,
            &inboundPeek.CreationTimestamp,
            &inboundPeek.RenewedTimestamp,
            &inboundPeek.RenewalInterval,
        ); err != nil {
            return
        }
        inboundPeeks = append(inboundPeeks, inboundPeek)
    }

    return inboundPeeks, rows.Err()
}

func (s *inboundPeeksStatements) DeleteInboundPeek(
    ctx context.Context, txn *sql.Tx, serverName gomatrixserverlib.ServerName, roomID, peekID string,
) (err error) {
    _, err = sqlutil.TxStmt(txn, s.deleteInboundPeekStmt).ExecContext(ctx, roomID, serverName, peekID)
    return
}

func (s *inboundPeeksStatements) DeleteInboundPeeks(
    ctx context.Context, txn *sql.Tx, roomID string,
) (err error) {
    _, err = sqlutil.TxStmt(txn, s.deleteInboundPeeksStmt).ExecContext(ctx, roomID)
    return
}
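The creation_ts and renewed_ts columns above are written as milliseconds since the Unix epoch (see the nowMilli calculation). If those values ever need to be turned back into a time.Time, a small helper along these lines would do it; it is a hypothetical addition, not something this commit introduces.

package sqlite3

import "time"

// msToTime converts a millisecond Unix timestamp, as stored in creation_ts
// and renewed_ts, back into a time.Time.
func msToTime(ms int64) time.Time {
    return time.Unix(ms/1000, (ms%1000)*int64(time.Millisecond))
}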
176
federationsender/storage/sqlite3/outbound_peeks_table.go
Normal file
@@ -0,0 +1,176 @@
// Copyright 2020 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package sqlite3

import (
    "context"
    "database/sql"
    "time"

    "github.com/matrix-org/dendrite/federationsender/types"
    "github.com/matrix-org/dendrite/internal"
    "github.com/matrix-org/dendrite/internal/sqlutil"
    "github.com/matrix-org/gomatrixserverlib"
)

const outboundPeeksSchema = `
CREATE TABLE IF NOT EXISTS federationsender_outbound_peeks (
    room_id TEXT NOT NULL,
    server_name TEXT NOT NULL,
    peek_id TEXT NOT NULL,
    creation_ts INTEGER NOT NULL,
    renewed_ts INTEGER NOT NULL,
    renewal_interval INTEGER NOT NULL,
    UNIQUE (room_id, server_name, peek_id)
);
`

const insertOutboundPeekSQL = "" +
    "INSERT INTO federationsender_outbound_peeks (room_id, server_name, peek_id, creation_ts, renewed_ts, renewal_interval) VALUES ($1, $2, $3, $4, $5, $6)"

const selectOutboundPeekSQL = "" +
    "SELECT room_id, server_name, peek_id, creation_ts, renewed_ts, renewal_interval FROM federationsender_outbound_peeks WHERE room_id = $1 and server_name = $2 and peek_id = $3"

const selectOutboundPeeksSQL = "" +
    "SELECT room_id, server_name, peek_id, creation_ts, renewed_ts, renewal_interval FROM federationsender_outbound_peeks WHERE room_id = $1"

const renewOutboundPeekSQL = "" +
    "UPDATE federationsender_outbound_peeks SET renewed_ts=$1, renewal_interval=$2 WHERE room_id = $3 and server_name = $4 and peek_id = $5"

const deleteOutboundPeekSQL = "" +
    "DELETE FROM federationsender_outbound_peeks WHERE room_id = $1 and server_name = $2"

const deleteOutboundPeeksSQL = "" +
    "DELETE FROM federationsender_outbound_peeks WHERE room_id = $1"

type outboundPeeksStatements struct {
    db                      *sql.DB
    insertOutboundPeekStmt  *sql.Stmt
    selectOutboundPeekStmt  *sql.Stmt
    selectOutboundPeeksStmt *sql.Stmt
    renewOutboundPeekStmt   *sql.Stmt
    deleteOutboundPeekStmt  *sql.Stmt
    deleteOutboundPeeksStmt *sql.Stmt
}

func NewSQLiteOutboundPeeksTable(db *sql.DB) (s *outboundPeeksStatements, err error) {
    s = &outboundPeeksStatements{
        db: db,
    }
    _, err = db.Exec(outboundPeeksSchema)
    if err != nil {
        return
    }

    if s.insertOutboundPeekStmt, err = db.Prepare(insertOutboundPeekSQL); err != nil {
        return
    }
    if s.selectOutboundPeekStmt, err = db.Prepare(selectOutboundPeekSQL); err != nil {
        return
    }
    if s.selectOutboundPeeksStmt, err = db.Prepare(selectOutboundPeeksSQL); err != nil {
        return
    }
    if s.renewOutboundPeekStmt, err = db.Prepare(renewOutboundPeekSQL); err != nil {
        return
    }
    if s.deleteOutboundPeeksStmt, err = db.Prepare(deleteOutboundPeeksSQL); err != nil {
        return
    }
    if s.deleteOutboundPeekStmt, err = db.Prepare(deleteOutboundPeekSQL); err != nil {
        return
    }
    return
}

func (s *outboundPeeksStatements) InsertOutboundPeek(
    ctx context.Context, txn *sql.Tx, serverName gomatrixserverlib.ServerName, roomID, peekID string, renewalInterval int64,
) (err error) {
    nowMilli := time.Now().UnixNano() / int64(time.Millisecond)
    stmt := sqlutil.TxStmt(txn, s.insertOutboundPeekStmt)
    _, err = stmt.ExecContext(ctx, roomID, serverName, peekID, nowMilli, nowMilli, renewalInterval)
    return
}

func (s *outboundPeeksStatements) RenewOutboundPeek(
    ctx context.Context, txn *sql.Tx, serverName gomatrixserverlib.ServerName, roomID, peekID string, renewalInterval int64,
) (err error) {
    nowMilli := time.Now().UnixNano() / int64(time.Millisecond)
    _, err = sqlutil.TxStmt(txn, s.renewOutboundPeekStmt).ExecContext(ctx, nowMilli, renewalInterval, roomID, serverName, peekID)
    return
}

func (s *outboundPeeksStatements) SelectOutboundPeek(
    ctx context.Context, txn *sql.Tx, serverName gomatrixserverlib.ServerName, roomID, peekID string,
) (*types.OutboundPeek, error) {
    row := sqlutil.TxStmt(txn, s.selectOutboundPeeksStmt).QueryRowContext(ctx, roomID)
    outboundPeek := types.OutboundPeek{}
    err := row.Scan(
        &outboundPeek.RoomID,
        &outboundPeek.ServerName,
        &outboundPeek.PeekID,
        &outboundPeek.CreationTimestamp,
        &outboundPeek.RenewedTimestamp,
        &outboundPeek.RenewalInterval,
    )
    if err == sql.ErrNoRows {
        return nil, nil
    }
    if err != nil {
        return nil, err
    }
    return &outboundPeek, nil
}

func (s *outboundPeeksStatements) SelectOutboundPeeks(
    ctx context.Context, txn *sql.Tx, roomID string,
) (outboundPeeks []types.OutboundPeek, err error) {
    rows, err := sqlutil.TxStmt(txn, s.selectOutboundPeeksStmt).QueryContext(ctx, roomID)
    if err != nil {
        return
    }
    defer internal.CloseAndLogIfError(ctx, rows, "SelectOutboundPeeks: rows.close() failed")

    for rows.Next() {
        outboundPeek := types.OutboundPeek{}
        if err = rows.Scan(
            &outboundPeek.RoomID,
            &outboundPeek.ServerName,
            &outboundPeek.PeekID,
            &outboundPeek.CreationTimestamp,
            &outboundPeek.RenewedTimestamp,
            &outboundPeek.RenewalInterval,
        ); err != nil {
            return
        }
        outboundPeeks = append(outboundPeeks, outboundPeek)
    }

    return outboundPeeks, rows.Err()
}

func (s *outboundPeeksStatements) DeleteOutboundPeek(
    ctx context.Context, txn *sql.Tx, serverName gomatrixserverlib.ServerName, roomID, peekID string,
) (err error) {
    _, err = sqlutil.TxStmt(txn, s.deleteOutboundPeekStmt).ExecContext(ctx, roomID, serverName, peekID)
    return
}

func (s *outboundPeeksStatements) DeleteOutboundPeeks(
    ctx context.Context, txn *sql.Tx, roomID string,
) (err error) {
    _, err = sqlutil.TxStmt(txn, s.deleteOutboundPeeksStmt).ExecContext(ctx, roomID)
    return
}
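A compile-time assertion such as the following would confirm that these SQLite statement types satisfy the peek table interfaces defined further down in this diff. It is not part of the commit, and it assumes the interfaces live in the federationsender/storage/tables package:

package sqlite3

import "github.com/matrix-org/dendrite/federationsender/storage/tables"

// Compile-time checks that the statement types above implement the
// corresponding table interfaces. Hypothetical addition, not in this commit.
var (
    _ tables.FederationSenderOutboundPeeks = (*outboundPeeksStatements)(nil)
    _ tables.FederationSenderInboundPeeks  = (*inboundPeeksStatements)(nil)
)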
@@ -1,105 +0,0 @@
// Copyright 2017-2018 New Vector Ltd
// Copyright 2019-2020 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package sqlite3

import (
    "context"
    "database/sql"

    "github.com/matrix-org/dendrite/internal/sqlutil"
)

const roomSchema = `
CREATE TABLE IF NOT EXISTS federationsender_rooms (
    -- The string ID of the room
    room_id TEXT PRIMARY KEY,
    -- The most recent event state by the room server.
    -- We can use this to tell if our view of the room state has become
    -- desynchronised.
    last_event_id TEXT NOT NULL
);`

const insertRoomSQL = "" +
    "INSERT INTO federationsender_rooms (room_id, last_event_id) VALUES ($1, '')" +
    " ON CONFLICT DO NOTHING"

const selectRoomForUpdateSQL = "" +
    "SELECT last_event_id FROM federationsender_rooms WHERE room_id = $1"

const updateRoomSQL = "" +
    "UPDATE federationsender_rooms SET last_event_id = $2 WHERE room_id = $1"

type roomStatements struct {
    db                      *sql.DB
    insertRoomStmt          *sql.Stmt
    selectRoomForUpdateStmt *sql.Stmt
    updateRoomStmt          *sql.Stmt
}

func NewSQLiteRoomsTable(db *sql.DB) (s *roomStatements, err error) {
    s = &roomStatements{
        db: db,
    }
    _, err = db.Exec(roomSchema)
    if err != nil {
        return
    }

    if s.insertRoomStmt, err = db.Prepare(insertRoomSQL); err != nil {
        return
    }
    if s.selectRoomForUpdateStmt, err = db.Prepare(selectRoomForUpdateSQL); err != nil {
        return
    }
    if s.updateRoomStmt, err = db.Prepare(updateRoomSQL); err != nil {
        return
    }
    return
}

// insertRoom inserts the room if it didn't already exist.
// If the room didn't exist then last_event_id is set to the empty string.
func (s *roomStatements) InsertRoom(
    ctx context.Context, txn *sql.Tx, roomID string,
) error {
    _, err := sqlutil.TxStmt(txn, s.insertRoomStmt).ExecContext(ctx, roomID)
    return err
}

// selectRoomForUpdate locks the row for the room and returns the last_event_id.
// The row must already exist in the table. Callers can ensure that the row
// exists by calling insertRoom first.
func (s *roomStatements) SelectRoomForUpdate(
    ctx context.Context, txn *sql.Tx, roomID string,
) (string, error) {
    var lastEventID string
    stmt := sqlutil.TxStmt(txn, s.selectRoomForUpdateStmt)
    err := stmt.QueryRowContext(ctx, roomID).Scan(&lastEventID)
    if err != nil {
        return "", err
    }
    return lastEventID, nil
}

// updateRoom updates the last_event_id for the room. selectRoomForUpdate should
// have already been called earlier within the transaction.
func (s *roomStatements) UpdateRoom(
    ctx context.Context, txn *sql.Tx, roomID, lastEventID string,
) error {
    stmt := sqlutil.TxStmt(txn, s.updateRoomStmt)
    _, err := stmt.ExecContext(ctx, roomID, lastEventID)
    return err
}
@@ -21,6 +21,7 @@ import (
    _ "github.com/mattn/go-sqlite3"

    "github.com/matrix-org/dendrite/federationsender/storage/shared"
    "github.com/matrix-org/dendrite/federationsender/storage/sqlite3/deltas"
    "github.com/matrix-org/dendrite/internal/caching"
    "github.com/matrix-org/dendrite/internal/sqlutil"
    "github.com/matrix-org/dendrite/setup/config"

@@ -46,10 +47,6 @@ func NewDatabase(dbProperties *config.DatabaseOptions, cache caching.FederationS
    if err != nil {
        return nil, err
    }
    rooms, err := NewSQLiteRoomsTable(d.db)
    if err != nil {
        return nil, err
    }
    queuePDUs, err := NewSQLiteQueuePDUsTable(d.db)
    if err != nil {
        return nil, err

@@ -66,16 +63,30 @@ func NewDatabase(dbProperties *config.DatabaseOptions, cache caching.FederationS
    if err != nil {
        return nil, err
    }
    outboundPeeks, err := NewSQLiteOutboundPeeksTable(d.db)
    if err != nil {
        return nil, err
    }
    inboundPeeks, err := NewSQLiteInboundPeeksTable(d.db)
    if err != nil {
        return nil, err
    }
    m := sqlutil.NewMigrations()
    deltas.LoadRemoveRoomsTable(m)
    if err = m.RunDeltas(d.db, dbProperties); err != nil {
        return nil, err
    }
    d.Database = shared.Database{
        DB:                          d.db,
        Cache:                       cache,
        Writer:                      d.writer,
        FederationSenderJoinedHosts: joinedHosts,
        FederationSenderQueuePDUs:   queuePDUs,
        FederationSenderQueueEDUs:   queueEDUs,
        FederationSenderQueueJSON:   queueJSON,
        FederationSenderRooms:       rooms,
        FederationSenderBlacklist:   blacklist,
        DB:                            d.db,
        Cache:                         cache,
        Writer:                        d.writer,
        FederationSenderJoinedHosts:   joinedHosts,
        FederationSenderQueuePDUs:     queuePDUs,
        FederationSenderQueueEDUs:     queueEDUs,
        FederationSenderQueueJSON:     queueJSON,
        FederationSenderBlacklist:     blacklist,
        FederationSenderOutboundPeeks: outboundPeeks,
        FederationSenderInboundPeeks:  inboundPeeks,
    }
    if err = d.PartitionOffsetStatements.Prepare(d.db, d.writer, "federationsender"); err != nil {
        return nil, err
@@ -56,14 +56,26 @@ type FederationSenderJoinedHosts interface {
    SelectJoinedHostsForRooms(ctx context.Context, roomIDs []string) ([]gomatrixserverlib.ServerName, error)
}

type FederationSenderRooms interface {
    InsertRoom(ctx context.Context, txn *sql.Tx, roomID string) error
    SelectRoomForUpdate(ctx context.Context, txn *sql.Tx, roomID string) (string, error)
    UpdateRoom(ctx context.Context, txn *sql.Tx, roomID, lastEventID string) error
}

type FederationSenderBlacklist interface {
    InsertBlacklist(ctx context.Context, txn *sql.Tx, serverName gomatrixserverlib.ServerName) error
    SelectBlacklist(ctx context.Context, txn *sql.Tx, serverName gomatrixserverlib.ServerName) (bool, error)
    DeleteBlacklist(ctx context.Context, txn *sql.Tx, serverName gomatrixserverlib.ServerName) error
}

type FederationSenderOutboundPeeks interface {
    InsertOutboundPeek(ctx context.Context, txn *sql.Tx, serverName gomatrixserverlib.ServerName, roomID, peekID string, renewalInterval int64) (err error)
    RenewOutboundPeek(ctx context.Context, txn *sql.Tx, serverName gomatrixserverlib.ServerName, roomID, peekID string, renewalInterval int64) (err error)
    SelectOutboundPeek(ctx context.Context, txn *sql.Tx, serverName gomatrixserverlib.ServerName, roomID, peekID string) (outboundPeek *types.OutboundPeek, err error)
    SelectOutboundPeeks(ctx context.Context, txn *sql.Tx, roomID string) (outboundPeeks []types.OutboundPeek, err error)
    DeleteOutboundPeek(ctx context.Context, txn *sql.Tx, serverName gomatrixserverlib.ServerName, roomID, peekID string) (err error)
    DeleteOutboundPeeks(ctx context.Context, txn *sql.Tx, roomID string) (err error)
}

type FederationSenderInboundPeeks interface {
    InsertInboundPeek(ctx context.Context, txn *sql.Tx, serverName gomatrixserverlib.ServerName, roomID, peekID string, renewalInterval int64) (err error)
    RenewInboundPeek(ctx context.Context, txn *sql.Tx, serverName gomatrixserverlib.ServerName, roomID, peekID string, renewalInterval int64) (err error)
    SelectInboundPeek(ctx context.Context, txn *sql.Tx, serverName gomatrixserverlib.ServerName, roomID, peekID string) (inboundPeek *types.InboundPeek, err error)
    SelectInboundPeeks(ctx context.Context, txn *sql.Tx, roomID string) (inboundPeeks []types.InboundPeek, err error)
    DeleteInboundPeek(ctx context.Context, txn *sql.Tx, serverName gomatrixserverlib.ServerName, roomID, peekID string) (err error)
    DeleteInboundPeeks(ctx context.Context, txn *sql.Tx, roomID string) (err error)
}
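Because the inbound and outbound peek interfaces mirror each other, callers can compose them. A hypothetical helper, purely to illustrate how the two interfaces above are intended to be used together:

// deleteAllPeeksForRoom removes every inbound and outbound peek for a room
// within the supplied transaction. Illustrative only; not part of this commit.
func deleteAllPeeksForRoom(
    ctx context.Context, txn *sql.Tx, roomID string,
    inbound FederationSenderInboundPeeks, outbound FederationSenderOutboundPeeks,
) error {
    if err := inbound.DeleteInboundPeeks(ctx, txn, roomID); err != nil {
        return err
    }
    return outbound.DeleteOutboundPeeks(ctx, txn, roomID)
}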
@@ -15,8 +15,6 @@
package types

import (
    "fmt"

    "github.com/matrix-org/gomatrixserverlib"
)

@@ -34,18 +32,22 @@ func (s ServerNames) Len() int { return len(s) }
func (s ServerNames) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s ServerNames) Less(i, j int) bool { return s[i] < s[j] }

// A EventIDMismatchError indicates that we have got out of sync with the
// room server.
type EventIDMismatchError struct {
    // The event ID we have stored in our local database.
    DatabaseID string
    // The event ID received from the room server.
    RoomServerID string
// tracks peeks we're performing on another server over federation
type OutboundPeek struct {
    PeekID            string
    RoomID            string
    ServerName        gomatrixserverlib.ServerName
    CreationTimestamp int64
    RenewedTimestamp  int64
    RenewalInterval   int64
}

func (e EventIDMismatchError) Error() string {
    return fmt.Sprintf(
        "mismatched last sent event ID: had %q in database got %q from room server",
        e.DatabaseID, e.RoomServerID,
    )
// tracks peeks other servers are performing on us over federation
type InboundPeek struct {
    PeekID            string
    RoomID            string
    ServerName        gomatrixserverlib.ServerName
    CreationTimestamp int64
    RenewedTimestamp  int64
    RenewalInterval   int64
}
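The peek types carry their timestamps and renewal interval as bare int64s. Assuming they are all in milliseconds, which matches the values the table code earlier in this diff writes, a renewal check might look like this hypothetical helper:

package types

import "time"

// outboundPeekDueForRenewal reports whether an outbound peek should be
// renewed, treating RenewedTimestamp as milliseconds since the Unix epoch and
// RenewalInterval as a millisecond duration. Hypothetical helper, not part of
// this commit.
func outboundPeekDueForRenewal(p OutboundPeek, now time.Time) bool {
    nowMilli := now.UnixNano() / int64(time.Millisecond)
    return nowMilli >= p.RenewedTimestamp+p.RenewalInterval
}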
29
go.mod
@@ -8,21 +8,20 @@ require (
    github.com/gorilla/mux v1.8.0
    github.com/hashicorp/golang-lru v0.5.4
    github.com/lib/pq v1.8.0
    github.com/libp2p/go-libp2p v0.11.0
    github.com/libp2p/go-libp2p-circuit v0.3.1
    github.com/libp2p/go-libp2p-core v0.6.1
    github.com/libp2p/go-libp2p-gostream v0.2.1
    github.com/libp2p/go-libp2p-http v0.1.5
    github.com/libp2p/go-libp2p-kad-dht v0.9.0
    github.com/libp2p/go-libp2p-pubsub v0.3.5
    github.com/libp2p/go-libp2p v0.13.0
    github.com/libp2p/go-libp2p-circuit v0.4.0
    github.com/libp2p/go-libp2p-core v0.8.3
    github.com/libp2p/go-libp2p-gostream v0.3.1
    github.com/libp2p/go-libp2p-http v0.2.0
    github.com/libp2p/go-libp2p-kad-dht v0.11.1
    github.com/libp2p/go-libp2p-pubsub v0.4.1
    github.com/libp2p/go-libp2p-record v0.1.3
    github.com/libp2p/go-yamux v1.3.9 // indirect
    github.com/lucas-clemente/quic-go v0.17.3
    github.com/matrix-org/dugong v0.0.0-20180820122854-51a565b5666b
    github.com/matrix-org/go-http-js-libp2p v0.0.0-20200518170932-783164aeeda4
    github.com/matrix-org/go-sqlite3-js v0.0.0-20200522092705-bc8506ccbcf3
    github.com/matrix-org/gomatrix v0.0.0-20200827122206-7dd5e2a05bcd
    github.com/matrix-org/gomatrixserverlib v0.0.0-20201209172200-eb6a8903f9fb
    github.com/matrix-org/gomatrixserverlib v0.0.0-20210216163908-bab1f2be20d0
    github.com/matrix-org/naffka v0.0.0-20200901083833-bcdd62999a91
    github.com/matrix-org/util v0.0.0-20200807132607-55161520e1d4
    github.com/mattn/go-sqlite3 v1.14.2

@@ -33,16 +32,14 @@ require (
    github.com/pressly/goose v2.7.0-rc5+incompatible
    github.com/prometheus/client_golang v1.7.1
    github.com/sirupsen/logrus v1.7.0
    github.com/tidwall/gjson v1.6.3
    github.com/tidwall/match v1.0.2 // indirect
    github.com/tidwall/sjson v1.1.2
    github.com/tidwall/gjson v1.6.7
    github.com/tidwall/sjson v1.1.4
    github.com/uber/jaeger-client-go v2.25.0+incompatible
    github.com/uber/jaeger-lib v2.2.0+incompatible
    github.com/yggdrasil-network/yggdrasil-go v0.3.15-0.20201006093556-760d9a7fd5ee
    github.com/yggdrasil-network/yggdrasil-go v0.3.15-0.20210218094457-e77ca8019daa
    go.uber.org/atomic v1.6.0
    golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9
    golang.org/x/net v0.0.0-20200528225125-3c3fba18258b
    golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 // indirect
    golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad
    golang.org/x/net v0.0.0-20210119194325-5f4716e94777
    gopkg.in/h2non/bimg.v1 v1.1.4
    gopkg.in/yaml.v2 v2.3.0
)
153
go.sum
@ -57,6 +57,7 @@ github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku
|
|||
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
|
||||
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
|
||||
github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
|
||||
|
|
@ -65,6 +66,7 @@ github.com/cheekybits/genny v1.0.0 h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitf
|
|||
github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ=
|
||||
github.com/cheggaaa/pb/v3 v3.0.4/go.mod h1:7rgWxLrAUcFMkvJuv09+DYi7mMUYi8nO9iOWcvGJPfw=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w=
|
||||
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
|
||||
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
|
|
@ -96,6 +98,9 @@ github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8
|
|||
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
|
||||
github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
|
||||
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
|
||||
github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6 h1:u/UEqS66A5ckRmS4yNpjmVH56sVtS/RfclBAYocb4as=
|
||||
|
|
@ -139,6 +144,7 @@ github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a
|
|||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
|
|
@ -175,6 +181,8 @@ github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OI
|
|||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
|
||||
github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
|
|
@ -338,14 +346,13 @@ github.com/libp2p/go-flow-metrics v0.0.2/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS
|
|||
github.com/libp2p/go-flow-metrics v0.0.3 h1:8tAs/hSdNvUiLgtlSy3mxwxWP4I9y/jlkPFT7epKdeM=
|
||||
github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs=
|
||||
github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs=
|
||||
github.com/libp2p/go-libp2p v0.6.0 h1:EFArryT9N7AVA70LCcOh8zxsW+FeDnxwcpWQx9k7+GM=
|
||||
github.com/libp2p/go-libp2p v0.6.0/go.mod h1:mfKWI7Soz3ABX+XEBR61lGbg+ewyMtJHVt043oWeqwg=
|
||||
github.com/libp2p/go-libp2p v0.6.1/go.mod h1:CTFnWXogryAHjXAKEbOf1OWY+VeAP3lDMZkfEI5sT54=
|
||||
github.com/libp2p/go-libp2p v0.7.0/go.mod h1:hZJf8txWeCduQRDC/WSqBGMxaTHCOYHt2xSU1ivxn0k=
|
||||
github.com/libp2p/go-libp2p v0.7.4/go.mod h1:oXsBlTLF1q7pxr+9w6lqzS1ILpyHsaBPniVO7zIHGMw=
|
||||
github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qDKwXujH5o=
|
||||
github.com/libp2p/go-libp2p v0.11.0 h1:jb5mqdqYEBAybTEhD8io43Cz5LzVKuWxOK7znSN69jE=
|
||||
github.com/libp2p/go-libp2p v0.11.0/go.mod h1:3/ogJDXsbbepEfqtZKBR/DedzxJXCeK17t2Z9RE9bEE=
|
||||
github.com/libp2p/go-libp2p v0.12.0/go.mod h1:FpHZrfC1q7nA8jitvdjKBDF31hguaC676g/nT9PgQM0=
|
||||
github.com/libp2p/go-libp2p v0.13.0 h1:tDdrXARSghmusdm0nf1U/4M8aj8Rr0V2IzQOXmbzQ3s=
|
||||
github.com/libp2p/go-libp2p v0.13.0/go.mod h1:pM0beYdACRfHO1WcJlp65WXyG2A6NqYM+t2DTVAJxMo=
|
||||
github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052 h1:BM7aaOF7RpmNn9+9g6uTjGJ0cTzWr5j9i9IKeun2M8U=
|
||||
github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052/go.mod h1:nRMRTab+kZuk0LnKZpxhOVH/ndsdr2Nr//Zltc/vwgo=
|
||||
github.com/libp2p/go-libp2p-autonat v0.1.1 h1:WLBZcIRsjZlWdAZj9CiBSvU2wQXoUOiS1Zk1tM7DTJI=
|
||||
|
|
@ -353,8 +360,8 @@ github.com/libp2p/go-libp2p-autonat v0.1.1/go.mod h1:OXqkeGOY2xJVWKAGV2inNF5aKN/
|
|||
github.com/libp2p/go-libp2p-autonat v0.2.0/go.mod h1:DX+9teU4pEEoZUqR1PiMlqliONQdNbfzE1C718tcViI=
|
||||
github.com/libp2p/go-libp2p-autonat v0.2.1/go.mod h1:MWtAhV5Ko1l6QBsHQNSuM6b1sRkXrpk0/LqCr+vCVxI=
|
||||
github.com/libp2p/go-libp2p-autonat v0.2.2/go.mod h1:HsM62HkqZmHR2k1xgX34WuWDzk/nBwNHoeyyT4IWV6A=
|
||||
github.com/libp2p/go-libp2p-autonat v0.3.2 h1:OhDSwVVaq7liTaRIsFFYvsaPp0pn2yi0WazejZ4DUmo=
|
||||
github.com/libp2p/go-libp2p-autonat v0.3.2/go.mod h1:0OzOi1/cVc7UcxfOddemYD5vzEqi4fwRbnZcJGLi68U=
|
||||
github.com/libp2p/go-libp2p-autonat v0.4.0 h1:3y8XQbpr+ssX8QfZUHekjHCYK64sj6/4hnf/awD4+Ug=
|
||||
github.com/libp2p/go-libp2p-autonat v0.4.0/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk=
|
||||
github.com/libp2p/go-libp2p-blankhost v0.1.1/go.mod h1:pf2fvdLJPsC1FsVrNP3DUUvMzUts2dsLLBEpo1vW1ro=
|
||||
github.com/libp2p/go-libp2p-blankhost v0.1.4 h1:I96SWjR4rK9irDHcHq3XHN6hawCRTPUADzkJacgZLvk=
|
||||
github.com/libp2p/go-libp2p-blankhost v0.1.4/go.mod h1:oJF0saYsAXQCSfDq254GMNmLNz6ZTHTOvtF4ZydUvwU=
|
||||
|
|
@ -363,8 +370,8 @@ github.com/libp2p/go-libp2p-blankhost v0.2.0/go.mod h1:eduNKXGTioTuQAUcZ5epXi9vM
|
|||
github.com/libp2p/go-libp2p-circuit v0.1.4 h1:Phzbmrg3BkVzbqd4ZZ149JxCuUWu2wZcXf/Kr6hZJj8=
|
||||
github.com/libp2p/go-libp2p-circuit v0.1.4/go.mod h1:CY67BrEjKNDhdTk8UgBX1Y/H5c3xkAcs3gnksxY7osU=
|
||||
github.com/libp2p/go-libp2p-circuit v0.2.1/go.mod h1:BXPwYDN5A8z4OEY9sOfr2DUQMLQvKt/6oku45YUmjIo=
|
||||
github.com/libp2p/go-libp2p-circuit v0.3.1 h1:69ENDoGnNN45BNDnBd+8SXSetDuw0eJFcGmOvvtOgBw=
|
||||
github.com/libp2p/go-libp2p-circuit v0.3.1/go.mod h1:8RMIlivu1+RxhebipJwFDA45DasLx+kkrp4IlJj53F4=
|
||||
github.com/libp2p/go-libp2p-circuit v0.4.0 h1:eqQ3sEYkGTtybWgr6JLqJY6QLtPWRErvFjFDfAOO1wc=
|
||||
github.com/libp2p/go-libp2p-circuit v0.4.0/go.mod h1:t/ktoFIUzM6uLQ+o1G6NuBl2ANhBKN9Bc8jRIk31MoA=
|
||||
github.com/libp2p/go-libp2p-connmgr v0.2.4 h1:TMS0vc0TCBomtQJyWr7fYxcVYYhx+q/2gF++G5Jkl/w=
|
||||
github.com/libp2p/go-libp2p-connmgr v0.2.4/go.mod h1:YV0b/RIm8NGPnnNWM7hG9Q38OeQiQfKhHCCs1++ufn0=
|
||||
github.com/libp2p/go-libp2p-core v0.0.1/go.mod h1:g/VxnTZ/1ygHxH3dKok7Vno1VfpvGcGip57wjTU4fco=
|
||||
|
|
@ -388,6 +395,10 @@ github.com/libp2p/go-libp2p-core v0.5.7/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX
|
|||
github.com/libp2p/go-libp2p-core v0.6.0/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo=
|
||||
github.com/libp2p/go-libp2p-core v0.6.1 h1:XS+Goh+QegCDojUZp00CaPMfiEADCrLjNZskWE7pvqs=
|
||||
github.com/libp2p/go-libp2p-core v0.6.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8=
|
||||
github.com/libp2p/go-libp2p-core v0.7.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8=
|
||||
github.com/libp2p/go-libp2p-core v0.8.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8=
|
||||
github.com/libp2p/go-libp2p-core v0.8.3 h1:BZTReEF6o8g/n4DwxTyeFannOeae35Xy0TD+mES3CNE=
|
||||
github.com/libp2p/go-libp2p-core v0.8.3/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8=
|
||||
github.com/libp2p/go-libp2p-crypto v0.1.0 h1:k9MFy+o2zGDNGsaoZl0MA3iZ75qXxr9OOoAZF+sD5OQ=
|
||||
github.com/libp2p/go-libp2p-crypto v0.1.0/go.mod h1:sPUokVISZiy+nNuTTH/TY+leRSxnFj/2GLjtOTW90hI=
|
||||
github.com/libp2p/go-libp2p-discovery v0.2.0 h1:1p3YSOq7VsgaL+xVHPi8XAmtGyas6D2J6rWBEfz/aiY=
|
||||
|
|
@ -395,12 +406,13 @@ github.com/libp2p/go-libp2p-discovery v0.2.0/go.mod h1:s4VGaxYMbw4+4+tsoQTqh7wfx
|
|||
github.com/libp2p/go-libp2p-discovery v0.3.0/go.mod h1:o03drFnz9BVAZdzC/QUQ+NeQOu38Fu7LJGEOK2gQltw=
|
||||
github.com/libp2p/go-libp2p-discovery v0.5.0 h1:Qfl+e5+lfDgwdrXdu4YNCWyEo3fWuP+WgN9mN0iWviQ=
|
||||
github.com/libp2p/go-libp2p-discovery v0.5.0/go.mod h1:+srtPIU9gDaBNu//UHvcdliKBIcr4SfDcm0/PfPJLug=
|
||||
github.com/libp2p/go-libp2p-gostream v0.2.1 h1:JjA9roGokaR2BgWmaI/3HQu1/+jSbVVDLatQGnVdGjI=
|
||||
github.com/libp2p/go-libp2p-gostream v0.2.1/go.mod h1:1Mjp3LDmkqICe5tH9yLVNCqFaRTy6OwBvuJV6j1b9Nk=
|
||||
github.com/libp2p/go-libp2p-http v0.1.5 h1:FfLnzjlEzV4/6UCXCpPXRYZNoGCfogqCFjd7eF0Jbm8=
|
||||
github.com/libp2p/go-libp2p-http v0.1.5/go.mod h1:2YfPjsQxUlBGFQl2u461unkQ7ukwiSs7NX2eSslOJiU=
|
||||
github.com/libp2p/go-libp2p-kad-dht v0.9.0 h1:AKeFYZvfAa/32Sgm0LrPDxGXB62AUtU8MRqqMobBfUM=
|
||||
github.com/libp2p/go-libp2p-kad-dht v0.9.0/go.mod h1:LEKcCFHxnvypOPaqZ0m6h0fLQ9Y8t1iZMOg7a0aQDD4=
|
||||
github.com/libp2p/go-libp2p-gostream v0.3.0/go.mod h1:pLBQu8db7vBMNINGsAwLL/ZCE8wng5V1FThoaE5rNjc=
|
||||
github.com/libp2p/go-libp2p-gostream v0.3.1 h1:XlwohsPn6uopGluEWs1Csv1QCEjrTXf2ZQagzZ5paAg=
|
||||
github.com/libp2p/go-libp2p-gostream v0.3.1/go.mod h1:1V3b+u4Zhaq407UUY9JLCpboaeufAeVQbnvAt12LRsI=
|
||||
github.com/libp2p/go-libp2p-http v0.2.0 h1:GYeVd+RZzkRa8XFLITqOpcrIQG6KbFLPJqII6HHBHzY=
|
||||
github.com/libp2p/go-libp2p-http v0.2.0/go.mod h1:GlNKFqDZHe25LVy2CvnZKx75/jLtMaD3VxZV6N39X7E=
|
||||
github.com/libp2p/go-libp2p-kad-dht v0.11.1 h1:FsriVQhOUZpCotWIjyFSjEDNJmUzuMma/RyyTDZanwc=
|
||||
github.com/libp2p/go-libp2p-kad-dht v0.11.1/go.mod h1:5ojtR2acDPqh/jXf5orWy8YGb8bHQDS+qeDcoscL/PI=
|
||||
github.com/libp2p/go-libp2p-kbucket v0.4.7 h1:spZAcgxifvFZHBD8tErvppbnNiKA5uokDu3CV7axu70=
|
||||
github.com/libp2p/go-libp2p-kbucket v0.4.7/go.mod h1:XyVo99AfQH0foSf176k4jY1xUJ2+jUJIZCSDm7r2YKk=
|
||||
github.com/libp2p/go-libp2p-loggables v0.1.0 h1:h3w8QFfCt2UJl/0/NW4K829HX/0S4KD31PQ7m8UXXO8=
|
||||
|
|
@ -411,8 +423,10 @@ github.com/libp2p/go-libp2p-mplex v0.2.1/go.mod h1:SC99Rxs8Vuzrf/6WhmH41kNn13TiY
|
|||
github.com/libp2p/go-libp2p-mplex v0.2.2 h1:+Ld7YDAfVERQ0E+qqjE7o6fHwKuM0SqTzYiwN1lVVSA=
|
||||
github.com/libp2p/go-libp2p-mplex v0.2.2/go.mod h1:74S9eum0tVQdAfFiKxAyKzNdSuLqw5oadDq7+L/FELo=
|
||||
github.com/libp2p/go-libp2p-mplex v0.2.3/go.mod h1:CK3p2+9qH9x+7ER/gWWDYJ3QW5ZxWDkm+dVvjfuG3ek=
|
||||
github.com/libp2p/go-libp2p-mplex v0.2.4 h1:XFFXaN4jhqnIuJVjYOR3k6bnRj0mFfJOlIuDVww+4Zo=
|
||||
github.com/libp2p/go-libp2p-mplex v0.2.4/go.mod h1:mI7iOezdWFOisvUwaYd3IDrJ4oVmgoXK8H331ui39CE=
|
||||
github.com/libp2p/go-libp2p-mplex v0.3.0/go.mod h1:l9QWxRbbb5/hQMECEb908GbS9Sm2UAR2KFZKUJEynEs=
|
||||
github.com/libp2p/go-libp2p-mplex v0.4.0/go.mod h1:yCyWJE2sc6TBTnFpjvLuEJgTSw/u+MamvzILKdX7asw=
|
||||
github.com/libp2p/go-libp2p-mplex v0.4.1 h1:/pyhkP1nLwjG3OM+VuaNJkQT/Pqq73WzB3aDN3Fx1sc=
|
||||
github.com/libp2p/go-libp2p-mplex v0.4.1/go.mod h1:cmy+3GfqfM1PceHTLL7zQzAAYaryDu6iPSC+CIb094g=
|
||||
github.com/libp2p/go-libp2p-nat v0.0.5 h1:/mH8pXFVKleflDL1YwqMg27W9GD8kjEx7NY0P6eGc98=
|
||||
github.com/libp2p/go-libp2p-nat v0.0.5/go.mod h1:1qubaE5bTZMJE+E/uu2URroMbzdubFz1ChgiN79yKPE=
|
||||
github.com/libp2p/go-libp2p-nat v0.0.6 h1:wMWis3kYynCbHoyKLPBEMu4YRLltbm8Mk08HGSfvTkU=
|
||||
|
|
@ -435,8 +449,8 @@ github.com/libp2p/go-libp2p-peerstore v0.2.6 h1:2ACefBX23iMdJU9Ke+dcXt3w86MIryes
|
|||
github.com/libp2p/go-libp2p-peerstore v0.2.6/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s=
|
||||
github.com/libp2p/go-libp2p-pnet v0.2.0 h1:J6htxttBipJujEjz1y0a5+eYoiPcFHhSYHH6na5f0/k=
|
||||
github.com/libp2p/go-libp2p-pnet v0.2.0/go.mod h1:Qqvq6JH/oMZGwqs3N1Fqhv8NVhrdYcO0BW4wssv21LA=
|
||||
github.com/libp2p/go-libp2p-pubsub v0.3.5 h1:iF75GWpcxKEUQU8tTkgLy69qIQvfhL+t6U6ndQrB6ho=
|
||||
github.com/libp2p/go-libp2p-pubsub v0.3.5/go.mod h1:DTMSVmZZfXodB/pvdTGrY2eHPZ9W2ev7hzTH83OKHrI=
|
||||
github.com/libp2p/go-libp2p-pubsub v0.4.1 h1:j4umIg5nyus+sqNfU+FWvb9aeYFQH/A+nDFhWj+8yy8=
|
||||
github.com/libp2p/go-libp2p-pubsub v0.4.1/go.mod h1:izkeMLvz6Ht8yAISXjx60XUQZMq9ZMe5h2ih4dLIBIQ=
|
||||
github.com/libp2p/go-libp2p-record v0.1.2 h1:M50VKzWnmUrk/M5/Dz99qO9Xh4vs8ijsK+7HkJvRP+0=
|
||||
github.com/libp2p/go-libp2p-record v0.1.2/go.mod h1:pal0eNcT5nqZaTV7UGhqeGqxFgGdsU/9W//C8dqjQDk=
|
||||
github.com/libp2p/go-libp2p-record v0.1.3 h1:R27hoScIhQf/A8XJZ8lYpnqh9LatJ5YbHs28kCIfql0=
|
||||
|
|
@ -453,14 +467,20 @@ github.com/libp2p/go-libp2p-swarm v0.2.2/go.mod h1:fvmtQ0T1nErXym1/aa1uJEyN7JzaT
|
|||
github.com/libp2p/go-libp2p-swarm v0.2.3/go.mod h1:P2VO/EpxRyDxtChXz/VPVXyTnszHvokHKRhfkEgFKNM=
|
||||
github.com/libp2p/go-libp2p-swarm v0.2.8 h1:cIUUvytBzNQmGSjnXFlI6UpoBGsaud82mJPIJVfkDlg=
|
||||
github.com/libp2p/go-libp2p-swarm v0.2.8/go.mod h1:JQKMGSth4SMqonruY0a8yjlPVIkb0mdNSwckW7OYziM=
|
||||
github.com/libp2p/go-libp2p-swarm v0.3.0/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk=
|
||||
github.com/libp2p/go-libp2p-swarm v0.3.1/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk=
|
||||
github.com/libp2p/go-libp2p-swarm v0.4.0 h1:hahq/ijRoeH6dgROOM8x7SeaKK5VgjjIr96vdrT+NUA=
|
||||
github.com/libp2p/go-libp2p-swarm v0.4.0/go.mod h1:XVFcO52VoLoo0eitSxNQWYq4D6sydGOweTOAjJNraCw=
|
||||
github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E=
|
||||
github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E=
|
||||
github.com/libp2p/go-libp2p-testing v0.0.4/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E=
|
||||
github.com/libp2p/go-libp2p-testing v0.1.0/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0=
|
||||
github.com/libp2p/go-libp2p-testing v0.1.1 h1:U03z3HnGI7Ni8Xx6ONVZvUFOAzWYmolWf5W5jAOPNmU=
|
||||
github.com/libp2p/go-libp2p-testing v0.1.1/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0=
|
||||
github.com/libp2p/go-libp2p-testing v0.2.0 h1:DdC8Dthjf97Hz3t3siZCRD1U3nuNxQgEyTWvLh6ayvw=
|
||||
github.com/libp2p/go-libp2p-testing v0.2.0/go.mod h1:Qy8sAncLKpwXtS2dSnDOP8ktexIAHKu+J+pnZOFZLTc=
|
||||
github.com/libp2p/go-libp2p-testing v0.1.2-0.20200422005655-8775583591d8/go.mod h1:Qy8sAncLKpwXtS2dSnDOP8ktexIAHKu+J+pnZOFZLTc=
|
||||
github.com/libp2p/go-libp2p-testing v0.3.0/go.mod h1:efZkql4UZ7OVsEfaxNHZPzIehtsBXMrXnCfJIgDti5g=
|
||||
github.com/libp2p/go-libp2p-testing v0.4.0 h1:PrwHRi0IGqOwVQWR3xzgigSlhlLfxgfXgkHxr77EghQ=
|
||||
github.com/libp2p/go-libp2p-testing v0.4.0/go.mod h1:Q+PFXYoiYFN5CAEG2w3gLPEzotlKsNSbKQ/lImlOWF0=
|
||||
github.com/libp2p/go-libp2p-tls v0.1.3 h1:twKMhMu44jQO+HgQK9X8NHO5HkeJu2QbhLzLJpa8oNM=
|
||||
github.com/libp2p/go-libp2p-tls v0.1.3/go.mod h1:wZfuewxOndz5RTnCAxFliGjvYSDA40sKitV4c50uI1M=
|
||||
github.com/libp2p/go-libp2p-transport-upgrader v0.1.1 h1:PZMS9lhjK9VytzMCW3tWHAXtKXmlURSc3ZdvwEcKCzw=
|
||||
|
|
@ -469,6 +489,8 @@ github.com/libp2p/go-libp2p-transport-upgrader v0.2.0 h1:5EhPgQhXZNyfL22ERZTUoVp
|
|||
github.com/libp2p/go-libp2p-transport-upgrader v0.2.0/go.mod h1:mQcrHj4asu6ArfSoMuyojOdjx73Q47cYD7s5+gZOlns=
|
||||
github.com/libp2p/go-libp2p-transport-upgrader v0.3.0 h1:q3ULhsknEQ34eVDhv4YwKS8iet69ffs9+Fir6a7weN4=
|
||||
github.com/libp2p/go-libp2p-transport-upgrader v0.3.0/go.mod h1:i+SKzbRnvXdVbU3D1dwydnTmKRPXiAR/fyvi1dXuL4o=
|
||||
github.com/libp2p/go-libp2p-transport-upgrader v0.4.0 h1:xwj4h3hJdBrxqMOyMUjwscjoVst0AASTsKtZiTChoHI=
|
||||
github.com/libp2p/go-libp2p-transport-upgrader v0.4.0/go.mod h1:J4ko0ObtZSmgn5BX5AmegP+dK3CSnU2lMCKsSq/EY0s=
|
||||
github.com/libp2p/go-libp2p-yamux v0.2.0/go.mod h1:Db2gU+XfLpm6E4rG5uGCFX6uXA8MEXOxFcRoXUODaK8=
|
||||
github.com/libp2p/go-libp2p-yamux v0.2.2 h1:eGvbqWqWY9S5lrpe2gA0UCOLCdzCgYSAR3vo/xCsNQg=
|
||||
github.com/libp2p/go-libp2p-yamux v0.2.2/go.mod h1:lIohaR0pT6mOt0AZ0L2dFze9hds9Req3OfS+B+dv4qw=
|
||||
|
|
@ -476,6 +498,10 @@ github.com/libp2p/go-libp2p-yamux v0.2.5/go.mod h1:Zpgj6arbyQrmZ3wxSZxfBmbdnWtbZ
|
|||
github.com/libp2p/go-libp2p-yamux v0.2.7/go.mod h1:X28ENrBMU/nm4I3Nx4sZ4dgjZ6VhLEn0XhIoZ5viCwU=
|
||||
github.com/libp2p/go-libp2p-yamux v0.2.8 h1:0s3ELSLu2O7hWKfX1YjzudBKCP0kZ+m9e2+0veXzkn4=
|
||||
github.com/libp2p/go-libp2p-yamux v0.2.8/go.mod h1:/t6tDqeuZf0INZMTgd0WxIRbtK2EzI2h7HbFm9eAKI4=
|
||||
github.com/libp2p/go-libp2p-yamux v0.4.0/go.mod h1:+DWDjtFMzoAwYLVkNZftoucn7PelNoy5nm3tZ3/Zw30=
|
||||
github.com/libp2p/go-libp2p-yamux v0.5.0/go.mod h1:AyR8k5EzyM2QN9Bbdg6X1SkVVuqLwTGf0L4DFq9g6po=
|
||||
github.com/libp2p/go-libp2p-yamux v0.5.1 h1:sX4WQPHMhRxJE5UZTfjEuBvlQWXB5Bo3A2JK9ZJ9EM0=
|
||||
github.com/libp2p/go-libp2p-yamux v0.5.1/go.mod h1:dowuvDu8CRWmr0iqySMiSxK+W0iL5cMVO9S94Y6gkv4=
|
||||
github.com/libp2p/go-maddr-filter v0.0.4/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q=
|
||||
github.com/libp2p/go-maddr-filter v0.0.5 h1:CW3AgbMO6vUvT4kf87y4N+0P8KUl2aqLYhrGyDUbLSg=
|
||||
github.com/libp2p/go-maddr-filter v0.0.5/go.mod h1:Jk+36PMfIqCJhAnaASRH83bdAvfDRp/w6ENFaC9bG+M=
|
||||
|
|
@ -487,6 +513,9 @@ github.com/libp2p/go-mplex v0.1.1 h1:huPH/GGRJzmsHR9IZJJsrSwIM5YE2gL4ssgl1YWb/ps
|
|||
github.com/libp2p/go-mplex v0.1.1/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk=
|
||||
github.com/libp2p/go-mplex v0.1.2 h1:qOg1s+WdGLlpkrczDqmhYzyk3vCfsQ8+RxRTQjOZWwI=
|
||||
github.com/libp2p/go-mplex v0.1.2/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk=
|
||||
github.com/libp2p/go-mplex v0.2.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ=
|
||||
github.com/libp2p/go-mplex v0.3.0 h1:U1T+vmCYJaEoDJPV1aq31N56hS+lJgb397GsylNSgrU=
|
||||
github.com/libp2p/go-mplex v0.3.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ=
|
||||
github.com/libp2p/go-msgio v0.0.2/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ=
|
||||
github.com/libp2p/go-msgio v0.0.4 h1:agEFehY3zWJFUHK6SEMR7UYmk2z6kC3oeCM7ybLhguA=
|
||||
github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ=
|
||||
|
|
@ -536,6 +565,8 @@ github.com/libp2p/go-ws-transport v0.2.0/go.mod h1:9BHJz/4Q5A9ludYWKoGCFC5gUElzl
|
|||
github.com/libp2p/go-ws-transport v0.3.0/go.mod h1:bpgTJmRZAvVHrgHybCVyqoBmyLQ1fiZuEaBYusP5zsk=
|
||||
github.com/libp2p/go-ws-transport v0.3.1 h1:ZX5rWB8nhRRJVaPO6tmkGI/Xx8XNboYX20PW5hXIscw=
|
||||
github.com/libp2p/go-ws-transport v0.3.1/go.mod h1:bpgTJmRZAvVHrgHybCVyqoBmyLQ1fiZuEaBYusP5zsk=
|
||||
github.com/libp2p/go-ws-transport v0.4.0 h1:9tvtQ9xbws6cA5LvqdE6Ne3vcmGB4f1z9SByggk4s0k=
|
||||
github.com/libp2p/go-ws-transport v0.4.0/go.mod h1:EcIEKqf/7GDjth6ksuS/6p7R49V4CBY6/E7R/iyhYUA=
|
||||
github.com/libp2p/go-yamux v1.2.2/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow=
|
||||
github.com/libp2p/go-yamux v1.3.0 h1:FsYzT16Wq2XqUGJsBbOxoz9g+dFklvNi7jN6YFPfl7U=
|
||||
github.com/libp2p/go-yamux v1.3.0/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow=
|
||||
|
|
@ -543,13 +574,16 @@ github.com/libp2p/go-yamux v1.3.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZ
|
|||
github.com/libp2p/go-yamux v1.3.5/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow=
|
||||
github.com/libp2p/go-yamux v1.3.7 h1:v40A1eSPJDIZwz2AvrV3cxpTZEGDP11QJbukmEhYyQI=
|
||||
github.com/libp2p/go-yamux v1.3.7/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE=
|
||||
github.com/libp2p/go-yamux v1.3.9 h1:aGCyO39LVHspu5+GARu+koNBzr6aYtxyAbwS1njzmrA=
|
||||
github.com/libp2p/go-yamux v1.3.9/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE=
|
||||
github.com/libp2p/go-yamux v1.4.0/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE=
|
||||
github.com/libp2p/go-yamux v1.4.1 h1:P1Fe9vF4th5JOxxgQvfbOHkrGqIZniTLf+ddhZp8YTI=
|
||||
github.com/libp2p/go-yamux v1.4.1/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE=
|
||||
github.com/libp2p/go-yamux/v2 v2.0.0 h1:vSGhAy5u6iHBq11ZDcyHH4Blcf9xlBhT4WQDoOE90LU=
|
||||
github.com/libp2p/go-yamux/v2 v2.0.0/go.mod h1:NVWira5+sVUIU6tu1JWvaRn1dRnG+cawOJiflsAM+7U=
|
||||
github.com/lucas-clemente/quic-go v0.17.3 h1:jMX/MmDNCljfisgMmPGUcBJ+zUh9w3d3ia4YJjYS3TM=
|
||||
github.com/lucas-clemente/quic-go v0.17.3/go.mod h1:I0+fcNTdb9eS1ZcjQZbDVPGchJ86chcIxPALn9lEJqE=
|
||||
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
|
||||
github.com/lxn/walk v0.0.0-20191128110447-55ccb3a9f5c1/go.mod h1:E23UucZGqpuUANJooIbHWCufXvOcT6E7Stq81gU+CSQ=
|
||||
github.com/lxn/win v0.0.0-20191128105842-2da648fda5b4/go.mod h1:ouWl4wViUNh8tPSIwxTVMuS014WakR1hqvBc2I0bMoA=
|
||||
github.com/lxn/walk v0.0.0-20210112085537-c389da54e794/go.mod h1:E23UucZGqpuUANJooIbHWCufXvOcT6E7Stq81gU+CSQ=
|
||||
github.com/lxn/win v0.0.0-20201111105847-2a20daff6a55/go.mod h1:KxxjdtRkfNoYDCUP5ryK7XJJNTnpC8atvtmTheChOtk=
|
||||
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
|
|
@ -567,8 +601,8 @@ github.com/matrix-org/gomatrix v0.0.0-20190528120928-7df988a63f26 h1:Hr3zjRsq2bh
|
|||
github.com/matrix-org/gomatrix v0.0.0-20190528120928-7df988a63f26/go.mod h1:3fxX6gUjWyI/2Bt7J1OLhpCzOfO/bB3AiX0cJtEKud0=
|
||||
github.com/matrix-org/gomatrix v0.0.0-20200827122206-7dd5e2a05bcd h1:xVrqJK3xHREMNjwjljkAUaadalWc0rRbmVuQatzmgwg=
|
||||
github.com/matrix-org/gomatrix v0.0.0-20200827122206-7dd5e2a05bcd/go.mod h1:/gBX06Kw0exX1HrwmoBibFA98yBk/jxKpGVeyQbff+s=
|
||||
github.com/matrix-org/gomatrixserverlib v0.0.0-20201209172200-eb6a8903f9fb h1:UlhiSebJupQ+qAM93cdVGg4nAJ6bnxwAA5/EBygtYoo=
|
||||
github.com/matrix-org/gomatrixserverlib v0.0.0-20201209172200-eb6a8903f9fb/go.mod h1:JsAzE1Ll3+gDWS9JSUHPJiiyAksvOOnGWF2nXdg4ZzU=
|
||||
github.com/matrix-org/gomatrixserverlib v0.0.0-20210216163908-bab1f2be20d0 h1:eP8t7DaLKkNz0IT9GcJeG6UTKjfvihIxbAXKN0I7j6g=
|
||||
github.com/matrix-org/gomatrixserverlib v0.0.0-20210216163908-bab1f2be20d0/go.mod h1:JsAzE1Ll3+gDWS9JSUHPJiiyAksvOOnGWF2nXdg4ZzU=
|
||||
github.com/matrix-org/naffka v0.0.0-20200901083833-bcdd62999a91 h1:HJ6U3S3ljJqNffYMcIeAncp5qT/i+ZMiJ2JC2F0aXP4=
|
||||
github.com/matrix-org/naffka v0.0.0-20200901083833-bcdd62999a91/go.mod h1:sjyPyRxKM5uw1nD2cJ6O2OxI6GOqyVBfNXqKjBZTBZE=
|
||||
github.com/matrix-org/util v0.0.0-20190711121626-527ce5ddefc7 h1:ntrLa/8xVzeSs8vHFHK25k0C+NV74sYMJnNSg5NoSRo=
|
||||
|
|
@ -669,8 +703,8 @@ github.com/multiformats/go-multistream v0.1.0 h1:UpO6jrsjqs46mqAK3n6wKRYFhugss9A
|
|||
github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg=
|
||||
github.com/multiformats/go-multistream v0.1.1 h1:JlAdpIFhBhGRLxe9W6Om0w++Gd6KMWoFPZL/dEnm9nI=
|
||||
github.com/multiformats/go-multistream v0.1.1/go.mod h1:KmHZ40hzVxiaiwlj3MEbYgK9JFk2/9UktWZAF54Du38=
|
||||
github.com/multiformats/go-multistream v0.1.2 h1:knyamLYMPFPngQjGQ0lhnlys3jtVR/3xV6TREUJr+fE=
|
||||
github.com/multiformats/go-multistream v0.1.2/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k=
|
||||
github.com/multiformats/go-multistream v0.2.0 h1:6AuNmQVKUkRnddw2YiDjt5Elit40SFxMJkVnhmETXtU=
|
||||
github.com/multiformats/go-multistream v0.2.0/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k=
|
||||
github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
|
||||
github.com/multiformats/go-varint v0.0.2/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
|
||||
github.com/multiformats/go-varint v0.0.5 h1:XVZwSo04Cs3j/jS0uAEPpT3JY6DzMcVLLoWOSnCxOjg=
|
||||
|
|
@ -734,6 +768,7 @@ github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNja
|
|||
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
|
||||
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
|
|
@ -803,6 +838,7 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
|
|||
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.6.0 h1:jlIyCplCJFULU/01vCkhKuTyc3OorI3bJFuw6obfgho=
|
||||
github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
|
|
@ -810,13 +846,12 @@ github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpP
|
|||
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
|
||||
github.com/tidwall/gjson v1.6.0 h1:9VEQWz6LLMUsUl6PueE49ir4Ka6CzLymOAZDxpFsTDc=
|
||||
github.com/tidwall/gjson v1.6.0/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls=
|
||||
github.com/tidwall/gjson v1.6.1/go.mod h1:BaHyNc5bjzYkPqgLq7mdVzeiRtULKULXLgZFKsxEHI0=
|
||||
github.com/tidwall/gjson v1.6.3 h1:aHoiiem0dr7GHkW001T1SMTJ7X5PvyekH5WX0whWGnI=
|
||||
github.com/tidwall/gjson v1.6.3/go.mod h1:BaHyNc5bjzYkPqgLq7mdVzeiRtULKULXLgZFKsxEHI0=
|
||||
github.com/tidwall/gjson v1.6.7 h1:Mb1M9HZCRWEcXQ8ieJo7auYyyiSux6w9XN3AdTpxJrE=
|
||||
github.com/tidwall/gjson v1.6.7/go.mod h1:zeFuBCIqD4sN/gmqBzZ4j7Jd6UcA2Fc56x7QFsv+8fI=
|
||||
github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc=
|
||||
github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E=
|
||||
github.com/tidwall/match v1.0.2 h1:uuqvHuBGSedK7awZ2YoAtpnimfwBGFjHuWLuLqQj+bU=
|
||||
github.com/tidwall/match v1.0.2/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E=
|
||||
github.com/tidwall/match v1.0.3 h1:FQUVvBImDutD8wJLN6c5eMzWtjgONK9MwIBCOrUJKeE=
|
||||
github.com/tidwall/match v1.0.3/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
|
||||
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
|
||||
github.com/tidwall/pretty v1.0.1 h1:WE4RBSZ1x6McVVC8S/Md+Qse8YUv6HRObAx6ke00NY8=
|
||||
github.com/tidwall/pretty v1.0.1/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
|
||||
|
|
@ -824,8 +859,8 @@ github.com/tidwall/pretty v1.0.2 h1:Z7S3cePv9Jwm1KwS0513MRaoUe3S01WPbLNV40pwWZU=
|
|||
github.com/tidwall/pretty v1.0.2/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
|
||||
github.com/tidwall/sjson v1.0.3 h1:DeF+0LZqvIt4fKYw41aPB29ZGlvwVkHKktoXJ1YW9Y8=
|
||||
github.com/tidwall/sjson v1.0.3/go.mod h1:bURseu1nuBkFpIES5cz6zBtjmYeOQmEESshn7VpF15Y=
|
||||
github.com/tidwall/sjson v1.1.2 h1:NC5okI+tQ8OG/oyzchvwXXxRxCV/FVdhODbPKkQ25jQ=
|
||||
github.com/tidwall/sjson v1.1.2/go.mod h1:SEzaDwxiPzKzNfUEO4HbYF/m4UCSJDsGgNqsS1LvdoY=
|
||||
github.com/tidwall/sjson v1.1.4 h1:bTSsPLdAYF5QNLSwYsKfBKKTnlGbIuhqL3CpRsjzGhg=
|
||||
github.com/tidwall/sjson v1.1.4/go.mod h1:wXpKXu8CtDjKAZ+3DrKY5ROCorDFahq8l0tey/Lx1fg=
|
||||
github.com/uber/jaeger-client-go v2.25.0+incompatible h1:IxcNZ7WRY1Y3G4poYlx24szfsn/3LvK9QHCq9oQw8+U=
|
||||
github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
|
||||
github.com/uber/jaeger-lib v2.2.0+incompatible h1:MxZXOiR2JuoANZ3J6DE/U0kSFv/eJ/GfSYVCjK7dyaw=
|
||||
|
|
@ -851,9 +886,8 @@ github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7V
|
|||
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
|
||||
github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
|
||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||
github.com/yggdrasil-network/yggdrasil-extras v0.0.0-20200525205615-6c8a4a2e8855/go.mod h1:xQdsh08Io6nV4WRnOVTe6gI8/2iTvfLDQ0CYa5aMt+I=
|
||||
github.com/yggdrasil-network/yggdrasil-go v0.3.15-0.20201006093556-760d9a7fd5ee h1:Kot820OfxWfYrk5di5f4S5s0jXXrQj8w8BG5826HAv4=
|
||||
github.com/yggdrasil-network/yggdrasil-go v0.3.15-0.20201006093556-760d9a7fd5ee/go.mod h1:d+Nz6SPeG6kmeSPFL0cvfWfgwEql75fUnZiAONgvyBE=
|
||||
github.com/yggdrasil-network/yggdrasil-go v0.3.15-0.20210218094457-e77ca8019daa h1:YHeZ1KN4KmuAjqmBSan1JtwyoPQoklzMjMqIbaS5Ywo=
|
||||
github.com/yggdrasil-network/yggdrasil-go v0.3.15-0.20210218094457-e77ca8019daa/go.mod h1:G716RAw9WTLbLFI7lVj1GKTU16wb9MYl6iE9j4JlWeI=
|
||||
go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA=
|
||||
|
|
@ -879,6 +913,8 @@ go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
|||
go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
|
||||
go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM=
|
||||
go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
|
||||
go.uber.org/zap v1.16.0 h1:uFRZXykJGK9lLY4HtgSw44DnIcAM+kRBP7x5m+NpAOM=
|
||||
go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ=
|
||||
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
|
||||
golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
|
||||
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
|
|
@ -896,7 +932,6 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U
|
|||
golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
|
|
@ -906,8 +941,8 @@ golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5 h1:Q7tZBpemrlsc2I7IyODzht
|
|||
golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37 h1:cg5LA/zNPRzIXIWSCxQW10Rvpy94aQh3LT/ShoCpkHw=
|
||||
golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9 h1:phUcVbl53swtrUN8kQEXFhUxPlIlWyBfKmidCu7P95o=
|
||||
golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad h1:DN0cp81fZ3njFcrLCytUHRSUkqBjfTo4Tx9RJTWs0EY=
|
||||
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
|
|
@ -936,15 +971,15 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL
|
|||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20191003171128-d98b1b443823/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b h1:0mm1VjtFUOIlE1SbDlwjYaDxZVDP2S5ou6y0gSgXHu8=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0=
|
||||
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200528225125-3c3fba18258b h1:IYiJPiJfzktmDAO1HQiwjMjwjlYKHAL7KzeD544RJPs=
|
||||
golang.org/x/net v0.0.0-20200528225125-3c3fba18258b/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210119194325-5f4716e94777 h1:003p0dJM77cxMSyCPFphvZf/Y5/NXf5fzg6ufd1/Oew=
|
||||
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
|
|
@ -981,7 +1016,6 @@ golang.org/x/sys v0.0.0-20190526052359-791d8a0f4d09/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191003212358-c178f38b412c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
|
@ -990,22 +1024,24 @@ golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae h1:/WDfKMnPU+m5M4xB+6x4kaepxRw6jWvR5iDRdvjHgy8=
|
||||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200301040627-c5d0d7b4ec88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So=
|
||||
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1 h1:ogLJMz+qpzav7lGMh10LMvAkM/fAoGlaiiHYiFYdm80=
|
||||
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 h1:nxC68pudNYkKU6jWhgrqdreuFiOQWj1Fs7T3VrH4Pjw=
|
||||
golang.org/x/sys v0.0.0-20201018230417-eeed37f84f13/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210105210732-16f7687f5001/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3-0.20191230102452-929e72ca90de h1:aYKJLPSrddB2N7/6OKyFqJ337SXpo61bBuvO5p1+7iY=
|
||||
golang.org/x/text v0.3.3-0.20191230102452-929e72ca90de/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ=
|
||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
|
|
@ -1019,7 +1055,9 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3
|
|||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd h1:/e+gpKk9r3dJobndpTytxS2gOy6m5uvpg+ISQoEcusQ=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c h1:IGkKhmfzcztjm6gYkykvu/NiS8kaqbCWAEWWAyf8J5U=
|
||||
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
|
|
@ -1029,9 +1067,9 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
|
|||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.zx2c4.com/wireguard v0.0.20200122-0.20200214175355-9cbcff10dd3e/go.mod h1:P2HsVp8SKwZEufsnezXZA4GRX/T49/HlU7DGuelXsU4=
|
||||
golang.zx2c4.com/wireguard v0.0.20200320/go.mod h1:lDian4Sw4poJ04SgHh35nzMVwGSYlPumkdnHcucAQoY=
|
||||
golang.zx2c4.com/wireguard/windows v0.1.0/go.mod h1:EK7CxrFnicmYJ0ZCF6crBh2/EMMeSxMlqgLlwN0Kv9s=
|
||||
golang.zx2c4.com/wireguard v0.0.0-20210203165646-9c7bd73be2cc/go.mod h1:r0ExowOoGFfDoLDxx+M9SYbNVsoZ0xviLL+K4f2mt+A=
|
||||
golang.zx2c4.com/wireguard v0.0.0-20210212170059-7a0fb5bbb172/go.mod h1:r0ExowOoGFfDoLDxx+M9SYbNVsoZ0xviLL+K4f2mt+A=
|
||||
golang.zx2c4.com/wireguard/windows v0.3.5/go.mod h1:ATrIFNoq3rsK735WJiQzfWYyNFc9xLBhMMjW9DWIvnU=
|
||||
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||
google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||
google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y=
|
||||
|
|
@ -1046,12 +1084,18 @@ google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk
|
|||
google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.28.1/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
|
||||
google.golang.org/grpc v1.31.1 h1:SfXqXS5hkufcdZ/mHtYCh53P2b+92WQq/DZcKLgsFRs=
|
||||
google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
|
|
@ -1112,6 +1156,7 @@ honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWh
|
|||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099 h1:XJP7lxbSxWLOMNdBE4B/STaqVy6L73o0knwj2vIlxnw=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
@@ -2,6 +2,7 @@ package caching

import (
    "fmt"
    "time"

    lru "github.com/hashicorp/golang-lru"
    "github.com/prometheus/client_golang/prometheus"

@@ -72,6 +73,11 @@ func NewInMemoryLRUCache(enablePrometheus bool) (*Caches, error) {
    if err != nil {
        return nil, err
    }
    go cacheCleaner(
        roomVersions, serverKeys, roomServerStateKeyNIDs,
        roomServerEventTypeNIDs, roomServerRoomIDs,
        roomInfos, federationEvents,
    )
    return &Caches{
        RoomVersions: roomVersions,
        ServerKeys:   serverKeys,

@@ -83,6 +89,20 @@ func NewInMemoryLRUCache(enablePrometheus bool) (*Caches, error) {
    }, nil
}

func cacheCleaner(caches ...*InMemoryLRUCachePartition) {
    for {
        time.Sleep(time.Minute)
        for _, cache := range caches {
            // Hold onto the last 10% of the cache entries, since
            // otherwise a quiet period might cause us to evict all
            // cache entries entirely.
            if cache.lru.Len() > cache.maxEntries/10 {
                cache.lru.RemoveOldest()
            }
        }
    }
}

type InMemoryLRUCachePartition struct {
    name    string
    mutable bool
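The cacheCleaner loop above is what delivers the "gradually evict old entries" behaviour from the changelog: once a minute it drops at most one entry per partition, and it never shrinks a partition below 10% of its capacity. Below is a minimal, self-contained sketch of the same idea; the `partition` type and `newPartition` helper are stand-ins invented for this example, and only the `hashicorp/golang-lru` calls mirror the real code.

```go
package main

import (
    "fmt"
    "time"

    lru "github.com/hashicorp/golang-lru"
)

// partition is a hypothetical stand-in for Dendrite's InMemoryLRUCachePartition.
type partition struct {
    name       string
    maxEntries int
    lru        *lru.Cache
}

func newPartition(name string, maxEntries int) (*partition, error) {
    c, err := lru.New(maxEntries)
    if err != nil {
        return nil, err
    }
    return &partition{name: name, maxEntries: maxEntries, lru: c}, nil
}

// cleaner evicts at most one entry per partition per interval, but keeps at
// least 10% of the capacity so a quiet period never empties the cache.
func cleaner(interval time.Duration, parts ...*partition) {
    for {
        time.Sleep(interval)
        for _, p := range parts {
            if p.lru.Len() > p.maxEntries/10 {
                p.lru.RemoveOldest()
            }
        }
    }
}

func main() {
    p, err := newPartition("room_versions", 100)
    if err != nil {
        panic(err)
    }
    for i := 0; i < 100; i++ {
        p.lru.Add(i, i)
    }
    go cleaner(10*time.Millisecond, p)
    time.Sleep(time.Second)
    fmt.Printf("%s now holds %d of %d entries\n", p.name, p.lru.Len(), p.maxEntries)
}
```

Because eviction is capped at one entry per tick, memory drains off slowly after a burst of activity instead of all at once.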
@@ -20,6 +20,8 @@ import (

    "github.com/Shopify/sarama"
    "github.com/matrix-org/dendrite/internal/sqlutil"
    "github.com/matrix-org/dendrite/setup/process"
    "github.com/sirupsen/logrus"
)

// A PartitionStorer has the storage APIs needed by the consumer.

@@ -33,6 +35,9 @@ type PartitionStorer interface {
// A ContinualConsumer continually consumes logs even across restarts. It requires a PartitionStorer to
// remember the offset it reached.
type ContinualConsumer struct {
    // The parent context for the listener, stop consuming when this context is done
    Process *process.ProcessContext
    // The component name
    ComponentName string
    // The kafkaesque topic to consume events from.
    // This is the name used in kafka to identify the stream to consume events from.

@@ -100,6 +105,15 @@ func (c *ContinualConsumer) StartOffsets() ([]sqlutil.PartitionOffset, error) {
    }
    for _, pc := range partitionConsumers {
        go c.consumePartition(pc)
        if c.Process != nil {
            c.Process.ComponentStarted()
            go func(pc sarama.PartitionConsumer) {
                <-c.Process.WaitForShutdown()
                _ = pc.Close()
                c.Process.ComponentFinished()
                logrus.Infof("Stopped consumer for %q topic %q", c.ComponentName, c.Topic)
            }(pc)
        }
    }

    return storedOffsets, nil
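The ContinualConsumer hunk ties each sarama.PartitionConsumer to the process context, so every consumer is closed cleanly when shutdown is requested. The sketch below re-implements just enough of that pattern to run on its own: the `processContext` type here is an assumption (a WaitGroup plus a shutdown channel), not Dendrite's `process.ProcessContext`, and the fake consumer stands in for sarama.

```go
package main

import (
    "fmt"
    "sync"
    "time"
)

// processContext is a minimal stand-in: components register themselves and
// the owner can wait for all of them to finish after requesting shutdown.
type processContext struct {
    wg       sync.WaitGroup
    shutdown chan struct{}
}

func newProcessContext() *processContext {
    return &processContext{shutdown: make(chan struct{})}
}

func (p *processContext) ComponentStarted()                { p.wg.Add(1) }
func (p *processContext) ComponentFinished()               { p.wg.Done() }
func (p *processContext) WaitForShutdown() <-chan struct{} { return p.shutdown }
func (p *processContext) ShutdownAndWait() {
    close(p.shutdown)
    p.wg.Wait()
}

// closer mimics the only part of sarama.PartitionConsumer used here.
type closer interface{ Close() error }

type fakePartitionConsumer struct{ topic string }

func (f *fakePartitionConsumer) Close() error {
    fmt.Println("closed consumer for topic", f.topic)
    return nil
}

// superviseConsumer mirrors the pattern in the diff: mark the component as
// started, wait for the shutdown signal, close the consumer, mark finished.
func superviseConsumer(p *processContext, pc closer) {
    p.ComponentStarted()
    go func() {
        <-p.WaitForShutdown()
        _ = pc.Close()
        p.ComponentFinished()
    }()
}

func main() {
    proc := newProcessContext()
    superviseConsumer(proc, &fakePartitionConsumer{topic: "OutputRoomEvent"})
    time.Sleep(100 * time.Millisecond) // pretend to do some work
    proc.ShutdownAndWait()             // all consumers are closed before we return
}
```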
@@ -17,7 +17,7 @@ var build string
const (
    VersionMajor = 0
    VersionMinor = 3
    VersionPatch = 4
    VersionPatch = 10
    VersionTag   = "" // example: "rc1"
)
@@ -245,7 +245,7 @@ func (u *DeviceListUpdater) notifyWorkers(userID string) {
    }
    hash := fnv.New32a()
    _, _ = hash.Write([]byte(remoteServer))
    index := int(hash.Sum32()) % len(u.workerChans)
    index := int(int64(hash.Sum32()) % int64(len(u.workerChans)))

    ch := u.assignChannel(userID)
    u.workerChans[index] <- remoteServer

@@ -319,7 +319,7 @@ func (u *DeviceListUpdater) worker(ch chan gomatrixserverlib.ServerName) {
}

func (u *DeviceListUpdater) processServer(serverName gomatrixserverlib.ServerName) (time.Duration, bool) {
    requestTimeout := time.Minute // max amount of time we want to spend on each request
    requestTimeout := time.Second * 30 // max amount of time we want to spend on each request
    ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
    defer cancel()
    logger := util.GetLogger(ctx).WithField("server_name", serverName)
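The notifyWorkers change widens the FNV-32a hash to int64 before taking the modulo, so the worker index can never come out negative on platforms where int is 32 bits wide. A small sketch of the same sharding pattern, with an assumed fixed pool of plain string channels:

```go
package main

import (
    "fmt"
    "hash/fnv"
)

// workerIndex picks a stable worker for a given server name. Widening to
// int64 before the modulo keeps the index non-negative even where int is
// 32 bits and the uint32 hash would overflow a signed int.
func workerIndex(remoteServer string, workers int) int {
    h := fnv.New32a()
    _, _ = h.Write([]byte(remoteServer))
    return int(int64(h.Sum32()) % int64(workers))
}

func main() {
    workerChans := make([]chan string, 8)
    for i := range workerChans {
        workerChans[i] = make(chan string, 1)
    }
    for _, server := range []string{"matrix.org", "example.com", "example.org"} {
        i := workerIndex(server, len(workerChans))
        workerChans[i] <- server
        fmt.Printf("%s -> worker %d\n", server, i)
    }
}
```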
@@ -106,9 +106,11 @@ func (t *roundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
func newFedClient(tripper func(*http.Request) (*http.Response, error)) *gomatrixserverlib.FederationClient {
    _, pkey, _ := ed25519.GenerateKey(nil)
    fedClient := gomatrixserverlib.NewFederationClient(
        gomatrixserverlib.ServerName("example.test"), gomatrixserverlib.KeyID("ed25519:test"), pkey, true,
        gomatrixserverlib.ServerName("example.test"), gomatrixserverlib.KeyID("ed25519:test"), pkey,
    )
    fedClient.Client = *gomatrixserverlib.NewClient(
        gomatrixserverlib.WithTransport(&roundTripper{tripper}),
    )
    fedClient.Client = *gomatrixserverlib.NewClientWithTransport(&roundTripper{tripper})
    return fedClient
}
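newFedClient swaps the federation client's transport for a function supplied by the test, which is the standard http.RoundTripper trick for keeping tests off the network. A standalone sketch of that technique against plain net/http; the URL and canned response body here are made up for the example:

```go
package main

import (
    "fmt"
    "io/ioutil"
    "net/http"
    "strings"
)

// roundTripper lets a test replace the transport with a plain function,
// mirroring the trick used by newFedClient above.
type roundTripper struct {
    fn func(*http.Request) (*http.Response, error)
}

func (r *roundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
    return r.fn(req)
}

func main() {
    client := &http.Client{Transport: &roundTripper{
        fn: func(req *http.Request) (*http.Response, error) {
            // Serve a canned response without touching the network.
            return &http.Response{
                StatusCode: 200,
                Body:       ioutil.NopCloser(strings.NewReader(`{"ok":true}`)),
                Header:     make(http.Header),
                Request:    req,
            }, nil
        },
    }}
    resp, err := client.Get("https://example.test/_matrix/federation/v1/version")
    if err != nil {
        panic(err)
    }
    body, _ := ioutil.ReadAll(resp.Body)
    fmt.Println(resp.StatusCode, string(body))
}
```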
@@ -109,7 +109,7 @@ func RemoveDir(dir types.Path, logger *log.Entry) {
// WriteTempFile writes to a new temporary file.
// The file is deleted if there was an error while writing.
func WriteTempFile(
    ctx context.Context, reqReader io.Reader, maxFileSizeBytes config.FileSizeBytes, absBasePath config.Path,
    ctx context.Context, reqReader io.Reader, absBasePath config.Path,
) (hash types.Base64Hash, size types.FileSizeBytes, path types.Path, err error) {
    size = -1
    logger := util.GetLogger(ctx)

@@ -124,18 +124,11 @@ func WriteTempFile(
        }
    }()

    // If the max_file_size_bytes configuration option is set to a positive
    // number then limit the upload to that size. Otherwise, just read the
    // whole file.
    limitedReader := reqReader
    if maxFileSizeBytes > 0 {
        limitedReader = io.LimitReader(reqReader, int64(maxFileSizeBytes))
    }
    // Hash the file data. The hash will be returned. The hash is useful as a
    // method of deduplicating files to save storage, as well as a way to conduct
    // integrity checks on the file data in the repository.
    hasher := sha256.New()
    teeReader := io.TeeReader(limitedReader, hasher)
    teeReader := io.TeeReader(reqReader, hasher)
    bytesWritten, err := io.Copy(tmpFileWriter, teeReader)
    if err != nil && err != io.EOF {
        RemoveDir(tmpDir, logger)
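With this change WriteTempFile no longer applies the size limit itself; it simply hashes whatever it is given while streaming it to disk, and the caller is expected to pass in an already-limited reader. A minimal sketch of the hash-while-writing pattern, assuming a caller-side byte cap and a throwaway temp file (the helper names are invented for the example):

```go
package main

import (
    "crypto/sha256"
    "encoding/base64"
    "fmt"
    "io"
    "io/ioutil"
    "os"
    "strings"
)

// writeAndHash copies r into dst while hashing everything it writes, so the
// data is only read once. The caller decides whether r is size-limited.
func writeAndHash(dst io.Writer, r io.Reader) (hash string, written int64, err error) {
    hasher := sha256.New()
    written, err = io.Copy(dst, io.TeeReader(r, hasher))
    if err != nil {
        return "", 0, err
    }
    return base64.RawURLEncoding.EncodeToString(hasher.Sum(nil)), written, nil
}

func main() {
    const maxBytes = 1 << 20 // caller-side cap, like the media API's max_file_size_bytes
    body := strings.NewReader("pretend this is an uploaded file")

    tmp, err := ioutil.TempFile("", "media-*")
    if err != nil {
        panic(err)
    }
    defer os.Remove(tmp.Name())
    defer tmp.Close()

    hash, n, err := writeAndHash(tmp, io.LimitReader(body, maxBytes))
    if err != nil {
        panic(err)
    }
    fmt.Printf("wrote %d bytes, sha256 %s\n", n, hash)
}
```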
@ -19,6 +19,7 @@ import (
|
|||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"mime"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
|
@ -214,7 +215,7 @@ func (r *downloadRequest) doDownload(
|
|||
ctx, r.MediaMetadata.MediaID, r.MediaMetadata.Origin,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error querying the database")
|
||||
return nil, fmt.Errorf("db.GetMediaMetadata: %w", err)
|
||||
}
|
||||
if mediaMetadata == nil {
|
||||
if r.MediaMetadata.Origin == cfg.Matrix.ServerName {
|
||||
|
|
@ -253,16 +254,16 @@ func (r *downloadRequest) respondFromLocalFile(
|
|||
) (*types.MediaMetadata, error) {
|
||||
filePath, err := fileutils.GetPathFromBase64Hash(r.MediaMetadata.Base64Hash, absBasePath)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to get file path from metadata")
|
||||
return nil, fmt.Errorf("fileutils.GetPathFromBase64Hash: %w", err)
|
||||
}
|
||||
file, err := os.Open(filePath)
|
||||
defer file.Close() // nolint: errcheck, staticcheck, megacheck
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to open file")
|
||||
return nil, fmt.Errorf("os.Open: %w", err)
|
||||
}
|
||||
stat, err := file.Stat()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to stat file")
|
||||
return nil, fmt.Errorf("file.Stat: %w", err)
|
||||
}
|
||||
|
||||
if r.MediaMetadata.FileSizeBytes > 0 && int64(r.MediaMetadata.FileSizeBytes) != stat.Size() {
|
||||
|
|
@ -324,7 +325,7 @@ func (r *downloadRequest) respondFromLocalFile(
|
|||
w.Header().Set("Content-Security-Policy", contentSecurityPolicy)
|
||||
|
||||
if _, err := io.Copy(w, responseFile); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to copy from cache")
|
||||
return nil, fmt.Errorf("io.Copy: %w", err)
|
||||
}
|
||||
return responseMetadata, nil
|
||||
}
|
||||
|
|
@ -421,7 +422,7 @@ func (r *downloadRequest) getThumbnailFile(
|
|||
ctx, r.MediaMetadata.MediaID, r.MediaMetadata.Origin,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "error looking up thumbnails")
|
||||
return nil, nil, fmt.Errorf("db.GetThumbnails: %w", err)
|
||||
}
|
||||
|
||||
// If we get a thumbnailSize, a pre-generated thumbnail would be best but it is not yet generated.
|
||||
|
|
@ -459,12 +460,12 @@ func (r *downloadRequest) getThumbnailFile(
|
|||
thumbFile, err := os.Open(string(thumbPath))
|
||||
if err != nil {
|
||||
thumbFile.Close() // nolint: errcheck
|
||||
return nil, nil, errors.Wrap(err, "failed to open file")
|
||||
return nil, nil, fmt.Errorf("os.Open: %w", err)
|
||||
}
|
||||
thumbStat, err := thumbFile.Stat()
|
||||
if err != nil {
|
||||
thumbFile.Close() // nolint: errcheck
|
||||
return nil, nil, errors.Wrap(err, "failed to stat file")
|
||||
return nil, nil, fmt.Errorf("thumbFile.Stat: %w", err)
|
||||
}
|
||||
if types.FileSizeBytes(thumbStat.Size()) != thumbnail.MediaMetadata.FileSizeBytes {
|
||||
thumbFile.Close() // nolint: errcheck
|
||||
|
|
@ -491,7 +492,7 @@ func (r *downloadRequest) generateThumbnail(
|
|||
activeThumbnailGeneration, maxThumbnailGenerators, db, r.Logger,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error creating thumbnail")
|
||||
return nil, fmt.Errorf("thumbnailer.GenerateThumbnail: %w", err)
|
||||
}
|
||||
if busy {
|
||||
return nil, nil
|
||||
|
|
@ -502,7 +503,7 @@ func (r *downloadRequest) generateThumbnail(
|
|||
thumbnailSize.Width, thumbnailSize.Height, thumbnailSize.ResizeMethod,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error looking up thumbnail")
|
||||
return nil, fmt.Errorf("db.GetThumbnail: %w", err)
|
||||
}
|
||||
return thumbnail, nil
|
||||
}
|
||||
|
|
@ -543,7 +544,7 @@ func (r *downloadRequest) getRemoteFile(
|
|||
ctx, r.MediaMetadata.MediaID, r.MediaMetadata.Origin,
|
||||
)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error querying the database.")
|
||||
return fmt.Errorf("db.GetMediaMetadata: %w", err)
|
||||
}
|
||||
|
||||
if mediaMetadata == nil {
|
||||
|
|
@ -555,7 +556,7 @@ func (r *downloadRequest) getRemoteFile(
|
|||
cfg.MaxThumbnailGenerators,
|
||||
)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error querying the database.")
|
||||
return fmt.Errorf("r.fetchRemoteFileAndStoreMetadata: %w", err)
|
||||
}
|
||||
} else {
|
||||
// If we have a record, we can respond from the local file
|
||||
|
|
@@ -673,6 +674,43 @@ func (r *downloadRequest) fetchRemoteFileAndStoreMetadata(
    return nil
}

func (r *downloadRequest) GetContentLengthAndReader(contentLengthHeader string, body *io.ReadCloser, maxFileSizeBytes config.FileSizeBytes) (int64, io.Reader, error) {
    reader := *body
    var contentLength int64

    if contentLengthHeader != "" {
        // A Content-Length header is provided. Let's try to parse it.
        parsedLength, parseErr := strconv.ParseInt(contentLengthHeader, 10, 64)
        if parseErr != nil {
            r.Logger.WithError(parseErr).Warn("Failed to parse content length")
            return 0, nil, fmt.Errorf("strconv.ParseInt: %w", parseErr)
        }
        if parsedLength > int64(maxFileSizeBytes) {
            return 0, nil, fmt.Errorf(
                "remote file size (%d bytes) exceeds locally configured max media size (%d bytes)",
                parsedLength, maxFileSizeBytes,
            )
        }

        // We successfully parsed the Content-Length, so we'll return a limited
        // reader that restricts us to reading only up to this size.
        reader = ioutil.NopCloser(io.LimitReader(*body, parsedLength))
        contentLength = parsedLength
    } else {
        // Content-Length header is missing. If we have a maximum file size
        // configured then we'll just make sure that the reader is limited to
        // that size. We'll return a zero content length, but that's OK, since
        // ultimately it will get rewritten later when the temp file is written
        // to disk.
        if maxFileSizeBytes > 0 {
            reader = ioutil.NopCloser(io.LimitReader(*body, int64(maxFileSizeBytes)))
        }
        contentLength = 0
    }

    return contentLength, reader, nil
}

func (r *downloadRequest) fetchRemoteFile(
    ctx context.Context,
    client *gomatrixserverlib.Client,
@ -692,16 +730,18 @@ func (r *downloadRequest) fetchRemoteFile(
|
|||
}
|
||||
defer resp.Body.Close() // nolint: errcheck
|
||||
|
||||
// get metadata from request and set metadata on response
|
||||
contentLength, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
|
||||
if err != nil {
|
||||
r.Logger.WithError(err).Warn("Failed to parse content length")
|
||||
return "", false, errors.Wrap(err, "invalid response from remote server")
|
||||
// The reader returned here will be limited either by the Content-Length
|
||||
// and/or the configured maximum media size.
|
||||
contentLength, reader, parseErr := r.GetContentLengthAndReader(resp.Header.Get("Content-Length"), &resp.Body, maxFileSizeBytes)
|
||||
if parseErr != nil {
|
||||
return "", false, parseErr
|
||||
}
|
||||
|
||||
if contentLength > int64(maxFileSizeBytes) {
|
||||
// TODO: Bubble up this as a 413
|
||||
return "", false, fmt.Errorf("remote file is too large (%v > %v bytes)", contentLength, maxFileSizeBytes)
|
||||
}
|
||||
|
||||
r.MediaMetadata.FileSizeBytes = types.FileSizeBytes(contentLength)
|
||||
r.MediaMetadata.ContentType = types.ContentType(resp.Header.Get("Content-Type"))
|
||||
|
||||
|
|
@ -728,7 +768,7 @@ func (r *downloadRequest) fetchRemoteFile(
|
|||
// method of deduplicating files to save storage, as well as a way to conduct
|
||||
// integrity checks on the file data in the repository.
|
||||
// Data is truncated to maxFileSizeBytes. Content-Length was reported as 0 < Content-Length <= maxFileSizeBytes so this is OK.
|
||||
hash, bytesWritten, tmpDir, err := fileutils.WriteTempFile(ctx, resp.Body, maxFileSizeBytes, absBasePath)
|
||||
hash, bytesWritten, tmpDir, err := fileutils.WriteTempFile(ctx, reader, absBasePath)
|
||||
if err != nil {
|
||||
r.Logger.WithError(err).WithFields(log.Fields{
|
||||
"MaxFileSizeBytes": maxFileSizeBytes,
|
||||
|
|
@ -747,7 +787,7 @@ func (r *downloadRequest) fetchRemoteFile(
|
|||
// The database is the source of truth so we need to have moved the file first
|
||||
finalPath, duplicate, err := fileutils.MoveFileWithHashCheck(tmpDir, r.MediaMetadata, absBasePath, r.Logger)
|
||||
if err != nil {
|
||||
return "", false, errors.Wrap(err, "failed to move file")
|
||||
return "", false, fmt.Errorf("fileutils.MoveFileWithHashCheck: %w", err)
|
||||
}
|
||||
if duplicate {
|
||||
r.Logger.WithField("dst", finalPath).Info("File was stored previously - discarding duplicate")
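The download path above now derives both the expected length and a bounded reader from the Content-Length header, falling back to the configured maximum when the header is missing, which is the changelog fix for respecting the local media size limit. A simplified sketch of that decision, with made-up numbers and without the media-specific types:

```go
package main

import (
    "fmt"
    "io"
    "io/ioutil"
    "strconv"
    "strings"
)

// boundedBody mirrors the idea of GetContentLengthAndReader in the diff:
// trust a parseable Content-Length only after checking it against the local
// limit, and otherwise fall back to capping the reader at the limit itself.
func boundedBody(contentLengthHeader string, body io.Reader, maxBytes int64) (int64, io.Reader, error) {
    if contentLengthHeader == "" {
        // No Content-Length: just cap the read at the local maximum.
        return 0, io.LimitReader(body, maxBytes), nil
    }
    length, err := strconv.ParseInt(contentLengthHeader, 10, 64)
    if err != nil {
        return 0, nil, fmt.Errorf("strconv.ParseInt: %w", err)
    }
    if length > maxBytes {
        return 0, nil, fmt.Errorf("remote file size (%d bytes) exceeds local maximum (%d bytes)", length, maxBytes)
    }
    return length, io.LimitReader(body, length), nil
}

func main() {
    const maxBytes = 16
    // A response with no Content-Length header still cannot exceed maxBytes.
    n, r, err := boundedBody("", strings.NewReader(strings.Repeat("x", 100)), maxBytes)
    if err != nil {
        panic(err)
    }
    data, _ := ioutil.ReadAll(r)
    fmt.Printf("declared=%d read=%d bytes\n", n, len(data))
}
```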
@ -147,7 +147,7 @@ func (r *uploadRequest) doUpload(
|
|||
// r.storeFileAndMetadata(ctx, tmpDir, ...)
|
||||
// before you return from doUpload else we will leak a temp file. We could make this nicer with a `WithTransaction` style of
|
||||
// nested function to guarantee either storage or cleanup.
|
||||
hash, bytesWritten, tmpDir, err := fileutils.WriteTempFile(ctx, reqReader, *cfg.MaxFileSizeBytes, cfg.AbsBasePath)
|
||||
hash, bytesWritten, tmpDir, err := fileutils.WriteTempFile(ctx, reqReader, cfg.AbsBasePath)
|
||||
if err != nil {
|
||||
r.Logger.WithError(err).WithFields(log.Fields{
|
||||
"MaxFileSizeBytes": *cfg.MaxFileSizeBytes,
|
||||
|
|
|
|||
|
|
@ -56,6 +56,12 @@ type RoomserverInternalAPI interface {
|
|||
res *PerformPublishResponse,
|
||||
)
|
||||
|
||||
PerformInboundPeek(
|
||||
ctx context.Context,
|
||||
req *PerformInboundPeekRequest,
|
||||
res *PerformInboundPeekResponse,
|
||||
) error
|
||||
|
||||
QueryPublishedRooms(
|
||||
ctx context.Context,
|
||||
req *QueryPublishedRoomsRequest,
|
||||
|
|
|
|||
|
|
@ -88,6 +88,16 @@ func (t *RoomserverInternalAPITrace) PerformPublish(
|
|||
util.GetLogger(ctx).Infof("PerformPublish req=%+v res=%+v", js(req), js(res))
|
||||
}
|
||||
|
||||
func (t *RoomserverInternalAPITrace) PerformInboundPeek(
|
||||
ctx context.Context,
|
||||
req *PerformInboundPeekRequest,
|
||||
res *PerformInboundPeekResponse,
|
||||
) error {
|
||||
err := t.Impl.PerformInboundPeek(ctx, req, res)
|
||||
util.GetLogger(ctx).Infof("PerformInboundPeek req=%+v res=%+v", js(req), js(res))
|
||||
return err
|
||||
}
|
||||
|
||||
func (t *RoomserverInternalAPITrace) QueryPublishedRooms(
|
||||
ctx context.Context,
|
||||
req *QueryPublishedRoomsRequest,
|
||||
|
|
|
|||
|
|
@ -51,6 +51,8 @@ const (
|
|||
|
||||
// OutputTypeNewPeek indicates that the kafka event is an OutputNewPeek
|
||||
OutputTypeNewPeek OutputType = "new_peek"
|
||||
// OutputTypeNewInboundPeek indicates that the kafka event is an OutputNewInboundPeek
|
||||
OutputTypeNewInboundPeek OutputType = "new_inbound_peek"
|
||||
// OutputTypeRetirePeek indicates that the kafka event is an OutputRetirePeek
|
||||
OutputTypeRetirePeek OutputType = "retire_peek"
|
||||
)
|
||||
|
|
@ -72,6 +74,8 @@ type OutputEvent struct {
|
|||
RedactedEvent *OutputRedactedEvent `json:"redacted_event,omitempty"`
|
||||
// The content of event with type OutputTypeNewPeek
|
||||
NewPeek *OutputNewPeek `json:"new_peek,omitempty"`
|
||||
// The content of event with type OutputTypeNewInboundPeek
|
||||
NewInboundPeek *OutputNewInboundPeek `json:"new_inbound_peek,omitempty"`
|
||||
// The content of event with type OutputTypeRetirePeek
|
||||
RetirePeek *OutputRetirePeek `json:"retire_peek,omitempty"`
|
||||
}
|
||||
|
|
@ -245,6 +249,19 @@ type OutputNewPeek struct {
|
|||
DeviceID string
|
||||
}
|
||||
|
||||
// An OutputNewInboundPeek is written whenever a server starts peeking into a room
|
||||
type OutputNewInboundPeek struct {
|
||||
RoomID string
|
||||
PeekID string
|
||||
// the event ID at which the peek begins (so we can avoid
|
||||
// a race between tracking the state returned by /peek and emitting subsequent
|
||||
// peeked events)
|
||||
LatestEventID string
|
||||
ServerName gomatrixserverlib.ServerName
|
||||
// how often we told the peeking server to renew the peek
|
||||
RenewalInterval int64
|
||||
}
|
||||
|
||||
// An OutputRetirePeek is written whenever a user stops peeking into a room.
|
||||
type OutputRetirePeek struct {
|
||||
RoomID string
|
||||
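OutputEvent follows the usual Go pattern for a tagged union over the output log: one envelope with a type field and a pointer per payload, marked omitempty so only the payload for the given output type is serialised. A cut-down sketch of that pattern (the field names and JSON tags here are illustrative, not the exact roomserver types):

```go
package main

import (
    "encoding/json"
    "fmt"
)

type OutputType string

const (
    OutputTypeNewInboundPeek OutputType = "new_inbound_peek"
    OutputTypeRetirePeek     OutputType = "retire_peek"
)

type OutputNewInboundPeek struct {
    RoomID     string `json:"room_id"`
    PeekID     string `json:"peek_id"`
    ServerName string `json:"server_name"`
}

type OutputRetirePeek struct {
    RoomID string `json:"room_id"`
}

// OutputEvent carries exactly one payload, selected by Type; the unused
// pointer is nil and disappears from the JSON thanks to omitempty.
type OutputEvent struct {
    Type           OutputType            `json:"type"`
    NewInboundPeek *OutputNewInboundPeek `json:"new_inbound_peek,omitempty"`
    RetirePeek     *OutputRetirePeek     `json:"retire_peek,omitempty"`
}

func main() {
    ev := OutputEvent{
        Type: OutputTypeNewInboundPeek,
        NewInboundPeek: &OutputNewInboundPeek{
            RoomID: "!room:example.test", PeekID: "peek1", ServerName: "remote.test",
        },
    }
    b, _ := json.Marshal(ev)
    fmt.Println(string(b)) // the retire_peek field is omitted entirely
}
```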
@ -172,6 +172,28 @@ type PerformPublishResponse struct {
|
|||
Error *PerformError
|
||||
}
|
||||
|
||||
type PerformInboundPeekRequest struct {
|
||||
UserID string `json:"user_id"`
|
||||
RoomID string `json:"room_id"`
|
||||
PeekID string `json:"peek_id"`
|
||||
ServerName gomatrixserverlib.ServerName `json:"server_name"`
|
||||
RenewalInterval int64 `json:"renewal_interval"`
|
||||
}
|
||||
|
||||
type PerformInboundPeekResponse struct {
|
||||
// Does the room exist on this roomserver?
|
||||
// If the room doesn't exist this will be false and StateEvents will be empty.
|
||||
RoomExists bool `json:"room_exists"`
|
||||
// The room version of the room.
|
||||
RoomVersion gomatrixserverlib.RoomVersion `json:"room_version"`
|
||||
// The current state and auth chain events.
|
||||
// The lists will be in an arbitrary order.
|
||||
StateEvents []*gomatrixserverlib.HeaderedEvent `json:"state_events"`
|
||||
AuthChainEvents []*gomatrixserverlib.HeaderedEvent `json:"auth_chain_events"`
|
||||
// The event at which this state was captured
|
||||
LatestEvent *gomatrixserverlib.HeaderedEvent `json:"latest_event"`
|
||||
}
|
||||
|
||||
// PerformForgetRequest is a request to PerformForget
|
||||
type PerformForgetRequest struct {
|
||||
RoomID string `json:"room_id"`
|
||||
|
|
|
|||
|
|
@ -221,7 +221,7 @@ type QueryStateAndAuthChainRequest struct {
|
|||
// The room ID to query the state in.
|
||||
RoomID string `json:"room_id"`
|
||||
// The list of prev events for the event. Used to calculate the state at
|
||||
// the event
|
||||
// the event.
|
||||
PrevEventIDs []string `json:"prev_event_ids"`
|
||||
// The list of auth events for the event. Used to calculate the auth chain
|
||||
AuthEventIDs []string `json:"auth_event_ids"`
|
||||
|
|
|
|||
|
|
@ -43,7 +43,7 @@ func SendEvents(
|
|||
|
||||
// SendEventWithState writes an event with the specified kind to the roomserver
|
||||
// with the state at the event as KindOutlier before it. Will not send any event that is
|
||||
// marked as `true` in haveEventIDs
|
||||
// marked as `true` in haveEventIDs.
|
||||
func SendEventWithState(
|
||||
ctx context.Context, rsAPI RoomserverInternalAPI, kind Kind,
|
||||
state *gomatrixserverlib.RespState, event *gomatrixserverlib.HeaderedEvent,
|
||||
|
|
|
|||
|
|
@ -24,6 +24,7 @@ type RoomserverInternalAPI struct {
|
|||
*perform.Inviter
|
||||
*perform.Joiner
|
||||
*perform.Peeker
|
||||
*perform.InboundPeeker
|
||||
*perform.Unpeeker
|
||||
*perform.Leaver
|
||||
*perform.Publisher
|
||||
|
|
@ -97,6 +98,10 @@ func (r *RoomserverInternalAPI) SetFederationSenderAPI(fsAPI fsAPI.FederationSen
|
|||
FSAPI: r.fsAPI,
|
||||
Inputer: r.Inputer,
|
||||
}
|
||||
r.InboundPeeker = &perform.InboundPeeker{
|
||||
DB: r.DB,
|
||||
Inputer: r.Inputer,
|
||||
}
|
||||
r.Unpeeker = &perform.Unpeeker{
|
||||
ServerName: r.Cfg.Matrix.ServerName,
|
||||
Cfg: r.Cfg,
|
||||
|
|
|
|||
|
|
@ -20,6 +20,7 @@ import (
|
|||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/matrix-org/dendrite/internal/eventutil"
|
||||
"github.com/matrix-org/dendrite/roomserver/api"
|
||||
|
|
@ -28,9 +29,29 @@ import (
|
|||
"github.com/matrix-org/dendrite/roomserver/types"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
"github.com/matrix-org/util"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func init() {
|
||||
prometheus.MustRegister(processRoomEventDuration)
|
||||
}
|
||||
|
||||
var processRoomEventDuration = prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Namespace: "dendrite",
|
||||
Subsystem: "roomserver",
|
||||
Name: "processroomevent_duration_millis",
|
||||
Help: "How long it takes the roomserver to process an event",
|
||||
Buckets: []float64{ // milliseconds
|
||||
5, 10, 25, 50, 75, 100, 250, 500,
|
||||
1000, 2000, 3000, 4000, 5000, 6000,
|
||||
7000, 8000, 9000, 10000, 15000, 20000,
|
||||
},
|
||||
},
|
||||
[]string{"room_id"},
|
||||
)
|
||||
|
||||
// processRoomEvent can only be called once at a time
|
||||
//
|
||||
// TODO(#375): This should be rewritten to allow concurrent calls. The
|
||||
|
|
@ -42,6 +63,15 @@ func (r *Inputer) processRoomEvent(
|
|||
ctx context.Context,
|
||||
input *api.InputRoomEvent,
|
||||
) (eventID string, err error) {
|
||||
// Measure how long it takes to process this event.
|
||||
started := time.Now()
|
||||
defer func() {
|
||||
timetaken := time.Since(started)
|
||||
processRoomEventDuration.With(prometheus.Labels{
|
||||
"room_id": input.Event.RoomID(),
|
||||
}).Observe(float64(timetaken.Milliseconds()))
|
||||
}()
|
||||
|
||||
// Parse and validate the event JSON
|
||||
headered := input.Event
|
||||
event := headered.Unwrap()
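The roomserver input hunk above registers a per-room histogram and observes the processing time of each event from a deferred function. A cut-down sketch of the same Prometheus pattern; the metric and label names here are illustrative, and only the HistogramVec/Observe usage mirrors the real code:

```go
package main

import (
    "fmt"
    "time"

    "github.com/prometheus/client_golang/prometheus"
)

// A histogram in milliseconds, labelled by room, in the same shape as the
// processroomevent_duration_millis metric added above.
var eventDuration = prometheus.NewHistogramVec(
    prometheus.HistogramOpts{
        Namespace: "example",
        Subsystem: "roomserver",
        Name:      "process_event_duration_millis",
        Help:      "How long it takes to process an event",
        Buckets:   []float64{5, 10, 25, 50, 100, 250, 500, 1000},
    },
    []string{"room_id"},
)

func init() {
    prometheus.MustRegister(eventDuration)
}

// timeEvent wraps some work and records how long it took for the room.
func timeEvent(roomID string, work func()) {
    started := time.Now()
    defer func() {
        eventDuration.With(prometheus.Labels{"room_id": roomID}).
            Observe(float64(time.Since(started).Milliseconds()))
    }()
    work()
}

func main() {
    timeEvent("!room:example.test", func() { time.Sleep(20 * time.Millisecond) })
    mfs, err := prometheus.DefaultGatherer.Gather()
    if err != nil {
        panic(err)
    }
    fmt.Println("collected", len(mfs), "metric families")
}
```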
@ -100,7 +100,8 @@ type latestEventsUpdater struct {
|
|||
// The eventID of the event that was processed before this one.
|
||||
lastEventIDSent string
|
||||
// The latest events in the room after processing this event.
|
||||
latest []types.StateAtEventAndReference
|
||||
oldLatest []types.StateAtEventAndReference
|
||||
latest []types.StateAtEventAndReference
|
||||
// The state entries removed from and added to the current state of the
|
||||
// room as a result of processing this event. They are sorted lists.
|
||||
removed []types.StateEntry
|
||||
|
|
@ -123,10 +124,10 @@ func (u *latestEventsUpdater) doUpdateLatestEvents() error {
|
|||
// state snapshot from somewhere else, e.g. a federated room join,
|
||||
// then start with an empty set - none of the forward extremities
|
||||
// that we knew about before matter anymore.
|
||||
oldLatest := []types.StateAtEventAndReference{}
|
||||
u.oldLatest = []types.StateAtEventAndReference{}
|
||||
if !u.rewritesState {
|
||||
u.oldStateNID = u.updater.CurrentStateSnapshotNID()
|
||||
oldLatest = u.updater.LatestEvents()
|
||||
u.oldLatest = u.updater.LatestEvents()
|
||||
}
|
||||
|
||||
// If the event has already been written to the output log then we
|
||||
|
|
@ -140,7 +141,7 @@ func (u *latestEventsUpdater) doUpdateLatestEvents() error {
|
|||
// Work out what the latest events are. This will include the new
|
||||
// event if it is not already referenced.
|
||||
extremitiesChanged, err := u.calculateLatest(
|
||||
oldLatest, u.event,
|
||||
u.oldLatest, u.event,
|
||||
types.StateAtEventAndReference{
|
||||
EventReference: u.event.EventReference(),
|
||||
StateAtEvent: u.stateAtEvent,
|
||||
|
|
@ -200,6 +201,37 @@ func (u *latestEventsUpdater) latestState() error {
|
|||
var err error
|
||||
roomState := state.NewStateResolution(u.api.DB, *u.roomInfo)
|
||||
|
||||
// Work out if the state at the extremities has actually changed
|
||||
// or not. If they haven't then we won't bother doing all of the
|
||||
// hard work.
|
||||
if u.event.StateKey() == nil {
|
||||
stateChanged := false
|
||||
oldStateNIDs := make([]types.StateSnapshotNID, 0, len(u.oldLatest))
|
||||
newStateNIDs := make([]types.StateSnapshotNID, 0, len(u.latest))
|
||||
for _, old := range u.oldLatest {
|
||||
oldStateNIDs = append(oldStateNIDs, old.BeforeStateSnapshotNID)
|
||||
}
|
||||
for _, new := range u.latest {
|
||||
newStateNIDs = append(newStateNIDs, new.BeforeStateSnapshotNID)
|
||||
}
|
||||
oldStateNIDs = state.UniqueStateSnapshotNIDs(oldStateNIDs)
|
||||
newStateNIDs = state.UniqueStateSnapshotNIDs(newStateNIDs)
|
||||
if len(oldStateNIDs) != len(newStateNIDs) {
|
||||
stateChanged = true
|
||||
} else {
|
||||
for i := range oldStateNIDs {
|
||||
if oldStateNIDs[i] != newStateNIDs[i] {
|
||||
stateChanged = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if !stateChanged {
|
||||
u.newStateNID = u.oldStateNID
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Get a list of the current latest events. This may or may not
|
||||
// include the new event from the input path, depending on whether
|
||||
// it is a forward extremity or not.
|
||||
|
|
@ -259,34 +291,8 @@ func (u *latestEventsUpdater) calculateLatest(
|
|||
// First of all, get a list of all of the events in our current
|
||||
// set of forward extremities.
|
||||
existingRefs := make(map[string]*types.StateAtEventAndReference)
|
||||
existingNIDs := make([]types.EventNID, len(oldLatest))
|
||||
for i, old := range oldLatest {
|
||||
existingRefs[old.EventID] = &oldLatest[i]
|
||||
existingNIDs[i] = old.EventNID
|
||||
}
|
||||
|
||||
// Look up the old extremity events. This allows us to find their
|
||||
// prev events.
|
||||
events, err := u.api.DB.Events(u.ctx, existingNIDs)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("u.api.DB.Events: %w", err)
|
||||
}
|
||||
|
||||
// Make a list of all of the prev events as referenced by all of
|
||||
// the current forward extremities.
|
||||
existingPrevs := make(map[string]struct{})
|
||||
for _, old := range events {
|
||||
for _, prevEventID := range old.PrevEventIDs() {
|
||||
existingPrevs[prevEventID] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
// If the "new" event is already referenced by a forward extremity
|
||||
// then do nothing - it's not a candidate to be a new extremity if
|
||||
// it has been referenced.
|
||||
if _, ok := existingPrevs[newEvent.EventID()]; ok {
|
||||
u.latest = oldLatest
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// If the "new" event is already a forward extremity then stop, as
|
||||
|
|
@ -296,6 +302,29 @@ func (u *latestEventsUpdater) calculateLatest(
|
|||
return false, nil
|
||||
}
|
||||
|
||||
// If the "new" event is already referenced by an existing event
|
||||
// then do nothing - it's not a candidate to be a new extremity if
|
||||
// it has been referenced.
|
||||
if referenced, err := u.updater.IsReferenced(newEvent.EventReference()); err != nil {
|
||||
return false, fmt.Errorf("u.updater.IsReferenced(new): %w", err)
|
||||
} else if referenced {
|
||||
u.latest = oldLatest
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Then let's see if any of the existing forward extremities now
|
||||
// have entries in the previous events table. If they do then we
|
||||
// will no longer include them as forward extremities.
|
||||
existingPrevs := make(map[string]struct{})
|
||||
for _, l := range existingRefs {
|
||||
referenced, err := u.updater.IsReferenced(l.EventReference)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("u.updater.IsReferenced: %w", err)
|
||||
} else if referenced {
|
||||
existingPrevs[l.EventID] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
// Include our new event in the extremities.
|
||||
newLatest := []types.StateAtEventAndReference{newStateAndRef}
|
||||
|
||||
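The latestState hunk above skips state resolution for non-state events when the de-duplicated set of state snapshot NIDs at the old and new forward extremities is identical. A small sketch of that comparison, using a stand-in NID type rather than the roomserver's types package:

```go
package main

import (
    "fmt"
    "sort"
)

type stateSnapshotNID int64

// uniqueSorted mirrors the role of state.UniqueStateSnapshotNIDs: sort and
// de-duplicate so two extremity sets can be compared positionally.
func uniqueSorted(nids []stateSnapshotNID) []stateSnapshotNID {
    sort.Slice(nids, func(i, j int) bool { return nids[i] < nids[j] })
    out := nids[:0]
    for i, n := range nids {
        if i == 0 || n != out[len(out)-1] {
            out = append(out, n)
        }
    }
    return out
}

// stateChanged reports whether the state at the new forward extremities can
// differ from the state at the old ones; if not, resolution can be skipped.
func stateChanged(oldNIDs, newNIDs []stateSnapshotNID) bool {
    oldNIDs, newNIDs = uniqueSorted(oldNIDs), uniqueSorted(newNIDs)
    if len(oldNIDs) != len(newNIDs) {
        return true
    }
    for i := range oldNIDs {
        if oldNIDs[i] != newNIDs[i] {
            return true
        }
    }
    return false
}

func main() {
    fmt.Println(stateChanged([]stateSnapshotNID{3, 3, 7}, []stateSnapshotNID{7, 3})) // false
    fmt.Println(stateChanged([]stateSnapshotNID{3, 7}, []stateSnapshotNID{3, 8}))    // true
}
```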
@@ -107,11 +107,21 @@ func (r *Inputer) updateMembership(
        return updates, nil
    }

    // In an ideal world, we shouldn't ever have "add" be nil and "remove" be
    // set, as this implies that we're deleting a state event without replacing
    // it (a thing that ordinarily shouldn't happen in Matrix). However, state
    // resets are sadly a thing occasionally and we have to account for that.
    // Beforehand there used to be a check here which stopped dead if we hit
    // this scenario, but that meant that the membership table got out of sync
    // after a state reset, often thinking that the user was still joined to
    // the room even though the room state said otherwise, and this would prevent
    // the user from being able to attempt to rejoin the room without modifying
    // the database. So instead what we'll do is we'll just update the membership
    // table to say that the user is "leave" and we'll use the old event to
    // avoid nil pointer exceptions on the code path that follows.
    if add == nil {
        // This can happen when we have rejoined a room and suddenly we have a
        // divergence between the former state and the new one. We don't want to
        // act on removals and apparently there are no adds, so stop here.
        return updates, nil
        add = remove
        newMembership = gomatrixserverlib.Leave
    }

    mu, err := updater.MembershipUpdater(targetUserNID, r.isLocalTarget(add))
|
||||
|
|
|
|||
129 roomserver/internal/perform/perform_inbound_peek.go Normal file

@@ -0,0 +1,129 @@
// Copyright 2020 New Vector Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package perform

import (
	"context"

	"github.com/matrix-org/dendrite/roomserver/api"
	"github.com/matrix-org/dendrite/roomserver/internal/helpers"
	"github.com/matrix-org/dendrite/roomserver/internal/input"
	"github.com/matrix-org/dendrite/roomserver/internal/query"
	"github.com/matrix-org/dendrite/roomserver/state"
	"github.com/matrix-org/dendrite/roomserver/storage"
	"github.com/matrix-org/dendrite/roomserver/types"
	"github.com/matrix-org/gomatrixserverlib"
	"github.com/matrix-org/util"
)

type InboundPeeker struct {
	DB      storage.Database
	Inputer *input.Inputer
}

// PerformInboundPeek handles peeking into matrix rooms, including over
// federation by talking to the federationsender. called when a remote server
// initiates a /peek over federation.
//
// It should atomically figure out the current state of the room (for the
// response to /peek) while adding the new inbound peek to the kafka stream so the
// fed sender can start sending peeked events without a race between the state
// snapshot and the stream of peeked events.
func (r *InboundPeeker) PerformInboundPeek(
	ctx context.Context,
	request *api.PerformInboundPeekRequest,
	response *api.PerformInboundPeekResponse,
) error {
	info, err := r.DB.RoomInfo(ctx, request.RoomID)
	if err != nil {
		return err
	}
	if info == nil || info.IsStub {
		return nil
	}
	response.RoomExists = true
	response.RoomVersion = info.RoomVersion

	var stateEvents []*gomatrixserverlib.Event

	var currentStateSnapshotNID types.StateSnapshotNID
	latestEventRefs, currentStateSnapshotNID, _, err :=
		r.DB.LatestEventIDs(ctx, info.RoomNID)
	if err != nil {
		return err
	}
	latestEvents, err := r.DB.EventsFromIDs(ctx, []string{latestEventRefs[0].EventID})
	if err != nil {
		return err
	}
	var sortedLatestEvents []*gomatrixserverlib.Event
	for _, ev := range latestEvents {
		sortedLatestEvents = append(sortedLatestEvents, ev.Event)
	}
	sortedLatestEvents = gomatrixserverlib.ReverseTopologicalOrdering(
		sortedLatestEvents,
		gomatrixserverlib.TopologicalOrderByPrevEvents,
	)
	response.LatestEvent = sortedLatestEvents[0].Headered(info.RoomVersion)

	// XXX: do we actually need to do a state resolution here?
	roomState := state.NewStateResolution(r.DB, *info)

	var stateEntries []types.StateEntry
	stateEntries, err = roomState.LoadStateAtSnapshot(
		ctx, currentStateSnapshotNID,
	)
	if err != nil {
		return err
	}
	stateEvents, err = helpers.LoadStateEvents(ctx, r.DB, stateEntries)
	if err != nil {
		return err
	}

	// get the auth event IDs for the current state events
	var authEventIDs []string
	for _, se := range stateEvents {
		authEventIDs = append(authEventIDs, se.AuthEventIDs()...)
	}
	authEventIDs = util.UniqueStrings(authEventIDs) // de-dupe

	authEvents, err := query.GetAuthChain(ctx, r.DB.EventsFromIDs, authEventIDs)
	if err != nil {
		return err
	}

	for _, event := range stateEvents {
		response.StateEvents = append(response.StateEvents, event.Headered(info.RoomVersion))
	}

	for _, event := range authEvents {
		response.AuthChainEvents = append(response.AuthChainEvents, event.Headered(info.RoomVersion))
	}

	err = r.Inputer.WriteOutputEvents(request.RoomID, []api.OutputEvent{
		{
			Type: api.OutputTypeNewInboundPeek,
			NewInboundPeek: &api.OutputNewInboundPeek{
				RoomID:          request.RoomID,
				PeekID:          request.PeekID,
				LatestEventID:   latestEvents[0].EventID(),
				ServerName:      request.ServerName,
				RenewalInterval: request.RenewalInterval,
			},
		},
	})
	return err
}

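For orientation, this is roughly how a caller might drive the new inbound peeker. The request and response field names are taken from the file above; everything else (the package name, the literal values, how the InboundPeeker is obtained) is hypothetical:

package peekexample

import (
	"context"

	"github.com/matrix-org/dendrite/roomserver/api"
	"github.com/matrix-org/dendrite/roomserver/internal/perform"
)

// respondToInboundPeek is a hypothetical caller of the new InboundPeeker,
// sketched against the request/response fields used in the file above.
func respondToInboundPeek(ctx context.Context, peeker *perform.InboundPeeker) error {
	req := &api.PerformInboundPeekRequest{
		RoomID:          "!room:remote.example", // placeholder
		PeekID:          "peek1",                // placeholder
		ServerName:      "requesting.example",   // placeholder
		RenewalInterval: 3600,                   // placeholder; units depend on the API definition
	}
	res := &api.PerformInboundPeekResponse{}
	if err := peeker.PerformInboundPeek(ctx, req, res); err != nil {
		return err
	}
	if !res.RoomExists {
		return nil // nothing to peek into
	}
	// res.LatestEvent, res.StateEvents and res.AuthChainEvents now carry the
	// snapshot that would back the federation /peek response.
	return nil
}
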
@@ -225,7 +225,7 @@ func buildInviteStrippedState(
	for _, t := range []string{
		gomatrixserverlib.MRoomName, gomatrixserverlib.MRoomCanonicalAlias,
		gomatrixserverlib.MRoomAliases, gomatrixserverlib.MRoomJoinRules,
		"m.room.avatar", "m.room.encryption",
		"m.room.avatar", "m.room.encryption", gomatrixserverlib.MRoomCreate,
	} {
		stateWanted = append(stateWanted, gomatrixserverlib.StateKeyTuple{
			EventType: t,

@@ -151,11 +151,28 @@ func (r *Peeker) performPeekRoomByID(
		}
	}

	// If the server name in the room ID isn't ours then it's a
	// possible candidate for finding the room via federation. Add
	// it to the list of servers to try.
	// handle federated peeks
	// FIXME: don't create an outbound peek if we already have one going.
	if domain != r.Cfg.Matrix.ServerName {
		// If the server name in the room ID isn't ours then it's a
		// possible candidate for finding the room via federation. Add
		// it to the list of servers to try.
		req.ServerNames = append(req.ServerNames, domain)

		// Try peeking by all of the supplied server names.
		fedReq := fsAPI.PerformOutboundPeekRequest{
			RoomID:      req.RoomIDOrAlias, // the room ID to try and peek
			ServerNames: req.ServerNames,   // the servers to try peeking via
		}
		fedRes := fsAPI.PerformOutboundPeekResponse{}
		_ = r.FSAPI.PerformOutboundPeek(ctx, &fedReq, &fedRes)
		if fedRes.LastError != nil {
			return "", &api.PerformError{
				Code:       api.PerformErrRemote,
				Msg:        fedRes.LastError.Message,
				RemoteCode: fedRes.LastError.Code,
			}
		}
	}

	// If this room isn't world_readable, we reject.

@@ -107,12 +107,12 @@ func (r *Queryer) QueryStateAfterEvents(
	}
	authEventIDs = util.UniqueStrings(authEventIDs)

	authEvents, err := getAuthChain(ctx, r.DB.EventsFromIDs, authEventIDs)
	authEvents, err := GetAuthChain(ctx, r.DB.EventsFromIDs, authEventIDs)
	if err != nil {
		return fmt.Errorf("getAuthChain: %w", err)
	}

	stateEvents, err = state.ResolveConflictsAdhoc(info.RoomVersion, stateEvents, authEvents)
	stateEvents, err = gomatrixserverlib.ResolveConflicts(info.RoomVersion, stateEvents, authEvents)
	if err != nil {
		return fmt.Errorf("state.ResolveConflictsAdhoc: %w", err)
	}

@@ -447,10 +447,12 @@ func (r *Queryer) QueryStateAndAuthChain(
	response.RoomExists = true
	response.RoomVersion = info.RoomVersion

	stateEvents, err := r.loadStateAtEventIDs(ctx, *info, request.PrevEventIDs)
	var stateEvents []*gomatrixserverlib.Event
	stateEvents, err = r.loadStateAtEventIDs(ctx, *info, request.PrevEventIDs)
	if err != nil {
		return err
	}

	response.PrevEventsExist = true

	// add the auth event IDs for the current state events too

@@ -461,13 +463,13 @@ func (r *Queryer) QueryStateAndAuthChain(
	}
	authEventIDs = util.UniqueStrings(authEventIDs) // de-dupe

	authEvents, err := getAuthChain(ctx, r.DB.EventsFromIDs, authEventIDs)
	authEvents, err := GetAuthChain(ctx, r.DB.EventsFromIDs, authEventIDs)
	if err != nil {
		return err
	}

	if request.ResolveState {
		if stateEvents, err = state.ResolveConflictsAdhoc(
		if stateEvents, err = gomatrixserverlib.ResolveConflicts(
			info.RoomVersion, stateEvents, authEvents,
		); err != nil {
			return err

@@ -510,11 +512,11 @@ func (r *Queryer) loadStateAtEventIDs(ctx context.Context, roomInfo types.RoomIn

type eventsFromIDs func(context.Context, []string) ([]types.Event, error)

// getAuthChain fetches the auth chain for the given auth events. An auth chain
// GetAuthChain fetches the auth chain for the given auth events. An auth chain
// is the list of all events that are referenced in the auth_events section, and
// all their auth_events, recursively. The returned set of events contain the
// given events. Will *not* error if we don't have all auth events.
func getAuthChain(
func GetAuthChain(
	ctx context.Context, fn eventsFromIDs, authEventIDs []string,
) ([]*gomatrixserverlib.Event, error) {
	// List of event IDs to fetch. On each pass, these events will be requested

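Exporting GetAuthChain is what lets the inbound peek code above reuse it from another package. A minimal sketch of the call shape, mirroring that usage; the wrapper function and its package name are hypothetical:

package authchainexample

import (
	"context"
	"fmt"

	"github.com/matrix-org/dendrite/roomserver/internal/query"
	"github.com/matrix-org/dendrite/roomserver/storage"
	"github.com/matrix-org/gomatrixserverlib"
)

// loadAuthChain sketches the newly exported GetAuthChain call shape, as used
// by perform_inbound_peek.go; the caller supplies the event IDs to expand.
func loadAuthChain(ctx context.Context, db storage.Database, authEventIDs []string) ([]*gomatrixserverlib.Event, error) {
	authEvents, err := query.GetAuthChain(ctx, db.EventsFromIDs, authEventIDs)
	if err != nil {
		return nil, fmt.Errorf("query.GetAuthChain: %w", err)
	}
	// The result contains the requested events plus everything reachable via
	// auth_events, recursively; missing events are skipped rather than erroring.
	return authEvents, nil
}
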
@@ -718,7 +720,7 @@ func (r *Queryer) QueryServerBannedFromRoom(ctx context.Context, req *api.QueryS
}

func (r *Queryer) QueryAuthChain(ctx context.Context, req *api.QueryAuthChainRequest, res *api.QueryAuthChainResponse) error {
	chain, err := getAuthChain(ctx, r.DB.EventsFromIDs, req.EventIDs)
	chain, err := GetAuthChain(ctx, r.DB.EventsFromIDs, req.EventIDs)
	if err != nil {
		return err
	}

@@ -106,7 +106,7 @@ func TestGetAuthChainSingle(t *testing.T) {
		t.Fatalf("Failed to add events to db: %v", err)
	}

	result, err := getAuthChain(context.TODO(), db.EventsFromIDs, []string{"e"})
	result, err := GetAuthChain(context.TODO(), db.EventsFromIDs, []string{"e"})
	if err != nil {
		t.Fatalf("getAuthChain failed: %v", err)
	}

@@ -139,7 +139,7 @@ func TestGetAuthChainMultiple(t *testing.T) {
		t.Fatalf("Failed to add events to db: %v", err)
	}

	result, err := getAuthChain(context.TODO(), db.EventsFromIDs, []string{"e", "f"})
	result, err := GetAuthChain(context.TODO(), db.EventsFromIDs, []string{"e", "f"})
	if err != nil {
		t.Fatalf("getAuthChain failed: %v", err)
	}

@@ -26,14 +26,15 @@ const (
	RoomserverInputRoomEventsPath = "/roomserver/inputRoomEvents"

	// Perform operations
	RoomserverPerformInvitePath      = "/roomserver/performInvite"
	RoomserverPerformPeekPath        = "/roomserver/performPeek"
	RoomserverPerformUnpeekPath      = "/roomserver/performUnpeek"
	RoomserverPerformJoinPath        = "/roomserver/performJoin"
	RoomserverPerformLeavePath       = "/roomserver/performLeave"
	RoomserverPerformBackfillPath    = "/roomserver/performBackfill"
	RoomserverPerformPublishPath     = "/roomserver/performPublish"
	RoomserverPerformInboundPeekPath = "/roomserver/performInboundPeek"
	RoomserverPerformForgetPath      = "/roomserver/performForget"

	// Query operations
	RoomserverQueryLatestEventsAndStatePath = "/roomserver/queryLatestEventsAndState"

@@ -216,6 +217,18 @@ func (h *httpRoomserverInternalAPI) PerformPeek(
	}
}

func (h *httpRoomserverInternalAPI) PerformInboundPeek(
	ctx context.Context,
	request *api.PerformInboundPeekRequest,
	response *api.PerformInboundPeekResponse,
) error {
	span, ctx := opentracing.StartSpanFromContext(ctx, "PerformInboundPeek")
	defer span.Finish()

	apiURL := h.roomserverURL + RoomserverPerformInboundPeekPath
	return httputil.PostJSON(ctx, span, h.httpClient, apiURL, request, response)
}

func (h *httpRoomserverInternalAPI) PerformUnpeek(
	ctx context.Context,
	request *api.PerformUnpeekRequest,

@@ -72,6 +72,19 @@ func AddRoutes(r api.RoomserverInternalAPI, internalAPIMux *mux.Router) {
			return util.JSONResponse{Code: http.StatusOK, JSON: &response}
		}),
	)
	internalAPIMux.Handle(RoomserverPerformInboundPeekPath,
		httputil.MakeInternalAPI("performInboundPeek", func(req *http.Request) util.JSONResponse {
			var request api.PerformInboundPeekRequest
			var response api.PerformInboundPeekResponse
			if err := json.NewDecoder(req.Body).Decode(&request); err != nil {
				return util.MessageResponse(http.StatusBadRequest, err.Error())
			}
			if err := r.PerformInboundPeek(req.Context(), &request, &response); err != nil {
				return util.ErrorResponse(err)
			}
			return util.JSONResponse{Code: http.StatusOK, JSON: &response}
		}),
	)
	internalAPIMux.Handle(RoomserverPerformPeekPath,
		httputil.MakeInternalAPI("performUnpeek", func(req *http.Request) util.JSONResponse {
			var request api.PerformUnpeekRequest

@@ -33,19 +33,21 @@ import (
type StateResolution struct {
	db       storage.Database
	roomInfo types.RoomInfo
	events   map[types.EventNID]*gomatrixserverlib.Event
}

func NewStateResolution(db storage.Database, roomInfo types.RoomInfo) StateResolution {
	return StateResolution{
		db:       db,
		roomInfo: roomInfo,
		events:   make(map[types.EventNID]*gomatrixserverlib.Event),
	}
}

// LoadStateAtSnapshot loads the full state of a room at a particular snapshot.
// This is typically the state before an event or the current state of a room.
// Returns a sorted list of state entries or an error if there was a problem talking to the database.
func (v StateResolution) LoadStateAtSnapshot(
func (v *StateResolution) LoadStateAtSnapshot(
	ctx context.Context, stateNID types.StateSnapshotNID,
) ([]types.StateEntry, error) {
	stateBlockNIDLists, err := v.db.StateBlockNIDs(ctx, []types.StateSnapshotNID{stateNID})

@@ -83,7 +85,7 @@ func (v StateResolution) LoadStateAtSnapshot(
}

// LoadStateAtEvent loads the full state of a room before a particular event.
func (v StateResolution) LoadStateAtEvent(
func (v *StateResolution) LoadStateAtEvent(
	ctx context.Context, eventID string,
) ([]types.StateEntry, error) {
	snapshotNID, err := v.db.SnapshotNIDFromEventID(ctx, eventID)

@@ -105,7 +107,7 @@ func (v StateResolution) LoadStateAtEvent(
// LoadCombinedStateAfterEvents loads a snapshot of the state after each of the events
// and combines those snapshots together into a single list. At this point it is
// possible to run into duplicate (type, state key) tuples.
func (v StateResolution) LoadCombinedStateAfterEvents(
func (v *StateResolution) LoadCombinedStateAfterEvents(
	ctx context.Context, prevStates []types.StateAtEvent,
) ([]types.StateEntry, error) {
	stateNIDs := make([]types.StateSnapshotNID, len(prevStates))

@@ -116,7 +118,7 @@ func (v StateResolution) LoadCombinedStateAfterEvents(
	// Deduplicate the IDs before passing them to the database.
	// There could be duplicates because the events could be state events where
	// the snapshot of the room state before them was the same.
	stateBlockNIDLists, err := v.db.StateBlockNIDs(ctx, uniqueStateSnapshotNIDs(stateNIDs))
	stateBlockNIDLists, err := v.db.StateBlockNIDs(ctx, UniqueStateSnapshotNIDs(stateNIDs))
	if err != nil {
		return nil, fmt.Errorf("v.db.StateBlockNIDs: %w", err)
	}

@@ -177,7 +179,7 @@ func (v StateResolution) LoadCombinedStateAfterEvents(
}

// DifferenceBetweeenStateSnapshots works out which state entries have been added and removed between two snapshots.
func (v StateResolution) DifferenceBetweeenStateSnapshots(
func (v *StateResolution) DifferenceBetweeenStateSnapshots(
	ctx context.Context, oldStateNID, newStateNID types.StateSnapshotNID,
) (removed, added []types.StateEntry, err error) {
	if oldStateNID == newStateNID {

@@ -236,7 +238,7 @@ func (v StateResolution) DifferenceBetweeenStateSnapshots(
// If there is no entry for a given event type and state key pair then it will be discarded.
// This is typically the state before an event or the current state of a room.
// Returns a sorted list of state entries or an error if there was a problem talking to the database.
func (v StateResolution) LoadStateAtSnapshotForStringTuples(
func (v *StateResolution) LoadStateAtSnapshotForStringTuples(
	ctx context.Context,
	stateNID types.StateSnapshotNID,
	stateKeyTuples []gomatrixserverlib.StateKeyTuple,

@@ -251,7 +253,7 @@ func (v StateResolution) LoadStateAtSnapshotForStringTuples(
// stringTuplesToNumericTuples converts the string state key tuples into numeric IDs
// If there isn't a numeric ID for either the event type or the event state key then the tuple is discarded.
// Returns an error if there was a problem talking to the database.
func (v StateResolution) stringTuplesToNumericTuples(
func (v *StateResolution) stringTuplesToNumericTuples(
	ctx context.Context,
	stringTuples []gomatrixserverlib.StateKeyTuple,
) ([]types.StateKeyTuple, error) {

@@ -292,7 +294,7 @@ func (v StateResolution) stringTuplesToNumericTuples(
// If there is no entry for a given event type and state key pair then it will be discarded.
// This is typically the state before an event or the current state of a room.
// Returns a sorted list of state entries or an error if there was a problem talking to the database.
func (v StateResolution) loadStateAtSnapshotForNumericTuples(
func (v *StateResolution) loadStateAtSnapshotForNumericTuples(
	ctx context.Context,
	stateNID types.StateSnapshotNID,
	stateKeyTuples []types.StateKeyTuple,

@@ -340,7 +342,7 @@ func (v StateResolution) loadStateAtSnapshotForNumericTuples(
// If there is no entry for a given event type and state key pair then it will be discarded.
// This is typically the state before an event.
// Returns a sorted list of state entries or an error if there was a problem talking to the database.
func (v StateResolution) LoadStateAfterEventsForStringTuples(
func (v *StateResolution) LoadStateAfterEventsForStringTuples(
	ctx context.Context,
	prevStates []types.StateAtEvent,
	stateKeyTuples []gomatrixserverlib.StateKeyTuple,

@@ -352,7 +354,7 @@ func (v StateResolution) LoadStateAfterEventsForStringTuples(
	return v.loadStateAfterEventsForNumericTuples(ctx, prevStates, numericTuples)
}

func (v StateResolution) loadStateAfterEventsForNumericTuples(
func (v *StateResolution) loadStateAfterEventsForNumericTuples(
	ctx context.Context,
	prevStates []types.StateAtEvent,
	stateKeyTuples []types.StateKeyTuple,

@@ -520,7 +522,7 @@ func init() {
// CalculateAndStoreStateBeforeEvent calculates a snapshot of the state of a room before an event.
// Stores the snapshot of the state in the database.
// Returns a numeric ID for the snapshot of the state before the event.
func (v StateResolution) CalculateAndStoreStateBeforeEvent(
func (v *StateResolution) CalculateAndStoreStateBeforeEvent(
	ctx context.Context,
	event *gomatrixserverlib.Event,
	isRejected bool,

@@ -537,7 +539,7 @@ func (v StateResolution) CalculateAndStoreStateBeforeEvent(

// CalculateAndStoreStateAfterEvents finds the room state after the given events.
// Stores the resulting state in the database and returns a numeric ID for that snapshot.
func (v StateResolution) CalculateAndStoreStateAfterEvents(
func (v *StateResolution) CalculateAndStoreStateAfterEvents(
	ctx context.Context,
	prevStates []types.StateAtEvent,
) (types.StateSnapshotNID, error) {

@@ -607,7 +609,7 @@ const maxStateBlockNIDs = 64
// calculateAndStoreStateAfterManyEvents finds the room state after the given events.
// This handles the slow path of calculateAndStoreStateAfterEvents for when there is more than one event.
// Stores the resulting state and returns a numeric ID for the snapshot.
func (v StateResolution) calculateAndStoreStateAfterManyEvents(
func (v *StateResolution) calculateAndStoreStateAfterManyEvents(
	ctx context.Context,
	roomNID types.RoomNID,
	prevStates []types.StateAtEvent,

@@ -627,7 +629,7 @@ func (v StateResolution) calculateAndStoreStateAfterManyEvents(
	return metrics.stop(v.db.AddState(ctx, roomNID, nil, state))
}

func (v StateResolution) calculateStateAfterManyEvents(
func (v *StateResolution) calculateStateAfterManyEvents(
	ctx context.Context, roomVersion gomatrixserverlib.RoomVersion,
	prevStates []types.StateAtEvent,
) (state []types.StateEntry, algorithm string, conflictLength int, err error) {

@@ -681,80 +683,7 @@ func (v StateResolution) calculateStateAfterManyEvents(
	return
}

// ResolveConflictsAdhoc is a helper function to assist the query API in
// performing state resolution when requested. This is a different code
// path to the rest of state.go because this assumes you already have
// gomatrixserverlib.Event objects and not just a bunch of NIDs like
// elsewhere in the state resolution.
// TODO: Some of this can possibly be deduplicated
func ResolveConflictsAdhoc(
	version gomatrixserverlib.RoomVersion,
	events []*gomatrixserverlib.Event,
	authEvents []*gomatrixserverlib.Event,
) ([]*gomatrixserverlib.Event, error) {
	type stateKeyTuple struct {
		Type     string
		StateKey string
	}

	// Prepare our data structures.
	eventMap := make(map[stateKeyTuple][]*gomatrixserverlib.Event)
	var conflicted, notConflicted, resolved []*gomatrixserverlib.Event

	// Run through all of the events that we were given and sort them
	// into a map, sorted by (event_type, state_key) tuple. This means
	// that we can easily spot events that are "conflicted", e.g.
	// there are duplicate values for the same tuple key.
	for _, event := range events {
		if event.StateKey() == nil {
			// Ignore events that are not state events.
			continue
		}
		// Append the events if there is already a conflicted list for
		// this tuple key, create it if not.
		tuple := stateKeyTuple{event.Type(), *event.StateKey()}
		eventMap[tuple] = append(eventMap[tuple], event)
	}

	// Split out the events in the map into conflicted and unconflicted
	// buckets. The conflicted events will be ran through state res,
	// whereas unconfliced events will always going to appear in the
	// final resolved state.
	for _, list := range eventMap {
		if len(list) > 1 {
			conflicted = append(conflicted, list...)
		} else {
			notConflicted = append(notConflicted, list...)
		}
	}

	// Work out which state resolution algorithm we want to run for
	// the room version.
	stateResAlgo, err := version.StateResAlgorithm()
	if err != nil {
		return nil, err
	}
	switch stateResAlgo {
	case gomatrixserverlib.StateResV1:
		// Currently state res v1 doesn't handle unconflicted events
		// for us, like state res v2 does, so we will need to add the
		// unconflicted events into the state ourselves.
		// TODO: Fix state res v1 so this is handled for the caller.
		resolved = gomatrixserverlib.ResolveStateConflicts(conflicted, authEvents)
		resolved = append(resolved, notConflicted...)
	case gomatrixserverlib.StateResV2:
		// TODO: auth difference here?
		resolved = gomatrixserverlib.ResolveStateConflictsV2(conflicted, notConflicted, authEvents, authEvents)
	default:
		return nil, fmt.Errorf("unsupported state resolution algorithm %v", stateResAlgo)
	}

	// Return the final resolved state events, including both the
	// resolved set of conflicted events, and the unconflicted events.
	return resolved, nil
}

func (v StateResolution) resolveConflicts(
func (v *StateResolution) resolveConflicts(
	ctx context.Context, version gomatrixserverlib.RoomVersion,
	notConflicted, conflicted []types.StateEntry,
) ([]types.StateEntry, error) {

@@ -778,7 +707,7 @@ func (v StateResolution) resolveConflicts(
// Returns a list that combines the entries without conflicts with the result of state resolution for the entries with conflicts.
// The returned list is sorted by state key tuple.
// Returns an error if there was a problem talking to the database.
func (v StateResolution) resolveConflictsV1(
func (v *StateResolution) resolveConflictsV1(
	ctx context.Context,
	notConflicted, conflicted []types.StateEntry,
) ([]types.StateEntry, error) {

@@ -842,7 +771,7 @@ func (v StateResolution) resolveConflictsV1(
// The returned list is sorted by state key tuple.
// Returns an error if there was a problem talking to the database.
// nolint:gocyclo
func (v StateResolution) resolveConflictsV2(
func (v *StateResolution) resolveConflictsV2(
	ctx context.Context,
	notConflicted, conflicted []types.StateEntry,
) ([]types.StateEntry, error) {

@@ -959,7 +888,7 @@ func (v StateResolution) resolveConflictsV2(
}

// stateKeyTuplesNeeded works out which numeric state key tuples we need to authenticate some events.
func (v StateResolution) stateKeyTuplesNeeded(stateKeyNIDMap map[string]types.EventStateKeyNID, stateNeeded gomatrixserverlib.StateNeeded) []types.StateKeyTuple {
func (v *StateResolution) stateKeyTuplesNeeded(stateKeyNIDMap map[string]types.EventStateKeyNID, stateNeeded gomatrixserverlib.StateNeeded) []types.StateKeyTuple {
	var keyTuples []types.StateKeyTuple
	if stateNeeded.Create {
		keyTuples = append(keyTuples, types.StateKeyTuple{

@@ -1004,26 +933,33 @@ func (v StateResolution) stateKeyTuplesNeeded(stateKeyNIDMap map[string]types.Ev
// Returns a list of state events in no particular order and a map from string event ID back to state entry.
// The map can be used to recover which numeric state entry a given event is for.
// Returns an error if there was a problem talking to the database.
func (v StateResolution) loadStateEvents(
func (v *StateResolution) loadStateEvents(
	ctx context.Context, entries []types.StateEntry,
) ([]*gomatrixserverlib.Event, map[string]types.StateEntry, error) {
	eventNIDs := make([]types.EventNID, len(entries))
	for i := range entries {
		eventNIDs[i] = entries[i].EventNID
	result := make([]*gomatrixserverlib.Event, 0, len(entries))
	eventEntries := make([]types.StateEntry, 0, len(entries))
	eventNIDs := make([]types.EventNID, 0, len(entries))
	for _, entry := range entries {
		if e, ok := v.events[entry.EventNID]; ok {
			result = append(result, e)
		} else {
			eventEntries = append(eventEntries, entry)
			eventNIDs = append(eventNIDs, entry.EventNID)
		}
	}
	events, err := v.db.Events(ctx, eventNIDs)
	if err != nil {
		return nil, nil, err
	}
	eventIDMap := map[string]types.StateEntry{}
	result := make([]*gomatrixserverlib.Event, len(entries))
	for i := range entries {
		event, ok := eventMap(events).lookup(entries[i].EventNID)
	for _, entry := range eventEntries {
		event, ok := eventMap(events).lookup(entry.EventNID)
		if !ok {
			panic(fmt.Errorf("Corrupt DB: Missing event numeric ID %d", entries[i].EventNID))
			panic(fmt.Errorf("Corrupt DB: Missing event numeric ID %d", entry.EventNID))
		}
		result[i] = event.Event
		eventIDMap[event.Event.EventID()] = entries[i]
		result = append(result, event.Event)
		eventIDMap[event.Event.EventID()] = entry
		v.events[entry.EventNID] = event.Event
	}
	return result, eventIDMap, nil
}

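The loadStateEvents change is a memoisation: entries whose events are already in the new events map are served from memory, and only the misses hit v.db.Events before being remembered. The same pattern in isolation, using hypothetical types in place of the roomserver ones:

package cacheexample

// fetchFunc stands in for v.db.Events: it loads values for the IDs that were
// not already cached.
type fetchFunc func(ids []int64) (map[int64]string, error)

// cachedLoad sketches the memoisation used by loadStateEvents: answer from the
// in-memory map where possible, fetch only the misses, then remember them.
func cachedLoad(cache map[int64]string, ids []int64, fetch fetchFunc) ([]string, error) {
	result := make([]string, 0, len(ids))
	var misses []int64
	for _, id := range ids {
		if v, ok := cache[id]; ok {
			result = append(result, v)
		} else {
			misses = append(misses, id)
		}
	}
	if len(misses) == 0 {
		return result, nil
	}
	fetched, err := fetch(misses)
	if err != nil {
		return nil, err
	}
	for _, id := range misses {
		v := fetched[id]
		cache[id] = v
		result = append(result, v)
	}
	return result, nil
}
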
@@ -1103,7 +1039,7 @@ func (s stateNIDSorter) Len() int { return len(s) }
func (s stateNIDSorter) Less(i, j int) bool { return s[i] < s[j] }
func (s stateNIDSorter) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

func uniqueStateSnapshotNIDs(nids []types.StateSnapshotNID) []types.StateSnapshotNID {
func UniqueStateSnapshotNIDs(nids []types.StateSnapshotNID) []types.StateSnapshotNID {
	return nids[:util.SortAndUnique(stateNIDSorter(nids))]
}

111 setup/base.go

@@ -15,22 +15,28 @@
package setup

import (
	"context"
	"crypto/tls"
	"fmt"
	"io"
	"net"
	"net/http"
	"net/url"
	"os"
	"os/signal"
	"syscall"
	"time"

	"github.com/matrix-org/dendrite/internal/caching"
	"github.com/matrix-org/dendrite/internal/httputil"
	"github.com/matrix-org/gomatrixserverlib"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"go.uber.org/atomic"
	"golang.org/x/net/http2"
	"golang.org/x/net/http2/h2c"

	"github.com/matrix-org/dendrite/internal"
	"github.com/matrix-org/dendrite/setup/process"
	"github.com/matrix-org/dendrite/userapi/storage/accounts"

	"github.com/gorilla/mux"

@@ -61,6 +67,7 @@ import (
// should only be used during start up.
// Must be closed when shutting down.
type BaseDendrite struct {
	*process.ProcessContext
	componentName      string
	tracerCloser       io.Closer
	PublicClientAPIMux *mux.Router

@@ -73,6 +80,7 @@ type BaseDendrite struct {
	httpClient *http.Client
	Cfg        *config.Dendrite
	Caches     *caching.Caches
	DNSCache   *gomatrixserverlib.DNSCache
	// KafkaConsumer sarama.Consumer
	// KafkaProducer sarama.SyncProducer
}

@@ -111,6 +119,20 @@ func NewBaseDendrite(cfg *config.Dendrite, componentName string, useHTTPAPIs boo
		logrus.WithError(err).Warnf("Failed to create cache")
	}

	var dnsCache *gomatrixserverlib.DNSCache
	if cfg.Global.DNSCache.Enabled {
		lifetime := time.Second * cfg.Global.DNSCache.CacheLifetime
		dnsCache = gomatrixserverlib.NewDNSCache(
			cfg.Global.DNSCache.CacheSize,
			lifetime,
		)
		logrus.Infof(
			"DNS cache enabled (size %d, lifetime %s)",
			cfg.Global.DNSCache.CacheSize,
			lifetime,
		)
	}

	apiClient := http.Client{
		Timeout: time.Minute * 10,
		Transport: &http2.Transport{

@@ -146,12 +168,15 @@ func NewBaseDendrite(cfg *config.Dendrite, componentName string, useHTTPAPIs boo
	// We need to be careful with media APIs if they read from a filesystem to make sure they
	// are not inadvertently reading paths without cleaning, else this could introduce a
	// directory traversal attack e.g /../../../etc/passwd

	return &BaseDendrite{
		ProcessContext:         process.NewProcessContext(),
		componentName:          componentName,
		UseHTTPAPIs:            useHTTPAPIs,
		tracerCloser:           closer,
		Cfg:                    cfg,
		Caches:                 cache,
		DNSCache:               dnsCache,
		PublicClientAPIMux:     mux.NewRouter().SkipClean(true).PathPrefix(httputil.PublicClientPathPrefix).Subrouter().UseEncodedPath(),
		PublicFederationAPIMux: mux.NewRouter().SkipClean(true).PathPrefix(httputil.PublicFederationPathPrefix).Subrouter().UseEncodedPath(),
		PublicKeyAPIMux:        mux.NewRouter().SkipClean(true).PathPrefix(httputil.PublicKeyPathPrefix).Subrouter().UseEncodedPath(),

@@ -250,11 +275,17 @@ func (b *BaseDendrite) CreateAccountsDB() accounts.Database {
// Should only be called once per component.
func (b *BaseDendrite) CreateClient() *gomatrixserverlib.Client {
	if b.Cfg.Global.DisableFederation {
		return gomatrixserverlib.NewClientWithTransport(noOpHTTPTransport)
		return gomatrixserverlib.NewClient(
			gomatrixserverlib.WithTransport(noOpHTTPTransport),
		)
	}
	client := gomatrixserverlib.NewClient(
		b.Cfg.FederationSender.DisableTLSValidation,
	)
	opts := []gomatrixserverlib.ClientOption{
		gomatrixserverlib.WithSkipVerify(b.Cfg.FederationSender.DisableTLSValidation),
	}
	if b.Cfg.Global.DNSCache.Enabled {
		opts = append(opts, gomatrixserverlib.WithDNSCache(b.DNSCache))
	}
	client := gomatrixserverlib.NewClient(opts...)
	client.SetUserAgent(fmt.Sprintf("Dendrite/%s", internal.VersionString()))
	return client
}

@@ -263,14 +294,21 @@ func (b *BaseDendrite) CreateClient() *gomatrixserverlib.Client {
// once per component.
func (b *BaseDendrite) CreateFederationClient() *gomatrixserverlib.FederationClient {
	if b.Cfg.Global.DisableFederation {
		return gomatrixserverlib.NewFederationClientWithTransport(
		return gomatrixserverlib.NewFederationClient(
			b.Cfg.Global.ServerName, b.Cfg.Global.KeyID, b.Cfg.Global.PrivateKey,
			b.Cfg.FederationSender.DisableTLSValidation, noOpHTTPTransport,
			gomatrixserverlib.WithTransport(noOpHTTPTransport),
		)
	}
	client := gomatrixserverlib.NewFederationClientWithTimeout(
		b.Cfg.Global.ServerName, b.Cfg.Global.KeyID, b.Cfg.Global.PrivateKey,
		b.Cfg.FederationSender.DisableTLSValidation, time.Minute*5,
	opts := []gomatrixserverlib.ClientOption{
		gomatrixserverlib.WithTimeout(time.Minute * 5),
		gomatrixserverlib.WithSkipVerify(b.Cfg.FederationSender.DisableTLSValidation),
	}
	if b.Cfg.Global.DNSCache.Enabled {
		opts = append(opts, gomatrixserverlib.WithDNSCache(b.DNSCache))
	}
	client := gomatrixserverlib.NewFederationClient(
		b.Cfg.Global.ServerName, b.Cfg.Global.KeyID,
		b.Cfg.Global.PrivateKey, opts...,
	)
	client.SetUserAgent(fmt.Sprintf("Dendrite/%s", internal.VersionString()))
	return client

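Both constructors now assemble a slice of gomatrixserverlib client options and only append the DNS cache option when it is enabled. A condensed sketch of that pattern outside BaseDendrite; the cache size and lifetime literals are placeholders:

package clientexample

import (
	"time"

	"github.com/matrix-org/gomatrixserverlib"
)

// newClient sketches the option-assembly pattern used by CreateClient and
// CreateFederationClient above; the cache parameters are placeholder values.
func newClient(skipVerify, useDNSCache bool) *gomatrixserverlib.Client {
	opts := []gomatrixserverlib.ClientOption{
		gomatrixserverlib.WithTimeout(time.Minute * 5),
		gomatrixserverlib.WithSkipVerify(skipVerify),
	}
	if useDNSCache {
		// Only wire in a DNS cache when asked to, mirroring the diff above.
		dnsCache := gomatrixserverlib.NewDNSCache(256, 5*time.Minute)
		opts = append(opts, gomatrixserverlib.WithDNSCache(dnsCache))
	}
	return gomatrixserverlib.NewClient(opts...)
}
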
@@ -325,14 +363,26 @@ func (b *BaseDendrite) SetupAndServeHTTP(

	if internalAddr != NoListener && internalAddr != externalAddr {
		go func() {
			var internalShutdown atomic.Bool // RegisterOnShutdown can be called more than once
			logrus.Infof("Starting internal %s listener on %s", b.componentName, internalServ.Addr)
			b.ProcessContext.ComponentStarted()
			internalServ.RegisterOnShutdown(func() {
				if internalShutdown.CAS(false, true) {
					b.ProcessContext.ComponentFinished()
					logrus.Infof("Stopped internal HTTP listener")
				}
			})
			if certFile != nil && keyFile != nil {
				if err := internalServ.ListenAndServeTLS(*certFile, *keyFile); err != nil {
					logrus.WithError(err).Fatal("failed to serve HTTPS")
					if err != http.ErrServerClosed {
						logrus.WithError(err).Fatal("failed to serve HTTPS")
					}
				}
			} else {
				if err := internalServ.ListenAndServe(); err != nil {
					logrus.WithError(err).Fatal("failed to serve HTTP")
					if err != http.ErrServerClosed {
						logrus.WithError(err).Fatal("failed to serve HTTP")
					}
				}
			}
			logrus.Infof("Stopped internal %s listener on %s", b.componentName, internalServ.Addr)

@@ -341,19 +391,52 @@ func (b *BaseDendrite) SetupAndServeHTTP(

	if externalAddr != NoListener {
		go func() {
			var externalShutdown atomic.Bool // RegisterOnShutdown can be called more than once
			logrus.Infof("Starting external %s listener on %s", b.componentName, externalServ.Addr)
			b.ProcessContext.ComponentStarted()
			externalServ.RegisterOnShutdown(func() {
				if externalShutdown.CAS(false, true) {
					b.ProcessContext.ComponentFinished()
					logrus.Infof("Stopped external HTTP listener")
				}
			})
			if certFile != nil && keyFile != nil {
				if err := externalServ.ListenAndServeTLS(*certFile, *keyFile); err != nil {
					logrus.WithError(err).Fatal("failed to serve HTTPS")
					if err != http.ErrServerClosed {
						logrus.WithError(err).Fatal("failed to serve HTTPS")
					}
				}
			} else {
				if err := externalServ.ListenAndServe(); err != nil {
					logrus.WithError(err).Fatal("failed to serve HTTP")
					if err != http.ErrServerClosed {
						logrus.WithError(err).Fatal("failed to serve HTTP")
					}
				}
			}
			logrus.Infof("Stopped external %s listener on %s", b.componentName, externalServ.Addr)
		}()
	}

	select {}
	<-b.ProcessContext.WaitForShutdown()

	ctx, cancel := context.WithCancel(context.Background())
	cancel()

	_ = internalServ.Shutdown(ctx)
	_ = externalServ.Shutdown(ctx)
	logrus.Infof("Stopped HTTP listeners")
}

func (b *BaseDendrite) WaitForShutdown() {
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
	<-sigs
	signal.Reset(syscall.SIGINT, syscall.SIGTERM)

	logrus.Warnf("Shutdown signal received")

	b.ProcessContext.ShutdownDendrite()
	b.ProcessContext.WaitForComponentsToFinish()

	logrus.Warnf("Dendrite is exiting now")
}

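The listener changes rely on the standard net/http contract: once Shutdown is called, ListenAndServe returns http.ErrServerClosed, so only other errors should be treated as fatal. A minimal generic sketch of that contract, independent of Dendrite's process bookkeeping:

package shutdownexample

import (
	"context"
	"log"
	"net/http"
	"time"
)

// serveUntil sketches the net/http behaviour the diff above depends on:
// ListenAndServe returns http.ErrServerClosed after Shutdown, and only other
// errors should be fatal.
func serveUntil(stop <-chan struct{}) {
	srv := &http.Server{Addr: ":0"}
	go func() {
		<-stop
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()
		_ = srv.Shutdown(ctx) // unblocks ListenAndServe
	}()
	if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
		log.Fatalf("failed to serve HTTP: %v", err)
	}
	log.Println("listener stopped cleanly")
}
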
@@ -344,6 +344,7 @@ func (c *Dendrite) Wiring() {

	c.ClientAPI.Derived = &c.Derived
	c.AppServiceAPI.Derived = &c.Derived
	c.ClientAPI.MSCs = &c.MSCs
}

// Error returns a string detailing how many errors were contained within a

@@ -37,6 +37,8 @@ type ClientAPI struct {

	// Rate-limiting options
	RateLimiting RateLimiting `yaml:"rate_limiting"`

	MSCs *MSCs `yaml:"mscs"`
}

func (c *ClientAPI) Defaults() {

@@ -48,6 +48,9 @@ type Global struct {

	// Metrics configuration
	Metrics Metrics `yaml:"metrics"`

	// DNS caching options for all outbound HTTP requests
	DNSCache DNSCacheOptions `yaml:"dns_cache"`
}

func (c *Global) Defaults() {

@@ -59,6 +62,7 @@ func (c *Global) Defaults() {

	c.Kafka.Defaults()
	c.Metrics.Defaults()
	c.DNSCache.Defaults()
}

func (c *Global) Verify(configErrs *ConfigErrors, isMonolith bool) {

@@ -67,6 +71,7 @@ func (c *Global) Verify(configErrs *ConfigErrors, isMonolith bool) {

	c.Kafka.Verify(configErrs, isMonolith)
	c.Metrics.Verify(configErrs, isMonolith)
	c.DNSCache.Verify(configErrs, isMonolith)
}

type OldVerifyKeys struct {

@@ -140,3 +145,23 @@ func (c DatabaseOptions) MaxOpenConns() int {
func (c DatabaseOptions) ConnMaxLifetime() time.Duration {
	return time.Duration(c.ConnMaxLifetimeSeconds) * time.Second
}

type DNSCacheOptions struct {
	// Whether the DNS cache is enabled or not
	Enabled bool `yaml:"enabled"`
	// How many entries to store in the DNS cache at a given time
	CacheSize int `yaml:"cache_size"`
	// How long a cache entry should be considered valid for
	CacheLifetime time.Duration `yaml:"cache_lifetime"`
}

func (c *DNSCacheOptions) Defaults() {
	c.Enabled = false
	c.CacheSize = 256
	c.CacheLifetime = time.Minute * 5
}

func (c *DNSCacheOptions) Verify(configErrs *ConfigErrors, isMonolith bool) {
	checkPositive(configErrs, "cache_size", int64(c.CacheSize))
	checkPositive(configErrs, "cache_lifetime", int64(c.CacheLifetime))
}

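A small sketch of how these new options slot into the existing Defaults/Verify lifecycle, done programmatically rather than guessing at the exact YAML shape; the overridden values are placeholders:

package configexample

import "github.com/matrix-org/dendrite/setup/config"

// dnsCacheSettings sketches the intended lifecycle of the new options; the
// overrides are placeholder values, not recommendations.
func dnsCacheSettings() (config.DNSCacheOptions, config.ConfigErrors) {
	var c config.DNSCacheOptions
	c.Defaults() // enabled=false, cache_size=256, cache_lifetime=5m
	c.Enabled = true
	c.CacheSize = 512 // placeholder override

	var errs config.ConfigErrors
	c.Verify(&errs, true) // checks cache_size and cache_lifetime are positive
	return c, errs
}
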
@@ -3,7 +3,11 @@ package config
type MSCs struct {
	Matrix *Global `yaml:"-"`

	// The MSCs to enable, currently only `msc2836` is supported.
	// The MSCs to enable. Supported MSCs include:
	// 'msc2444': Peeking over federation - https://github.com/matrix-org/matrix-doc/pull/2444
	// 'msc2753': Peeking via /sync - https://github.com/matrix-org/matrix-doc/pull/2753
	// 'msc2836': Threading - https://github.com/matrix-org/matrix-doc/pull/2836
	// 'msc2946': Spaces Summary - https://github.com/matrix-org/matrix-doc/pull/2946
	MSCs []string `yaml:"mscs"`

	Database DatabaseOptions `yaml:"database"`

@@ -14,6 +18,16 @@ func (c *MSCs) Defaults() {
	c.Database.ConnectionString = "file:mscs.db"
}

// Enabled returns true if the given msc is enabled. Should in the form 'msc12345'.
func (c *MSCs) Enabled(msc string) bool {
	for _, m := range c.MSCs {
		if m == msc {
			return true
		}
	}
	return false
}

func (c *MSCs) Verify(configErrs *ConfigErrors, isMonolith bool) {
	checkNotEmpty(configErrs, "mscs.database.connection_string", string(c.Database.ConnectionString))
}

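The new Enabled helper is how components can gate MSC-specific behaviour, for example the peeking MSCs listed in the comment above. A hypothetical gate:

package mscexample

import "github.com/matrix-org/dendrite/setup/config"

// peekingEnabled is a hypothetical helper showing the call shape of the new
// MSCs.Enabled method; msc2444 and msc2753 are the peeking MSCs named in the
// comment above.
func peekingEnabled(cfg *config.MSCs) bool {
	return cfg.Enabled("msc2444") || cfg.Enabled("msc2753")
}
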
@@ -27,6 +27,7 @@ import (
	"github.com/matrix-org/dendrite/mediaapi"
	roomserverAPI "github.com/matrix-org/dendrite/roomserver/api"
	"github.com/matrix-org/dendrite/setup/config"
	"github.com/matrix-org/dendrite/setup/process"
	serverKeyAPI "github.com/matrix-org/dendrite/signingkeyserver/api"
	"github.com/matrix-org/dendrite/syncapi"
	userapi "github.com/matrix-org/dendrite/userapi/api"

@@ -56,21 +57,22 @@ type Monolith struct {
}

// AddAllPublicRoutes attaches all public paths to the given router
func (m *Monolith) AddAllPublicRoutes(csMux, ssMux, keyMux, mediaMux *mux.Router) {
func (m *Monolith) AddAllPublicRoutes(process *process.ProcessContext, csMux, ssMux, keyMux, mediaMux *mux.Router) {
	clientapi.AddPublicRoutes(
		csMux, &m.Config.ClientAPI, m.AccountDB,
		m.FedClient, m.RoomserverAPI,
		m.EDUInternalAPI, m.AppserviceAPI, transactions.New(),
		m.FederationSenderAPI, m.UserAPI, m.KeyAPI, m.ExtPublicRoomsProvider,
		&m.Config.MSCs,
	)
	federationapi.AddPublicRoutes(
		ssMux, keyMux, &m.Config.FederationAPI, m.UserAPI, m.FedClient,
		m.KeyRing, m.RoomserverAPI, m.FederationSenderAPI,
		m.EDUInternalAPI, m.KeyAPI,
		m.EDUInternalAPI, m.KeyAPI, &m.Config.MSCs,
	)
	mediaapi.AddPublicRoutes(mediaMux, &m.Config.MediaAPI, m.UserAPI, m.Client)
	syncapi.AddPublicRoutes(
		csMux, m.UserAPI, m.RoomserverAPI,
		process, csMux, m.UserAPI, m.RoomserverAPI,
		m.KeyAPI, m.FedClient, &m.Config.SyncAPI,
	)
}
Some files were not shown because too many files have changed in this diff.