Sync dendrite fork changes for gating and single chain support (#778)

* Latest dendrite main (8c7b274e4e)
* Gating implementation from John and Tak

Fixes https://github.com/matrix-org/dendrite/issues/2838 and
https://github.com/matrix-org/dendrite/issues/2842

Co-authored-by: Tak Wai Wong <tak@hntlabs.com>

Commit 424df14000 (parent 6cce0b9b43)
.github/workflows/dendrite.yml (7 lines changed)

@@ -109,6 +109,11 @@ jobs:
uses: actions/setup-go@v3
with:
go-version: ${{ matrix.go }}
- name: Set up gotestfmt
uses: gotesttools/gotestfmt-action@v2
with:
# Optional: pass GITHUB_TOKEN to avoid rate limiting.
token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions/cache@v3
with:
path: |

@@ -117,7 +122,7 @@ jobs:
key: ${{ runner.os }}-go${{ matrix.go }}-test-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go${{ matrix.go }}-test-
- run: go test ./...
- run: go test -json -v ./... 2>&1 | gotestfmt
env:
POSTGRES_HOST: localhost
POSTGRES_USER: postgres
.github/workflows/schedules.yaml (new file, 128 lines)

@@ -0,0 +1,128 @@
name: Scheduled

on:
schedule:
- cron: '0 0 * * *' # every day at midnight
workflow_dispatch:

concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true

jobs:
# run go test with different go versions
test:
timeout-minutes: 20
name: Unit tests (Go ${{ matrix.go }})
runs-on: ubuntu-latest
# Service containers to run with `container-job`
services:
# Label used to access the service container
postgres:
# Docker Hub image
image: postgres:13-alpine
# Provide the password for postgres
env:
POSTGRES_USER: postgres
POSTGRES_PASSWORD: postgres
POSTGRES_DB: dendrite
ports:
# Maps tcp port 5432 on service container to the host
- 5432:5432
# Set health checks to wait until postgres has started
options: >-
--health-cmd pg_isready
--health-interval 10s
--health-timeout 5s
--health-retries 5
strategy:
fail-fast: false
matrix:
go: ["1.18", "1.19"]
steps:
- uses: actions/checkout@v3
- name: Setup go
uses: actions/setup-go@v3
with:
go-version: ${{ matrix.go }}
- uses: actions/cache@v3
with:
path: |
~/.cache/go-build
~/go/pkg/mod
key: ${{ runner.os }}-go${{ matrix.go }}-test-race-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go${{ matrix.go }}-test-race-
- run: go test -race ./...
env:
POSTGRES_HOST: localhost
POSTGRES_USER: postgres
POSTGRES_PASSWORD: postgres
POSTGRES_DB: dendrite

# Dummy step to gate other tests on without repeating the whole list
initial-tests-done:
name: Initial tests passed
needs: [test]
runs-on: ubuntu-latest
if: ${{ !cancelled() }} # Run this even if prior jobs were skipped
steps:
- name: Check initial tests passed
uses: re-actors/alls-green@release/v1
with:
jobs: ${{ toJSON(needs) }}

# run Sytest in different variations
sytest:
timeout-minutes: 60
needs: initial-tests-done
name: "Sytest (${{ matrix.label }})"
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
include:
- label: SQLite

- label: SQLite, full HTTP APIs
api: full-http

- label: PostgreSQL
postgres: postgres

- label: PostgreSQL, full HTTP APIs
postgres: postgres
api: full-http
container:
image: matrixdotorg/sytest-dendrite:latest
volumes:
- ${{ github.workspace }}:/src
env:
POSTGRES: ${{ matrix.postgres && 1}}
API: ${{ matrix.api && 1 }}
SYTEST_BRANCH: ${{ github.head_ref }}
RACE_DETECTION: 1
steps:
- uses: actions/checkout@v2
- name: Run Sytest
run: /bootstrap.sh dendrite
working-directory: /src
- name: Summarise results.tap
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
- name: Sytest List Maintenance
if: ${{ always() }}
run: /src/show-expected-fail-tests.sh /logs/results.tap /src/sytest-whitelist /src/sytest-blacklist
continue-on-error: true # not fatal
- name: Are We Synapse Yet?
if: ${{ always() }}
run: /src/are-we-synapse-yet.py /logs/results.tap -v
continue-on-error: true # not fatal
- name: Upload Sytest logs
uses: actions/upload-artifact@v2
if: ${{ always() }}
with:
name: Sytest Logs - ${{ job.status }} - (Dendrite, ${{ join(matrix.*, ', ') }})
path: |
/logs/results.tap
/logs/**/*.log*
CHANGES.md (38 lines changed)

@@ -1,5 +1,43 @@
# Changelog

## Dendrite 0.10.5 (2022-10-31)

### Features

* It is now possible to use hCaptcha instead of reCAPTCHA for protecting registration
* A new `auto_join_rooms` configuration option has been added for automatically joining new users to a set of rooms
* A new `/_dendrite/admin/downloadState/{serverName}/{roomID}` endpoint has been added, which allows a server administrator to attempt to repair a room with broken room state by downloading a state snapshot from another federated server in the room

### Fixes

* Querying cross-signing keys for users should now be considerably faster
* A bug in state resolution where some events were not correctly selected for third-party invites has been fixed
* A bug in state resolution which could result in `not in room` event rejections has been fixed
* When accepting a DM invite, it should now be possible to see messages that were sent before the invite was accepted
* Claiming remote E2EE one-time keys has been refactored and should be more reliable now
* Various fixes have been made to the `/members` endpoint, which may help with E2EE reliability and clients rendering memberships
* A race condition in the federation API destination queues has been fixed when associating queued events with remote server destinations
* A bug in the sync API where too many events were selected resulting in high CPU usage has been fixed
* Configuring the avatar URL for the Server Notices user should work correctly now

## Dendrite 0.10.4 (2022-10-21)

### Features

* Various tables belonging to the user API will be renamed so that they are namespaced with the `userapi_` prefix
* Note that, after upgrading to this version, you should not revert to an older version of Dendrite as the database changes **will not** be reverted automatically
* The backoff and retry behaviour in the federation API has been refactored and improved

### Fixes

* Private read receipt support is now advertised in the client `/versions` endpoint
* Private read receipts will now clear notification counts properly
* A bug where a false `leave` membership transition was inserted into the timeline after accepting an invite has been fixed
* Some panics caused by concurrent map writes in the key server have been fixed
* The sync API now calculates membership transitions from state deltas more accurately
* Transaction IDs are now scoped to endpoints, which should fix some bugs where transaction ID reuse could cause nonsensical cached responses from some endpoints
* The length of the `type`, `sender`, `state_key` and `room_id` fields in events are now verified by number of bytes rather than codepoints after a spec clarification, reverting a change made in Dendrite 0.9.6

## Dendrite 0.10.3 (2022-10-14)

### Features
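The new `/_dendrite/admin/downloadState/{serverName}/{roomID}` endpoint mentioned in the 0.10.5 changelog is wired up later in this diff as a GET route behind MakeAdminAPI. Below is a minimal sketch of calling it from Go; the homeserver URL, admin access token, remote server name and room ID are placeholders for illustration only.

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	// All values below are hypothetical placeholders.
	const homeserver = "https://dendrite.example.com"
	const adminToken = "syt_admin_access_token"
	remoteServer := "other.example.org"
	roomID := "!abcdefgh:dendrite.example.com"

	endpoint := fmt.Sprintf("%s/_dendrite/admin/downloadState/%s/%s",
		homeserver, url.PathEscape(remoteServer), url.PathEscape(roomID))

	req, err := http.NewRequest(http.MethodGet, endpoint, nil)
	if err != nil {
		panic(err)
	}
	// Admin endpoints are authenticated with the admin user's access token.
	req.Header.Set("Authorization", "Bearer "+adminToken)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	// AdminDownloadState (shown further down in this diff) returns 200 and an empty JSON object on success.
	fmt.Println(resp.Status, string(body))
}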
build/docker/Dockerfile.demo-yggdrasil (new file, 25 lines)

@@ -0,0 +1,25 @@
FROM docker.io/golang:1.19-alpine AS base

RUN apk --update --no-cache add bash build-base

WORKDIR /build

COPY . /build

RUN mkdir -p bin
RUN go build -trimpath -o bin/ ./cmd/dendrite-demo-yggdrasil
RUN go build -trimpath -o bin/ ./cmd/create-account
RUN go build -trimpath -o bin/ ./cmd/generate-keys

FROM alpine:latest
LABEL org.opencontainers.image.title="Dendrite (Yggdrasil demo)"
LABEL org.opencontainers.image.description="Next-generation Matrix homeserver written in Go"
LABEL org.opencontainers.image.source="https://github.com/matrix-org/dendrite"
LABEL org.opencontainers.image.licenses="Apache-2.0"

COPY --from=base /build/bin/* /usr/bin/

VOLUME /etc/dendrite
WORKDIR /etc/dendrite

ENTRYPOINT ["/usr/bin/dendrite-demo-yggdrasil"]
@@ -101,18 +101,46 @@ func (m *DendriteMonolith) SessionCount() int {
return len(m.PineconeQUIC.Protocol("matrix").Sessions())
}

func (m *DendriteMonolith) RegisterNetworkInterface(name string, index int, mtu int, up bool, broadcast bool, loopback bool, pointToPoint bool, multicast bool, addrs string) {
m.PineconeMulticast.RegisterInterface(pineconeMulticast.InterfaceInfo{
Name: name,
Index: index,
Mtu: mtu,
Up: up,
Broadcast: broadcast,
Loopback: loopback,
PointToPoint: pointToPoint,
Multicast: multicast,
Addrs: addrs,
})
type InterfaceInfo struct {
Name string
Index int
Mtu int
Up bool
Broadcast bool
Loopback bool
PointToPoint bool
Multicast bool
Addrs string
}

type InterfaceRetriever interface {
CacheCurrentInterfaces() int
GetCachedInterface(index int) *InterfaceInfo
}

func (m *DendriteMonolith) RegisterNetworkCallback(intfCallback InterfaceRetriever) {
callback := func() []pineconeMulticast.InterfaceInfo {
count := intfCallback.CacheCurrentInterfaces()
intfs := []pineconeMulticast.InterfaceInfo{}
for i := 0; i < count; i++ {
iface := intfCallback.GetCachedInterface(i)
if iface != nil {
intfs = append(intfs, pineconeMulticast.InterfaceInfo{
Name: iface.Name,
Index: iface.Index,
Mtu: iface.Mtu,
Up: iface.Up,
Broadcast: iface.Broadcast,
Loopback: iface.Loopback,
PointToPoint: iface.PointToPoint,
Multicast: iface.Multicast,
Addrs: iface.Addrs,
})
}
}
return intfs
}
m.PineconeMulticast.RegisterNetworkCallback(callback)
}

func (m *DendriteMonolith) SetMulticastEnabled(enabled bool) {
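The new InterfaceRetriever interface replaces the old per-interface RegisterNetworkInterface call: the embedding application hands the monolith an object that snapshots the current network interfaces on demand, and Pinecone pulls the list through the callback. A minimal sketch of an implementation is below; it assumes it lives in the same package as the InterfaceInfo type shown above and simply backs the retriever with Go's net.Interfaces(), which a mobile binding would replace with its own platform lookup.

// Sketch only: a host-side InterfaceRetriever backed by the standard library.
import "net"

type staticRetriever struct {
	cached []InterfaceInfo
}

func (r *staticRetriever) CacheCurrentInterfaces() int {
	r.cached = r.cached[:0]
	ifaces, err := net.Interfaces()
	if err != nil {
		return 0
	}
	for _, iface := range ifaces {
		var addrs string
		if as, err := iface.Addrs(); err == nil {
			for _, a := range as {
				addrs += a.String() + " "
			}
		}
		r.cached = append(r.cached, InterfaceInfo{
			Name:         iface.Name,
			Index:        iface.Index,
			Mtu:          iface.MTU,
			Up:           iface.Flags&net.FlagUp != 0,
			Broadcast:    iface.Flags&net.FlagBroadcast != 0,
			Loopback:     iface.Flags&net.FlagLoopback != 0,
			PointToPoint: iface.Flags&net.FlagPointToPoint != 0,
			Multicast:    iface.Flags&net.FlagMulticast != 0,
			Addrs:        addrs,
		})
	}
	return len(r.cached)
}

func (r *staticRetriever) GetCachedInterface(index int) *InterfaceInfo {
	if index < 0 || index >= len(r.cached) {
		return nil
	}
	return &r.cached[index]
}

// Usage (sketch): monolith.RegisterNetworkCallback(&staticRetriever{})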
@@ -1,6 +1,6 @@
#syntax=docker/dockerfile:1.2

FROM golang:1.19-buster as build
FROM golang:1.18-stretch as build
RUN apt-get update && apt-get install -y sqlite3
WORKDIR /build
@@ -8,7 +8,7 @@
#
# Use these mounts to make use of this dockerfile:
# COMPLEMENT_HOST_MOUNTS='/your/local/dendrite:/dendrite:ro;/your/go/path:/go:ro'
FROM golang:1.19-buster
FROM golang:1.18-stretch
RUN apt-get update && apt-get install -y sqlite3

ENV SERVER_NAME=localhost
@@ -50,4 +50,4 @@ CMD /build/run_postgres.sh && ./generate-keys --keysize 1024 --server $SERVER_NA
# Bump max_open_conns up here in the global database config
sed -i 's/max_open_conns:.*$/max_open_conns: 1990/g' dendrite.yaml && \
cp /complement/ca/ca.crt /usr/local/share/ca-certificates/ && update-ca-certificates && \
exec ./dendrite-monolith-server --really-enable-open-registration --tls-cert server.crt --tls-key server.key --config dendrite.yaml -api=${API:-0}
exec ./dendrite-monolith-server --really-enable-open-registration --tls-cert server.crt --tls-key server.key --config dendrite.yaml -api=${API:-0}
@@ -67,7 +67,7 @@ func (pk LoginPublicKeyEthereum) GetType() string {
}

func (pk LoginPublicKeyEthereum) AccountExists(ctx context.Context) (string, *jsonerror.MatrixError) {
localPart, err := userutil.ParseUsernameParam(pk.UserId, &pk.config.Matrix.ServerName)
localPart, _, err := userutil.ParseUsernameParam(pk.UserId, pk.config.Matrix)
if err != nil {
// userId does not exist
return "", jsonerror.Forbidden("the address is incorrect, or the account does not exist.")

@@ -129,7 +129,7 @@ func (pk LoginPublicKeyEthereum) ValidateLoginResponse() (bool, *jsonerror.Matri
}

// Error if the chainId is not supported by the server.
if !contains(pk.config.PublicKeyAuthentication.Ethereum.ChainIDs, message.GetChainID()) {
if pk.config.PublicKeyAuthentication.Ethereum.GetChainID() != message.GetChainID() {
return false, jsonerror.Forbidden("chainId")
}

@@ -156,12 +156,3 @@ func (pk LoginPublicKeyEthereum) verifyMessageUserId(message *siwe.Message) bool
// one derived from the signed message.
return pk.UserId == strings.ToLower(expectedUserId)
}

func contains(list []int, element int) bool {
for _, i := range list {
if i == element {
return true
}
}
return false
}
@@ -18,13 +18,14 @@ import (
"context"
"fmt"
"net/http"
"strconv"
"strings"
"testing"

"github.com/matrix-org/dendrite/clientapi/jsonerror"
"github.com/matrix-org/dendrite/internal/mapsutil"
"github.com/matrix-org/dendrite/setup/config"
"github.com/matrix-org/dendrite/test"
testutil "github.com/matrix-org/dendrite/test"
uapi "github.com/matrix-org/dendrite/userapi/api"
"github.com/stretchr/testify/assert"
)

@@ -35,19 +36,17 @@ type loginContext struct {
}

func createLoginContext(_ *testing.T) *loginContext {
chainIds := []int{4}

cfg := &config.ClientAPI{
Matrix: &config.Global{
ServerName: test.TestServerName,
ServerName: testutil.TestServerName,
},
Derived: &config.Derived{},
PasswordAuthenticationDisabled: true,
PublicKeyAuthentication: config.PublicKeyAuthentication{
Ethereum: config.EthereumAuthConfig{
Enabled: true,
Version: 1,
ChainIDs: chainIds,
Enabled: true,
Version: 1,
ConfigChainID: strconv.Itoa(testutil.EthereumTestNetworkId),
},
},
}

@@ -154,9 +153,9 @@ func TestLoginPublicKeyEthereum(t *testing.T) {
var userAPI fakePublicKeyUserApi
ctx := context.Background()
loginContext := createLoginContext(t)
wallet, _ := test.CreateTestAccount()
message, _ := test.CreateEip4361TestMessage(wallet.PublicAddress)
signature, _ := test.SignMessage(message.String(), wallet.PrivateKey)
wallet, _ := testutil.CreateTestAccount()
message, _ := testutil.CreateEip4361TestMessage(wallet.PublicAddress)
signature, _ := testutil.SignMessage(message.String(), wallet.PrivateKey)
sessionId := publicKeyTestSession(
&ctx,
loginContext.config,

@@ -165,7 +164,7 @@ func TestLoginPublicKeyEthereum(t *testing.T) {
)

// Escape \t and \n. Work around for marshalling and unmarshalling message.
msgStr := test.FromEip4361MessageToString(message)
msgStr := testutil.FromEip4361MessageToString(message)
body := fmt.Sprintf(`{
"type": "m.login.publickey",
"auth": {

@@ -219,8 +218,8 @@ func TestLoginPublicKeyEthereumMissingSignature(t *testing.T) {
var userAPI fakePublicKeyUserApi
ctx := context.Background()
loginContext := createLoginContext(t)
wallet, _ := test.CreateTestAccount()
message, _ := test.CreateEip4361TestMessage(wallet.PublicAddress)
wallet, _ := testutil.CreateTestAccount()
message, _ := testutil.CreateEip4361TestMessage(wallet.PublicAddress)
sessionId := publicKeyTestSession(
&ctx,
loginContext.config,

@@ -229,7 +228,7 @@ func TestLoginPublicKeyEthereumMissingSignature(t *testing.T) {
)

// Escape \t and \n. Work around for marshalling and unmarshalling message.
msgStr := test.FromEip4361MessageToString(message)
msgStr := testutil.FromEip4361MessageToString(message)
body := fmt.Sprintf(`{
"type": "m.login.publickey",
"auth": {

@@ -280,7 +279,7 @@ func TestLoginPublicKeyEthereumEmptyMessage(t *testing.T) {
var userAPI fakePublicKeyUserApi
ctx := context.Background()
loginContext := createLoginContext(t)
wallet, _ := test.CreateTestAccount()
wallet, _ := testutil.CreateTestAccount()
sessionId := publicKeyTestSession(
&ctx,
loginContext.config,

@@ -333,7 +332,7 @@ func TestLoginPublicKeyEthereumWrongUserId(t *testing.T) {
var userAPI fakePublicKeyUserApi
ctx := context.Background()
loginContext := createLoginContext(t)
wallet, _ := test.CreateTestAccount()
wallet, _ := testutil.CreateTestAccount()
sessionId := publicKeyTestSession(
&ctx,
loginContext.config,
@@ -22,6 +22,7 @@ import (

"github.com/matrix-org/dendrite/clientapi/auth/authtypes"
"github.com/matrix-org/dendrite/setup/config"
testutil "github.com/matrix-org/dendrite/test"
"github.com/stretchr/testify/assert"
)

@@ -72,7 +73,10 @@ func TestLoginPublicKeyNewSession(t *testing.T) {
params,
"[object]")
ethParams := params.(config.EthereumAuthParams)
assert.NotEmptyf(ethParams.ChainIDs, "ChainIDs actual: empty, expected not empty")
assert.Equalf(
testutil.EthereumTestNetworkId,
ethParams.ChainID,
"ChainID actual: %d, expected %d", ethParams.ChainID, testutil.EthereumTestNetworkId)
assert.NotEmptyf(ethParams.Version, "Version actual: \"\", expected: not empty")
}
@@ -74,7 +74,7 @@ func (t *LoginTypePassword) Login(ctx context.Context, req interface{}) (*Login,
JSON: jsonerror.BadJSON("A password must be supplied."),
}
}
localpart, err := userutil.ParseUsernameParam(username, &t.Config.Matrix.ServerName)
localpart, _, err := userutil.ParseUsernameParam(username, t.Config.Matrix)
if err != nil {
return nil, &util.JSONResponse{
Code: http.StatusUnauthorized,
@@ -10,8 +10,8 @@ import (

func NewAuthorization(cfg *config.ClientAPI, rsAPI roomserver.ClientRoomserverAPI) authorization.Authorization {
// Load authorization manager for Zion
if cfg.PublicKeyAuthentication.Ethereum.EnableAuthz {
auth, err := zion.NewZionAuthorization(rsAPI)
if cfg.PublicKeyAuthentication.Ethereum.GetEnableAuthZ() {
auth, err := zion.NewZionAuthorization(cfg, rsAPI)

if err != nil {
log.Errorln("Failed to initialise Zion authorization manager. Using default.", err)
@@ -70,7 +70,7 @@ func AdminEvacuateUser(req *http.Request, cfg *config.ClientAPI, device *userapi
if err != nil {
return util.MessageResponse(http.StatusBadRequest, err.Error())
}
if domain != cfg.Matrix.ServerName {
if !cfg.Matrix.IsLocalServerName(domain) {
return util.JSONResponse{
Code: http.StatusBadRequest,
JSON: jsonerror.MissingArgument("User ID must belong to this server."),

@@ -169,7 +169,7 @@ func AdminMarkAsStale(req *http.Request, cfg *config.ClientAPI, keyAPI api.Clien
if err != nil {
return util.MessageResponse(http.StatusBadRequest, err.Error())
}
if domain == cfg.Matrix.ServerName {
if cfg.Matrix.IsLocalServerName(domain) {
return util.JSONResponse{
Code: http.StatusBadRequest,
JSON: jsonerror.InvalidParam("Can not mark local device list as stale"),

@@ -191,3 +191,43 @@ func AdminMarkAsStale(req *http.Request, cfg *config.ClientAPI, keyAPI api.Clien
JSON: struct{}{},
}
}

func AdminDownloadState(req *http.Request, cfg *config.ClientAPI, device *userapi.Device, rsAPI roomserverAPI.ClientRoomserverAPI) util.JSONResponse {
vars, err := httputil.URLDecodeMapValues(mux.Vars(req))
if err != nil {
return util.ErrorResponse(err)
}
roomID, ok := vars["roomID"]
if !ok {
return util.JSONResponse{
Code: http.StatusBadRequest,
JSON: jsonerror.MissingArgument("Expecting room ID."),
}
}
serverName, ok := vars["serverName"]
if !ok {
return util.JSONResponse{
Code: http.StatusBadRequest,
JSON: jsonerror.MissingArgument("Expecting remote server name."),
}
}
res := &roomserverAPI.PerformAdminDownloadStateResponse{}
if err := rsAPI.PerformAdminDownloadState(
req.Context(),
&roomserverAPI.PerformAdminDownloadStateRequest{
UserID: device.UserID,
RoomID: roomID,
ServerName: gomatrixserverlib.ServerName(serverName),
},
res,
); err != nil {
return jsonerror.InternalAPIError(req.Context(), err)
}
if err := res.Error; err != nil {
return err.JSONResponse()
}
return util.JSONResponse{
Code: 200,
JSON: map[string]interface{}{},
}
}
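A pattern that recurs throughout this diff is swapping direct comparisons against cfg.Matrix.ServerName for cfg.Matrix.IsLocalServerName(domain), which lets the server treat more than one configured server name as local. The helper itself is not part of this diff; a rough sketch of the idea, with hypothetical field names that are not Dendrite's actual config, might look like:

// Sketch only: illustrative, not the real config.Global implementation.
package config

import "github.com/matrix-org/gomatrixserverlib"

type Global struct {
	ServerName           gomatrixserverlib.ServerName
	SecondaryServerNames []gomatrixserverlib.ServerName // hypothetical field
}

// IsLocalServerName reports whether the given domain is one of the names
// this homeserver is configured to answer for.
func (g *Global) IsLocalServerName(name gomatrixserverlib.ServerName) bool {
	if name == g.ServerName {
		return true
	}
	for _, secondary := range g.SecondaryServerNames {
		if name == secondary {
			return true
		}
	}
	return false
}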
@@ -31,8 +31,7 @@ const recaptchaTemplate = `
<title>Authentication</title>
<meta name='viewport' content='width=device-width, initial-scale=1,
user-scalable=no, minimum-scale=1.0, maximum-scale=1.0'>
<script src="https://www.google.com/recaptcha/api.js"
async defer></script>
<script src="{{.apiJsUrl}}" async defer></script>
<script src="//code.jquery.com/jquery-1.11.2.min.js"></script>
<script>
function captchaDone() {

@@ -51,8 +50,8 @@ function captchaDone() {
Please verify that you're not a robot.
</p>
<input type="hidden" name="session" value="{{.session}}" />
<div class="g-recaptcha"
data-sitekey="{{.siteKey}}"
<div class="{{.sitekeyClass}}"
data-sitekey="{{.sitekey}}"
data-callback="captchaDone">
</div>
<noscript>

@@ -114,9 +113,12 @@ func AuthFallback(

serveRecaptcha := func() {
data := map[string]string{
"myUrl": req.URL.String(),
"session": sessionID,
"siteKey": cfg.RecaptchaPublicKey,
"myUrl": req.URL.String(),
"session": sessionID,
"apiJsUrl": cfg.RecaptchaApiJsUrl,
"sitekey": cfg.RecaptchaPublicKey,
"sitekeyClass": cfg.RecaptchaSitekeyClass,
"formField": cfg.RecaptchaFormField,
}
serveTemplate(w, recaptchaTemplate, data)
}

@@ -155,7 +157,7 @@ func AuthFallback(
return &res
}

response := req.Form.Get("g-recaptcha-response")
response := req.Form.Get(cfg.RecaptchaFormField)
if err := validateRecaptcha(cfg, response, clientIP); err != nil {
util.GetLogger(req.Context()).Error(err)
return err
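Making the api.js URL, widget CSS class and form field configurable is what lets the same fallback page serve either reCAPTCHA or hCaptcha, as noted in the 0.10.5 changelog. A small sketch of the two sets of values such a configuration might carry is below; the struct shape is assumed from the cfg.Recaptcha* field names used above, while the provider URLs, CSS classes and form fields are the publicly documented defaults of the two services.

package main

import "fmt"

// captchaSettings mirrors the cfg.Recaptcha* fields referenced in AuthFallback
// (struct shape assumed for illustration only).
type captchaSettings struct {
	ApiJsUrl     string
	SitekeyClass string
	FormField    string
}

func main() {
	recaptcha := captchaSettings{
		ApiJsUrl:     "https://www.google.com/recaptcha/api.js",
		SitekeyClass: "g-recaptcha",
		FormField:    "g-recaptcha-response",
	}
	hcaptcha := captchaSettings{
		ApiJsUrl:     "https://js.hcaptcha.com/1/api.js",
		SitekeyClass: "h-captcha",
		FormField:    "h-captcha-response",
	}
	fmt.Println(recaptcha, hcaptcha)
}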
@@ -169,9 +169,21 @@ func createRoom(
asAPI appserviceAPI.AppServiceInternalAPI,
evTime time.Time,
) util.JSONResponse {
_, userDomain, err := gomatrixserverlib.SplitID('@', device.UserID)
if err != nil {
util.GetLogger(ctx).WithError(err).Error("gomatrixserverlib.SplitID failed")
return jsonerror.InternalServerError()
}
if !cfg.Matrix.IsLocalServerName(userDomain) {
return util.JSONResponse{
Code: http.StatusForbidden,
JSON: jsonerror.Forbidden(fmt.Sprintf("User domain %q not configured locally", userDomain)),
}
}

// TODO (#267): Check room ID doesn't clash with an existing one, and we
// probably shouldn't be using pseudo-random strings, maybe GUIDs?
roomID := fmt.Sprintf("!%s:%s", util.RandomString(16), cfg.Matrix.ServerName)
roomID := fmt.Sprintf("!%s:%s", util.RandomString(16), userDomain)

logger := util.GetLogger(ctx)
userID := device.UserID

@@ -314,7 +326,7 @@ func createRoom(

var roomAlias string
if r.RoomAliasName != "" {
roomAlias = fmt.Sprintf("#%s:%s", r.RoomAliasName, cfg.Matrix.ServerName)
roomAlias = fmt.Sprintf("#%s:%s", r.RoomAliasName, userDomain)
// check it's free TODO: This races but is better than nothing
hasAliasReq := roomserverAPI.GetRoomIDForAliasRequest{
Alias: roomAlias,

@@ -436,7 +448,7 @@ func createRoom(
builder.PrevEvents = []gomatrixserverlib.EventReference{builtEvents[i-1].EventReference()}
}
var ev *gomatrixserverlib.Event
ev, err = buildEvent(&builder, &authEvents, cfg, evTime, roomVersion)
ev, err = buildEvent(&builder, userDomain, &authEvents, cfg, evTime, roomVersion)
if err != nil {
util.GetLogger(ctx).WithError(err).Error("buildEvent failed")
return jsonerror.InternalServerError()

@@ -461,7 +473,7 @@ func createRoom(
inputs = append(inputs, roomserverAPI.InputRoomEvent{
Kind: roomserverAPI.KindNew,
Event: event,
Origin: cfg.Matrix.ServerName,
Origin: userDomain,
SendAsServer: roomserverAPI.DoNotSendToOtherServers,
})
}

@@ -548,7 +560,7 @@ func createRoom(
Event: event,
InviteRoomState: inviteStrippedState,
RoomVersion: event.RoomVersion,
SendAsServer: string(cfg.Matrix.ServerName),
SendAsServer: string(userDomain),
}, &inviteRes); err != nil {
util.GetLogger(ctx).WithError(err).Error("PerformInvite failed")
return util.JSONResponse{

@@ -591,6 +603,7 @@ func createRoom(
// buildEvent fills out auth_events for the builder then builds the event
func buildEvent(
builder *gomatrixserverlib.EventBuilder,
serverName gomatrixserverlib.ServerName,
provider gomatrixserverlib.AuthEventProvider,
cfg *config.ClientAPI,
evTime time.Time,

@@ -606,7 +619,7 @@ func buildEvent(
}
builder.AuthEvents = refs
event, err := builder.Build(
evTime, cfg.Matrix.ServerName, cfg.Matrix.KeyID,
evTime, serverName, cfg.Matrix.KeyID,
cfg.Matrix.PrivateKey, roomVersion,
)
if err != nil {
@@ -18,14 +18,15 @@ import (
"fmt"
"net/http"

"github.com/matrix-org/gomatrixserverlib"
"github.com/matrix-org/util"

"github.com/matrix-org/dendrite/clientapi/httputil"
"github.com/matrix-org/dendrite/clientapi/jsonerror"
federationAPI "github.com/matrix-org/dendrite/federationapi/api"
roomserverAPI "github.com/matrix-org/dendrite/roomserver/api"
"github.com/matrix-org/dendrite/setup/config"
userapi "github.com/matrix-org/dendrite/userapi/api"
"github.com/matrix-org/gomatrixserverlib"
"github.com/matrix-org/util"
)

type roomDirectoryResponse struct {

@@ -75,7 +76,7 @@ func DirectoryRoom(
if res.RoomID == "" {
// If we don't know it locally, do a federation query.
// But don't send the query to ourselves.
if domain != cfg.Matrix.ServerName {
if !cfg.Matrix.IsLocalServerName(domain) {
fedRes, fedErr := federation.LookupRoomAlias(req.Context(), domain, roomAlias)
if fedErr != nil {
// TODO: Return 502 if the remote server errored.

@@ -127,7 +128,7 @@ func SetLocalAlias(
}
}

if domain != cfg.Matrix.ServerName {
if !cfg.Matrix.IsLocalServerName(domain) {
return util.JSONResponse{
Code: http.StatusForbidden,
JSON: jsonerror.Forbidden("Alias must be on local homeserver"),

@@ -318,3 +319,43 @@ func SetVisibility(
JSON: struct{}{},
}
}

func SetVisibilityAS(
req *http.Request, rsAPI roomserverAPI.ClientRoomserverAPI, dev *userapi.Device,
networkID, roomID string,
) util.JSONResponse {
if dev.AccountType != userapi.AccountTypeAppService {
return util.JSONResponse{
Code: http.StatusForbidden,
JSON: jsonerror.Forbidden("Only appservice may use this endpoint"),
}
}
var v roomVisibility

// If the method is delete, we simply mark the visibility as private
if req.Method == http.MethodDelete {
v.Visibility = "private"
} else {
if reqErr := httputil.UnmarshalJSONRequest(req, &v); reqErr != nil {
return *reqErr
}
}
var publishRes roomserverAPI.PerformPublishResponse
if err := rsAPI.PerformPublish(req.Context(), &roomserverAPI.PerformPublishRequest{
RoomID: roomID,
Visibility: v.Visibility,
NetworkID: networkID,
AppserviceID: dev.AppserviceID,
}, &publishRes); err != nil {
return jsonerror.InternalAPIError(req.Context(), err)
}
if publishRes.Error != nil {
util.GetLogger(req.Context()).WithError(publishRes.Error).Error("PerformPublish failed")
return publishRes.Error.JSONResponse()
}

return util.JSONResponse{
Code: http.StatusOK,
JSON: struct{}{},
}
}
@@ -39,14 +39,17 @@ var (
)

type PublicRoomReq struct {
Since string `json:"since,omitempty"`
Limit int16 `json:"limit,omitempty"`
Filter filter `json:"filter,omitempty"`
Server string `json:"server,omitempty"`
Since string `json:"since,omitempty"`
Limit int64 `json:"limit,omitempty"`
Filter filter `json:"filter,omitempty"`
Server string `json:"server,omitempty"`
IncludeAllNetworks bool `json:"include_all_networks,omitempty"`
NetworkID string `json:"third_party_instance_id,omitempty"`
}

type filter struct {
SearchTerms string `json:"generic_search_term,omitempty"`
SearchTerms string `json:"generic_search_term,omitempty"`
RoomTypes []string `json:"room_types,omitempty"` // TODO: Implement filter on this
}

// GetPostPublicRooms implements GET and POST /publicRooms

@@ -61,9 +64,15 @@ func GetPostPublicRooms(
return *fillErr
}

serverName := gomatrixserverlib.ServerName(request.Server)
if request.IncludeAllNetworks && request.NetworkID != "" {
return util.JSONResponse{
Code: http.StatusBadRequest,
JSON: jsonerror.InvalidParam("include_all_networks and third_party_instance_id can not be used together"),
}
}

if serverName != "" && serverName != cfg.Matrix.ServerName {
serverName := gomatrixserverlib.ServerName(request.Server)
if serverName != "" && !cfg.Matrix.IsLocalServerName(serverName) {
res, err := federation.GetPublicRoomsFiltered(
req.Context(), serverName,
int(request.Limit), request.Since,

@@ -98,7 +107,7 @@ func publicRooms(
response := gomatrixserverlib.RespPublicRooms{
Chunk: []gomatrixserverlib.PublicRoom{},
}
var limit int16
var limit int64
var offset int64
limit = request.Limit
if limit == 0 {

@@ -115,7 +124,7 @@ func publicRooms(

var rooms []gomatrixserverlib.PublicRoom
if request.Since == "" {
rooms = refreshPublicRoomCache(ctx, rsAPI, extRoomsProvider)
rooms = refreshPublicRoomCache(ctx, rsAPI, extRoomsProvider, request)
} else {
rooms = getPublicRoomsFromCache()
}

@@ -177,7 +186,7 @@ func fillPublicRoomsReq(httpReq *http.Request, request *PublicRoomReq) *util.JSO
JSON: jsonerror.BadJSON("limit param is not a number"),
}
}
request.Limit = int16(limit)
request.Limit = int64(limit)
request.Since = httpReq.FormValue("since")
request.Server = httpReq.FormValue("server")
} else {

@@ -205,7 +214,7 @@ func fillPublicRoomsReq(httpReq *http.Request, request *PublicRoomReq) *util.JSO
// limit=3&since=6 => G (prev='3', next='')
//
// A value of '-1' for prev/next indicates no position.
func sliceInto(slice []gomatrixserverlib.PublicRoom, since int64, limit int16) (subset []gomatrixserverlib.PublicRoom, prev, next int) {
func sliceInto(slice []gomatrixserverlib.PublicRoom, since int64, limit int64) (subset []gomatrixserverlib.PublicRoom, prev, next int) {
prev = -1
next = -1

@@ -231,6 +240,7 @@ func sliceInto(slice []gomatrixserverlib.PublicRoom, since int64, limit int16) (

func refreshPublicRoomCache(
ctx context.Context, rsAPI roomserverAPI.ClientRoomserverAPI, extRoomsProvider api.ExtraPublicRoomsProvider,
request PublicRoomReq,
) []gomatrixserverlib.PublicRoom {
cacheMu.Lock()
defer cacheMu.Unlock()

@@ -239,8 +249,17 @@ func refreshPublicRoomCache(
extraRooms = extRoomsProvider.Rooms()
}

// TODO: this is only here to make Sytest happy, for now.
ns := strings.Split(request.NetworkID, "|")
if len(ns) == 2 {
request.NetworkID = ns[1]
}

var queryRes roomserverAPI.QueryPublishedRoomsResponse
err := rsAPI.QueryPublishedRooms(ctx, &roomserverAPI.QueryPublishedRoomsRequest{}, &queryRes)
err := rsAPI.QueryPublishedRooms(ctx, &roomserverAPI.QueryPublishedRoomsRequest{
NetworkID: request.NetworkID,
IncludeAllNetworks: request.IncludeAllNetworks,
}, &queryRes)
if err != nil {
util.GetLogger(ctx).WithError(err).Error("QueryPublishedRooms failed")
return publicRoomsCache

@@ -17,7 +17,7 @@ func TestSliceInto(t *testing.T) {
slice := []gomatrixserverlib.PublicRoom{
pubRoom("a"), pubRoom("b"), pubRoom("c"), pubRoom("d"), pubRoom("e"), pubRoom("f"), pubRoom("g"),
}
limit := int16(3)
limit := int64(3)
testCases := []struct {
since int64
wantPrev int
clientapi/routing/joined_rooms.go (new file, 52 lines)

@@ -0,0 +1,52 @@
// Copyright 2022 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package routing

import (
"net/http"

"github.com/matrix-org/util"

"github.com/matrix-org/dendrite/clientapi/jsonerror"
"github.com/matrix-org/dendrite/roomserver/api"
userapi "github.com/matrix-org/dendrite/userapi/api"
)

type getJoinedRoomsResponse struct {
JoinedRooms []string `json:"joined_rooms"`
}

func GetJoinedRooms(
req *http.Request,
device *userapi.Device,
rsAPI api.ClientRoomserverAPI,
) util.JSONResponse {
var res api.QueryRoomsForUserResponse
err := rsAPI.QueryRoomsForUser(req.Context(), &api.QueryRoomsForUserRequest{
UserID: device.UserID,
WantMembership: "join",
}, &res)
if err != nil {
util.GetLogger(req.Context()).WithError(err).Error("QueryRoomsForUser failed")
return jsonerror.InternalServerError()
}
if res.RoomIDs == nil {
res.RoomIDs = []string{}
}
return util.JSONResponse{
Code: http.StatusOK,
JSON: getJoinedRoomsResponse{res.RoomIDs},
}
}
@@ -99,7 +99,11 @@ func (r *queryKeysRequest) GetTimeout() time.Duration {
if r.Timeout == 0 {
return 10 * time.Second
}
return time.Duration(r.Timeout) * time.Millisecond
timeout := time.Duration(r.Timeout) * time.Millisecond
if timeout > time.Second*20 {
timeout = time.Second * 20
}
return timeout
}

func QueryKeys(req *http.Request, keyAPI api.ClientKeyAPI, device *userapi.Device) util.JSONResponse {
@@ -82,7 +82,7 @@ func Login(
return *authErr
}
// make a device/access token
authErr2 := completeAuth(req.Context(), cfg.Matrix.ServerName, userAPI, login, req.RemoteAddr, req.UserAgent())
authErr2 := completeAuth(req.Context(), cfg.Matrix, userAPI, login, req.RemoteAddr, req.UserAgent())
cleanup(req.Context(), &authErr2)
return authErr2
}

@@ -93,7 +93,7 @@ func Login(
}

func completeAuth(
ctx context.Context, serverName gomatrixserverlib.ServerName, userAPI userapi.ClientUserAPI, login *auth.Login,
ctx context.Context, cfg *config.Global, userAPI userapi.ClientUserAPI, login *auth.Login,
ipAddr, userAgent string,
) util.JSONResponse {
token, err := auth.GenerateAccessToken()

@@ -102,7 +102,7 @@ func completeAuth(
return jsonerror.InternalServerError()
}

localpart, err := userutil.ParseUsernameParam(login.Username(), &serverName)
localpart, serverName, err := userutil.ParseUsernameParam(login.Username(), cfg)
if err != nil {
util.GetLogger(ctx).WithError(err).Error("auth.ParseUsernameParam failed")
return jsonerror.InternalServerError()
@@ -105,12 +105,13 @@ func sendMembership(ctx context.Context, profileAPI userapi.ClientUserAPI, devic
return jsonerror.InternalServerError()
}

serverName := device.UserDomain()
if err = roomserverAPI.SendEvents(
ctx, rsAPI,
roomserverAPI.KindNew,
[]*gomatrixserverlib.HeaderedEvent{event.Event.Headered(roomVer)},
cfg.Matrix.ServerName,
cfg.Matrix.ServerName,
serverName,
serverName,
nil,
false,
); err != nil {

@@ -271,7 +272,7 @@ func sendInvite(
Event: event,
InviteRoomState: nil, // ask the roomserver to draw up invite room state for us
RoomVersion: event.RoomVersion,
SendAsServer: string(cfg.Matrix.ServerName),
SendAsServer: string(device.UserDomain()),
}, &inviteRes); err != nil {
util.GetLogger(ctx).WithError(err).Error("PerformInvite failed")
return util.JSONResponse{

@@ -341,7 +342,7 @@ func loadProfile(
}

var profile *authtypes.Profile
if serverName == cfg.Matrix.ServerName {
if cfg.Matrix.IsLocalServerName(serverName) {
profile, err = appserviceAPI.RetrieveUserProfile(ctx, userID, asAPI, profileAPI)
} else {
profile = &authtypes.Profile{}
@@ -63,7 +63,7 @@ func CreateOpenIDToken(
JSON: openIDTokenResponse{
AccessToken: response.Token.Token,
TokenType: "Bearer",
MatrixServerName: string(cfg.Matrix.ServerName),
MatrixServerName: string(device.UserDomain()),
ExpiresIn: response.Token.ExpiresAtMS / 1000, // convert ms to s
},
}
@@ -19,6 +19,8 @@ import (
"net/http"
"time"

"github.com/matrix-org/gomatrixserverlib"

appserviceAPI "github.com/matrix-org/dendrite/appservice/api"
"github.com/matrix-org/dendrite/clientapi/auth/authtypes"
"github.com/matrix-org/dendrite/clientapi/httputil"

@@ -27,7 +29,6 @@ import (
"github.com/matrix-org/dendrite/roomserver/api"
"github.com/matrix-org/dendrite/setup/config"
userapi "github.com/matrix-org/dendrite/userapi/api"
"github.com/matrix-org/gomatrixserverlib"

"github.com/matrix-org/gomatrix"
"github.com/matrix-org/util"

@@ -112,12 +113,19 @@ func SetAvatarURL(
}
}

localpart, _, err := gomatrixserverlib.SplitID('@', userID)
localpart, domain, err := gomatrixserverlib.SplitID('@', userID)
if err != nil {
util.GetLogger(req.Context()).WithError(err).Error("gomatrixserverlib.SplitID failed")
return jsonerror.InternalServerError()
}

if !cfg.Matrix.IsLocalServerName(domain) {
return util.JSONResponse{
Code: http.StatusForbidden,
JSON: jsonerror.Forbidden("userID does not belong to a locally configured domain"),
}
}

evTime, err := httputil.ParseTSParam(req)
if err != nil {
return util.JSONResponse{

@@ -126,63 +134,26 @@ func SetAvatarURL(
}
}

res := &userapi.QueryProfileResponse{}
err = profileAPI.QueryProfile(req.Context(), &userapi.QueryProfileRequest{
UserID: userID,
}, res)
if err != nil {
util.GetLogger(req.Context()).WithError(err).Error("profileAPI.QueryProfile failed")
return jsonerror.InternalServerError()
}
oldProfile := &authtypes.Profile{
Localpart: localpart,
DisplayName: res.DisplayName,
AvatarURL: res.AvatarURL,
}

setRes := &userapi.PerformSetAvatarURLResponse{}
if err = profileAPI.SetAvatarURL(req.Context(), &userapi.PerformSetAvatarURLRequest{
Localpart: localpart,
AvatarURL: r.AvatarURL,
Localpart: localpart,
ServerName: domain,
AvatarURL: r.AvatarURL,
}, setRes); err != nil {
util.GetLogger(req.Context()).WithError(err).Error("profileAPI.SetAvatarURL failed")
return jsonerror.InternalServerError()
}

var roomsRes api.QueryRoomsForUserResponse
err = rsAPI.QueryRoomsForUser(req.Context(), &api.QueryRoomsForUserRequest{
UserID: device.UserID,
WantMembership: "join",
}, &roomsRes)
if err != nil {
util.GetLogger(req.Context()).WithError(err).Error("QueryRoomsForUser failed")
return jsonerror.InternalServerError()
}

newProfile := authtypes.Profile{
Localpart: localpart,
DisplayName: oldProfile.DisplayName,
AvatarURL: r.AvatarURL,
}

events, err := buildMembershipEvents(
req.Context(), roomsRes.RoomIDs, newProfile, userID, cfg, evTime, rsAPI,
)
switch e := err.(type) {
case nil:
case gomatrixserverlib.BadJSONError:
// No need to build new membership events, since nothing changed
if !setRes.Changed {
return util.JSONResponse{
Code: http.StatusBadRequest,
JSON: jsonerror.BadJSON(e.Error()),
Code: http.StatusOK,
JSON: struct{}{},
}
default:
util.GetLogger(req.Context()).WithError(err).Error("buildMembershipEvents failed")
return jsonerror.InternalServerError()
}

if err := api.SendEvents(req.Context(), rsAPI, api.KindNew, events, cfg.Matrix.ServerName, cfg.Matrix.ServerName, nil, true); err != nil {
util.GetLogger(req.Context()).WithError(err).Error("SendEvents failed")
return jsonerror.InternalServerError()
response, err := updateProfile(req.Context(), rsAPI, device, setRes.Profile, userID, cfg, evTime)
if err != nil {
return response
}

return util.JSONResponse{

@@ -241,12 +212,19 @@ func SetDisplayName(
}
}

localpart, _, err := gomatrixserverlib.SplitID('@', userID)
localpart, domain, err := gomatrixserverlib.SplitID('@', userID)
if err != nil {
util.GetLogger(req.Context()).WithError(err).Error("gomatrixserverlib.SplitID failed")
return jsonerror.InternalServerError()
}

if !cfg.Matrix.IsLocalServerName(domain) {
return util.JSONResponse{
Code: http.StatusForbidden,
JSON: jsonerror.Forbidden("userID does not belong to a locally configured domain"),
}
}

evTime, err := httputil.ParseTSParam(req)
if err != nil {
return util.JSONResponse{

@@ -255,47 +233,58 @@ func SetDisplayName(
}
}

pRes := &userapi.QueryProfileResponse{}
err = profileAPI.QueryProfile(req.Context(), &userapi.QueryProfileRequest{
UserID: userID,
}, pRes)
if err != nil {
util.GetLogger(req.Context()).WithError(err).Error("profileAPI.QueryProfile failed")
return jsonerror.InternalServerError()
}
oldProfile := &authtypes.Profile{
Localpart: localpart,
DisplayName: pRes.DisplayName,
AvatarURL: pRes.AvatarURL,
}

profileRes := &userapi.PerformUpdateDisplayNameResponse{}
err = profileAPI.SetDisplayName(req.Context(), &userapi.PerformUpdateDisplayNameRequest{
Localpart: localpart,
ServerName: domain,
DisplayName: r.DisplayName,
}, &struct{}{})
}, profileRes)
if err != nil {
util.GetLogger(req.Context()).WithError(err).Error("profileAPI.SetDisplayName failed")
return jsonerror.InternalServerError()
}
// No need to build new membership events, since nothing changed
if !profileRes.Changed {
return util.JSONResponse{
Code: http.StatusOK,
JSON: struct{}{},
}
}

response, err := updateProfile(req.Context(), rsAPI, device, profileRes.Profile, userID, cfg, evTime)
if err != nil {
return response
}

return util.JSONResponse{
Code: http.StatusOK,
JSON: struct{}{},
}
}

func updateProfile(
ctx context.Context, rsAPI api.ClientRoomserverAPI, device *userapi.Device,
profile *authtypes.Profile,
userID string, cfg *config.ClientAPI, evTime time.Time,
) (util.JSONResponse, error) {
var res api.QueryRoomsForUserResponse
err = rsAPI.QueryRoomsForUser(req.Context(), &api.QueryRoomsForUserRequest{
err := rsAPI.QueryRoomsForUser(ctx, &api.QueryRoomsForUserRequest{
UserID: device.UserID,
WantMembership: "join",
}, &res)
if err != nil {
util.GetLogger(req.Context()).WithError(err).Error("QueryRoomsForUser failed")
return jsonerror.InternalServerError()
util.GetLogger(ctx).WithError(err).Error("QueryRoomsForUser failed")
return jsonerror.InternalServerError(), err
}

newProfile := authtypes.Profile{
Localpart: localpart,
DisplayName: r.DisplayName,
AvatarURL: oldProfile.AvatarURL,
_, domain, err := gomatrixserverlib.SplitID('@', userID)
if err != nil {
util.GetLogger(ctx).WithError(err).Error("gomatrixserverlib.SplitID failed")
return jsonerror.InternalServerError(), err
}

events, err := buildMembershipEvents(
req.Context(), res.RoomIDs, newProfile, userID, cfg, evTime, rsAPI,
ctx, res.RoomIDs, *profile, userID, cfg, evTime, rsAPI,
)
switch e := err.(type) {
case nil:

@@ -303,21 +292,17 @@ func SetDisplayName(
return util.JSONResponse{
Code: http.StatusBadRequest,
JSON: jsonerror.BadJSON(e.Error()),
}
}, e
default:
util.GetLogger(req.Context()).WithError(err).Error("buildMembershipEvents failed")
return jsonerror.InternalServerError()
util.GetLogger(ctx).WithError(err).Error("buildMembershipEvents failed")
return jsonerror.InternalServerError(), e
}

if err := api.SendEvents(req.Context(), rsAPI, api.KindNew, events, cfg.Matrix.ServerName, cfg.Matrix.ServerName, nil, true); err != nil {
util.GetLogger(req.Context()).WithError(err).Error("SendEvents failed")
return jsonerror.InternalServerError()
}

return util.JSONResponse{
Code: http.StatusOK,
JSON: struct{}{},
if err := api.SendEvents(ctx, rsAPI, api.KindNew, events, domain, domain, nil, true); err != nil {
util.GetLogger(ctx).WithError(err).Error("SendEvents failed")
return jsonerror.InternalServerError(), err
}
return util.JSONResponse{}, nil
}

// getProfile gets the full profile of a user by querying the database or a

@@ -335,7 +320,7 @@ func getProfile(
return nil, err
}

if domain != cfg.Matrix.ServerName {
if !cfg.Matrix.IsLocalServerName(domain) {
profile, fedErr := federation.LookupProfile(ctx, domain, userID, "")
if fedErr != nil {
if x, ok := fedErr.(gomatrix.HTTPError); ok {
@@ -19,6 +19,9 @@ import (
"net/http"
"time"

"github.com/matrix-org/gomatrixserverlib"
"github.com/matrix-org/util"

"github.com/matrix-org/dendrite/clientapi/httputil"
"github.com/matrix-org/dendrite/clientapi/jsonerror"
"github.com/matrix-org/dendrite/internal/eventutil"

@@ -26,8 +29,6 @@ import (
roomserverAPI "github.com/matrix-org/dendrite/roomserver/api"
"github.com/matrix-org/dendrite/setup/config"
userapi "github.com/matrix-org/dendrite/userapi/api"
"github.com/matrix-org/gomatrixserverlib"
"github.com/matrix-org/util"
)

type redactionContent struct {

@@ -51,7 +52,7 @@ func SendRedaction(

if txnID != nil {
// Try to fetch response from transactionsCache
if res, ok := txnCache.FetchTransaction(device.AccessToken, *txnID); ok {
if res, ok := txnCache.FetchTransaction(device.AccessToken, *txnID, req.URL); ok {
return *res
}
}

@@ -130,7 +131,8 @@ func SendRedaction(
JSON: jsonerror.NotFound("Room does not exist"),
}
}
if err = roomserverAPI.SendEvents(context.Background(), rsAPI, roomserverAPI.KindNew, []*gomatrixserverlib.HeaderedEvent{e}, cfg.Matrix.ServerName, cfg.Matrix.ServerName, nil, false); err != nil {
domain := device.UserDomain()
if err = roomserverAPI.SendEvents(context.Background(), rsAPI, roomserverAPI.KindNew, []*gomatrixserverlib.HeaderedEvent{e}, domain, domain, nil, false); err != nil {
util.GetLogger(req.Context()).WithError(err).Errorf("failed to SendEvents")
return jsonerror.InternalServerError()
}

@@ -144,7 +146,7 @@ func SendRedaction(

// Add response to transactionsCache
if txnID != nil {
txnCache.AddTransaction(device.AccessToken, *txnID, &res)
txnCache.AddTransaction(device.AccessToken, *txnID, req.URL, &res)
}

return res
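FetchTransaction and AddTransaction now take the request URL alongside the access token and transaction ID, matching the 0.10.4 changelog note that transaction IDs are scoped to endpoints. A minimal sketch of a cache keyed that way is below; it is illustrative only and not Dendrite's actual transactions cache implementation.

package main

import (
	"fmt"
	"net/url"
	"sync"
)

// txnKey scopes a cached response to the token, transaction ID and endpoint path.
type txnKey struct {
	accessToken string
	txnID       string
	endpoint    string
}

type txnCache struct {
	mu      sync.Mutex
	entries map[txnKey]*string // value type is illustrative
}

func newTxnCache() *txnCache {
	return &txnCache{entries: make(map[txnKey]*string)}
}

func (c *txnCache) AddTransaction(accessToken, txnID string, u *url.URL, res *string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.entries[txnKey{accessToken, txnID, u.Path}] = res
}

func (c *txnCache) FetchTransaction(accessToken, txnID string, u *url.URL) (*string, bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	res, ok := c.entries[txnKey{accessToken, txnID, u.Path}]
	return res, ok
}

func main() {
	c := newTxnCache()
	u1, _ := url.Parse("/_matrix/client/v3/rooms/!r:example.org/redact/$e/txn1")
	u2, _ := url.Parse("/_matrix/client/v3/rooms/!r:example.org/send/m.room.message/txn1")
	body := "cached response"
	c.AddTransaction("token", "txn1", u1, &body)
	_, hit := c.FetchTransaction("token", "txn1", u2)
	fmt.Println(hit) // false: the same txn ID on a different endpoint is not reused
}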
@@ -20,6 +20,7 @@ import (
"encoding/json"
"fmt"
"io"
"net"
"net/http"
"net/url"
"regexp"

@@ -353,6 +354,7 @@ func validateRecaptcha(
response string,
clientip string,
) *util.JSONResponse {
ip, _, _ := net.SplitHostPort(clientip)
if !cfg.RecaptchaEnabled {
return &util.JSONResponse{
Code: http.StatusConflict,

@@ -372,7 +374,7 @@ func validateRecaptcha(
url.Values{
"secret": {cfg.RecaptchaPrivateKey},
"response": {response},
"remoteip": {clientip},
"remoteip": {ip},
},
)

@@ -429,7 +431,7 @@ func UserIDIsWithinApplicationServiceNamespace(
return false
}

if domain != cfg.Matrix.ServerName {
if !cfg.Matrix.IsLocalServerName(domain) {
return false
}
@@ -19,6 +19,7 @@ import (
"encoding/json"
"fmt"
"net/http"
"strconv"
"strings"
"testing"

@@ -26,8 +27,7 @@ import (
"github.com/matrix-org/dendrite/clientapi/auth/authtypes"
"github.com/matrix-org/dendrite/internal/mapsutil"
"github.com/matrix-org/dendrite/setup/config"
"github.com/matrix-org/dendrite/test"
"github.com/matrix-org/dendrite/userapi/api"
testutil "github.com/matrix-org/dendrite/test"
uapi "github.com/matrix-org/dendrite/userapi/api"
"github.com/matrix-org/util"
"github.com/stretchr/testify/assert"

@@ -41,19 +41,17 @@ type registerContext struct {
}

func createRegisterContext(_ *testing.T) *registerContext {
chainIds := []int{4}

cfg := &config.ClientAPI{
Matrix: &config.Global{
ServerName: test.TestServerName,
ServerName: testutil.TestServerName,
},
Derived: &config.Derived{},
PasswordAuthenticationDisabled: true,
PublicKeyAuthentication: config.PublicKeyAuthentication{
Ethereum: config.EthereumAuthConfig{
Enabled: true,
Version: 1,
ChainIDs: chainIds,
Enabled: true,
Version: 1,
ConfigChainID: strconv.Itoa(testutil.EthereumTestNetworkId),
},
},
}

@@ -129,7 +127,7 @@ func (ua *fakePublicKeyUserApi) PerformDeviceCreation(
req *uapi.PerformDeviceCreationRequest,
res *uapi.PerformDeviceCreationResponse) error {
res.DeviceCreated = true
res.Device = &api.Device{
res.Device = &uapi.Device{
ID: "device_id",
UserID: req.Localpart,
AccessToken: req.AccessToken,

@@ -142,11 +140,11 @@ func (ua *fakePublicKeyUserApi) PerformAccountCreation(
req *uapi.PerformAccountCreationRequest,
res *uapi.PerformAccountCreationResponse) error {
res.AccountCreated = true
res.Account = &api.Account{
res.Account = &uapi.Account{
AppServiceID: req.AppServiceID,
Localpart: req.Localpart,
ServerName: test.TestServerName,
UserID: fmt.Sprintf("@%s:%s", req.Localpart, test.TestServerName),
ServerName: testutil.TestServerName,
UserID: fmt.Sprintf("@%s:%s", req.Localpart, testutil.TestServerName),
AccountType: req.AccountType,
}
return nil

@@ -173,8 +171,6 @@ func (*fakePublicKeyUserApi) QueryLoginToken(ctx context.Context, req *uapi.Quer
func newRegistrationSession(
t *testing.T,
userId string,
_ *config.ClientAPI,
_ *auth.UserInteractive,
userAPI *fakePublicKeyUserApi,
) string {
body := fmt.Sprintf(`{

@@ -214,20 +210,18 @@ func newRegistrationSession(
func TestRegisterEthereum(t *testing.T) {
// Setup
var userAPI fakePublicKeyUserApi
wallet, _ := test.CreateTestAccount()
message, _ := test.CreateEip4361TestMessage(wallet.PublicAddress)
signature, _ := test.SignMessage(message.String(), wallet.PrivateKey)
wallet, _ := testutil.CreateTestAccount()
message, _ := testutil.CreateEip4361TestMessage(wallet.PublicAddress)
signature, _ := testutil.SignMessage(message.String(), wallet.PrivateKey)
registerContext := createRegisterContext(t)
sessionId := newRegistrationSession(
t,
wallet.Eip155UserId,
registerContext.config,
registerContext.userInteractive,
&userAPI,
)

// Escape \t and \n. Work around for marshalling and unmarshalling message.
msgStr := test.FromEip4361MessageToString(message)
msgStr := testutil.FromEip4361MessageToString(message)
body := fmt.Sprintf(`{
"username": "%v",
"auth": {

@@ -339,7 +333,10 @@ func TestNewRegistrationSession(t *testing.T) {
params,
"[object]")
ethParams := params.(config.EthereumAuthParams)
assert.NotEmptyf(ethParams.ChainIDs, "ChainIDs actual: empty, expected not empty")
assert.Equalf(
testutil.EthereumTestNetworkId,
ethParams.ChainID,
"ChainID actual: %d, expected %d", ethParams.ChainID, testutil.EthereumTestNetworkId)
assert.NotEmptyf(ethParams.Version, "Version actual: \"\", expected: not empty")
}
@ -80,6 +80,7 @@ func Setup(
|
|||
|
||||
unstableFeatures := map[string]bool{
|
||||
"org.matrix.e2e_cross_signing": true,
|
||||
"org.matrix.msc2285.stable": true,
|
||||
}
|
||||
for _, msc := range cfg.MSCs.MSCs {
|
||||
unstableFeatures["org.matrix."+msc] = true
|
||||
|
|
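Given the sample configuration later in this change (mscs: [msc2946]), the loop above merges the configured MSCs into the static map, so the set of unstable features advertised to clients (presumably via /_matrix/client/versions) ends up roughly as in this illustrative snippet:

unstableFeatures := map[string]bool{
	"org.matrix.e2e_cross_signing": true,
	"org.matrix.msc2285.stable":    true,
	"org.matrix.msc2946":           true, // contributed by the cfg.MSCs.MSCs loop
}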
@ -173,6 +174,12 @@ func Setup(
|
|||
}),
|
||||
).Methods(http.MethodPost, http.MethodOptions)
|
||||
|
||||
dendriteAdminRouter.Handle("/admin/downloadState/{serverName}/{roomID}",
|
||||
httputil.MakeAdminAPI("admin_download_state", userAPI, func(req *http.Request, device *userapi.Device) util.JSONResponse {
|
||||
return AdminDownloadState(req, cfg, device, rsAPI)
|
||||
}),
|
||||
).Methods(http.MethodGet, http.MethodOptions)
|
||||
|
||||
dendriteAdminRouter.Handle("/admin/fulltext/reindex",
|
||||
httputil.MakeAdminAPI("admin_fultext_reindex", userAPI, func(req *http.Request, device *userapi.Device) util.JSONResponse {
|
||||
return AdminReindex(req, cfg, device, natsClient)
|
||||
|
|
@ -188,7 +195,7 @@ func Setup(
|
|||
// server notifications
|
||||
if cfg.Matrix.ServerNotices.Enabled {
|
||||
logrus.Info("Enabling server notices at /_synapse/admin/v1/send_server_notice")
|
||||
serverNotificationSender, err := getSenderDevice(context.Background(), userAPI, cfg)
|
||||
serverNotificationSender, err := getSenderDevice(context.Background(), rsAPI, userAPI, cfg)
|
||||
if err != nil {
|
||||
logrus.WithError(err).Fatal("unable to get account for sending sending server notices")
|
||||
}
|
||||
|
|
@ -518,7 +525,7 @@ func Setup(
|
|||
return GetVisibility(req, rsAPI, vars["roomID"])
|
||||
}),
|
||||
).Methods(http.MethodGet, http.MethodOptions)
|
||||
// TODO: Add AS support
|
||||
|
||||
v3mux.Handle("/directory/list/room/{roomID}",
|
||||
httputil.MakeAuthAPI("directory_list", userAPI, func(req *http.Request, device *userapi.Device) util.JSONResponse {
|
||||
vars, err := httputil.URLDecodeMapValues(mux.Vars(req))
|
||||
|
|
@ -528,6 +535,27 @@ func Setup(
|
|||
return SetVisibility(req, rsAPI, device, vars["roomID"])
|
||||
}),
|
||||
).Methods(http.MethodPut, http.MethodOptions)
|
||||
v3mux.Handle("/directory/list/appservice/{networkID}/{roomID}",
|
||||
httputil.MakeAuthAPI("directory_list", userAPI, func(req *http.Request, device *userapi.Device) util.JSONResponse {
|
||||
vars, err := httputil.URLDecodeMapValues(mux.Vars(req))
|
||||
if err != nil {
|
||||
return util.ErrorResponse(err)
|
||||
}
|
||||
return SetVisibilityAS(req, rsAPI, device, vars["networkID"], vars["roomID"])
|
||||
}),
|
||||
).Methods(http.MethodPut, http.MethodOptions)
|
||||
|
||||
// Undocumented endpoint
|
||||
v3mux.Handle("/directory/list/appservice/{networkID}/{roomID}",
|
||||
httputil.MakeAuthAPI("directory_list", userAPI, func(req *http.Request, device *userapi.Device) util.JSONResponse {
|
||||
vars, err := httputil.URLDecodeMapValues(mux.Vars(req))
|
||||
if err != nil {
|
||||
return util.ErrorResponse(err)
|
||||
}
|
||||
return SetVisibilityAS(req, rsAPI, device, vars["networkID"], vars["roomID"])
|
||||
}),
|
||||
).Methods(http.MethodDelete, http.MethodOptions)
|
||||
|
||||
v3mux.Handle("/publicRooms",
|
||||
httputil.MakeExternalAPI("public_rooms", func(req *http.Request) util.JSONResponse {
|
||||
return GetPostPublicRooms(req, rsAPI, extRoomsProvider, federation, cfg)
|
||||
|
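The new appservice directory routes above register the same path twice, once for PUT (set visibility) and once for DELETE (remove it). A self-contained gorilla/mux sketch of that pattern, with placeholder handlers rather than the real MakeAuthAPI wrappers:

package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	path := "/directory/list/appservice/{networkID}/{roomID}"

	// PUT: publish the room in the given appservice network's directory.
	r.HandleFunc(path, func(w http.ResponseWriter, req *http.Request) {
		vars := mux.Vars(req)
		fmt.Fprintf(w, "set visibility of %s in network %s\n", vars["roomID"], vars["networkID"])
	}).Methods(http.MethodPut, http.MethodOptions)

	// DELETE: remove the room from that directory again.
	r.HandleFunc(path, func(w http.ResponseWriter, req *http.Request) {
		vars := mux.Vars(req)
		fmt.Fprintf(w, "unset visibility of %s in network %s\n", vars["roomID"], vars["networkID"])
	}).Methods(http.MethodDelete, http.MethodOptions)

	_ = http.ListenAndServe(":8080", r)
}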
|
@ -988,26 +1016,6 @@ func Setup(
|
|||
}),
|
||||
).Methods(http.MethodPost, http.MethodOptions)
|
||||
|
||||
v3mux.Handle("/rooms/{roomID}/members",
|
||||
httputil.MakeAuthAPI("rooms_members", userAPI, func(req *http.Request, device *userapi.Device) util.JSONResponse {
|
||||
vars, err := httputil.URLDecodeMapValues(mux.Vars(req))
|
||||
if err != nil {
|
||||
return util.ErrorResponse(err)
|
||||
}
|
||||
return GetMemberships(req, device, vars["roomID"], false, cfg, rsAPI)
|
||||
}),
|
||||
).Methods(http.MethodGet, http.MethodOptions)
|
||||
|
||||
v3mux.Handle("/rooms/{roomID}/joined_members",
|
||||
httputil.MakeAuthAPI("rooms_members", userAPI, func(req *http.Request, device *userapi.Device) util.JSONResponse {
|
||||
vars, err := httputil.URLDecodeMapValues(mux.Vars(req))
|
||||
if err != nil {
|
||||
return util.ErrorResponse(err)
|
||||
}
|
||||
return GetMemberships(req, device, vars["roomID"], true, cfg, rsAPI)
|
||||
}),
|
||||
).Methods(http.MethodGet, http.MethodOptions)
|
||||
|
||||
v3mux.Handle("/rooms/{roomID}/read_markers",
|
||||
httputil.MakeAuthAPI("rooms_read_markers", userAPI, func(req *http.Request, device *userapi.Device) util.JSONResponse {
|
||||
if r := rateLimits.Limit(req, device); r != nil {
|
||||
|
|
|
|||
|
|
@ -86,7 +86,7 @@ func SendEvent(
|
|||
|
||||
if txnID != nil {
|
||||
// Try to fetch response from transactionsCache
|
||||
if res, ok := txnCache.FetchTransaction(device.AccessToken, *txnID); ok {
|
||||
if res, ok := txnCache.FetchTransaction(device.AccessToken, *txnID, req.URL); ok {
|
||||
return *res
|
||||
}
|
||||
}
|
||||
|
|
@ -94,6 +94,7 @@ func SendEvent(
|
|||
// create a mutex for the specific user in the specific room
|
||||
// this avoids a situation where events that are received in quick succession are sent to the roomserver in a jumbled order
|
||||
userID := device.UserID
|
||||
domain := device.UserDomain()
|
||||
mutex, _ := userRoomSendMutexes.LoadOrStore(roomID+userID, &sync.Mutex{})
|
||||
mutex.(*sync.Mutex).Lock()
|
||||
defer mutex.(*sync.Mutex).Unlock()
|
||||
|
|
@ -185,8 +186,8 @@ func SendEvent(
|
|||
[]*gomatrixserverlib.HeaderedEvent{
|
||||
e.Headered(verRes.RoomVersion),
|
||||
},
|
||||
cfg.Matrix.ServerName,
|
||||
cfg.Matrix.ServerName,
|
||||
domain,
|
||||
domain,
|
||||
txnAndSessionID,
|
||||
false,
|
||||
); err != nil {
|
||||
|
|
@ -206,7 +207,7 @@ func SendEvent(
|
|||
}
|
||||
// Add response to transactionsCache
|
||||
if txnID != nil {
|
||||
txnCache.AddTransaction(device.AccessToken, *txnID, &res)
|
||||
txnCache.AddTransaction(device.AccessToken, *txnID, req.URL, &res)
|
||||
}
|
||||
|
||||
// Take a note of how long it took to generate the event vs submit
|
||||
|
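The FetchTransaction/AddTransaction changes above add the request URL to the transaction-cache key, so a txnID reused against a different endpoint no longer returns the cached response of the first request. A minimal sketch of such a cache, using illustrative names rather than the actual internal/transactions implementation:

package transactions

import (
	"net/url"
	"sync"

	"github.com/matrix-org/util"
)

type key struct {
	accessToken string
	txnID       string
	endpoint    string
}

type Cache struct {
	mu      sync.RWMutex
	entries map[key]*util.JSONResponse
}

// FetchTransaction returns a previously cached response for the same
// access token, transaction ID and request URL, if one exists.
func (c *Cache) FetchTransaction(accessToken, txnID string, u *url.URL) (*util.JSONResponse, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	res, ok := c.entries[key{accessToken, txnID, u.String()}]
	return res, ok
}

// AddTransaction stores the response keyed by access token, transaction ID
// and request URL.
func (c *Cache) AddTransaction(accessToken, txnID string, u *url.URL, res *util.JSONResponse) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.entries == nil {
		c.entries = make(map[key]*util.JSONResponse)
	}
	c.entries[key{accessToken, txnID, u.String()}] = res
}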
|
|
|||
|
|
@ -16,12 +16,13 @@ import (
|
|||
"encoding/json"
|
||||
"net/http"
|
||||
|
||||
"github.com/matrix-org/util"
|
||||
|
||||
"github.com/matrix-org/dendrite/clientapi/httputil"
|
||||
"github.com/matrix-org/dendrite/clientapi/jsonerror"
|
||||
"github.com/matrix-org/dendrite/clientapi/producers"
|
||||
"github.com/matrix-org/dendrite/internal/transactions"
|
||||
userapi "github.com/matrix-org/dendrite/userapi/api"
|
||||
"github.com/matrix-org/util"
|
||||
)
|
||||
|
||||
// SendToDevice handles PUT /_matrix/client/r0/sendToDevice/{eventType}/{txnId}
|
||||
|
|
@ -33,7 +34,7 @@ func SendToDevice(
|
|||
eventType string, txnID *string,
|
||||
) util.JSONResponse {
|
||||
if txnID != nil {
|
||||
if res, ok := txnCache.FetchTransaction(device.AccessToken, *txnID); ok {
|
||||
if res, ok := txnCache.FetchTransaction(device.AccessToken, *txnID, req.URL); ok {
|
||||
return *res
|
||||
}
|
||||
}
|
||||
|
|
@ -63,7 +64,7 @@ func SendToDevice(
|
|||
}
|
||||
|
||||
if txnID != nil {
|
||||
txnCache.AddTransaction(device.AccessToken, *txnID, &res)
|
||||
txnCache.AddTransaction(device.AccessToken, *txnID, req.URL, &res)
|
||||
}
|
||||
|
||||
return res
|
||||
|
|
|
|||
|
|
@ -21,7 +21,6 @@ import (
|
|||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/matrix-org/dendrite/roomserver/version"
|
||||
"github.com/matrix-org/gomatrix"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
"github.com/matrix-org/gomatrixserverlib/tokens"
|
||||
|
|
@ -29,6 +28,8 @@ import (
|
|||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/matrix-org/dendrite/roomserver/version"
|
||||
|
||||
appserviceAPI "github.com/matrix-org/dendrite/appservice/api"
|
||||
"github.com/matrix-org/dendrite/clientapi/httputil"
|
||||
"github.com/matrix-org/dendrite/clientapi/jsonerror"
|
||||
|
|
@ -73,7 +74,7 @@ func SendServerNotice(
|
|||
|
||||
if txnID != nil {
|
||||
// Try to fetch response from transactionsCache
|
||||
if res, ok := txnCache.FetchTransaction(device.AccessToken, *txnID); ok {
|
||||
if res, ok := txnCache.FetchTransaction(device.AccessToken, *txnID, req.URL); ok {
|
||||
return *res
|
||||
}
|
||||
}
|
||||
|
|
@ -251,7 +252,7 @@ func SendServerNotice(
|
|||
}
|
||||
// Add response to transactionsCache
|
||||
if txnID != nil {
|
||||
txnCache.AddTransaction(device.AccessToken, *txnID, &res)
|
||||
txnCache.AddTransaction(device.AccessToken, *txnID, req.URL, &res)
|
||||
}
|
||||
|
||||
// Take a note of how long it took to generate the event vs submit
|
||||
|
|
@ -276,6 +277,7 @@ func (r sendServerNoticeRequest) valid() (ok bool) {
|
|||
// It returns a userapi.Device, which is used for building the event
|
||||
func getSenderDevice(
|
||||
ctx context.Context,
|
||||
rsAPI api.ClientRoomserverAPI,
|
||||
userAPI userapi.ClientUserAPI,
|
||||
cfg *config.ClientAPI,
|
||||
) (*userapi.Device, error) {
|
||||
|
|
@ -290,16 +292,32 @@ func getSenderDevice(
|
|||
return nil, err
|
||||
}
|
||||
|
||||
// set the avatarurl for the user
|
||||
res := &userapi.PerformSetAvatarURLResponse{}
|
||||
// Set the avatarurl for the user
|
||||
avatarRes := &userapi.PerformSetAvatarURLResponse{}
|
||||
if err = userAPI.SetAvatarURL(ctx, &userapi.PerformSetAvatarURLRequest{
|
||||
Localpart: cfg.Matrix.ServerNotices.LocalPart,
|
||||
AvatarURL: cfg.Matrix.ServerNotices.AvatarURL,
|
||||
}, res); err != nil {
|
||||
}, avatarRes); err != nil {
|
||||
util.GetLogger(ctx).WithError(err).Error("userAPI.SetAvatarURL failed")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
profile := avatarRes.Profile
|
||||
|
||||
// Set the displayname for the user
|
||||
displayNameRes := &userapi.PerformUpdateDisplayNameResponse{}
|
||||
if err = userAPI.SetDisplayName(ctx, &userapi.PerformUpdateDisplayNameRequest{
|
||||
Localpart: cfg.Matrix.ServerNotices.LocalPart,
|
||||
DisplayName: cfg.Matrix.ServerNotices.DisplayName,
|
||||
}, displayNameRes); err != nil {
|
||||
util.GetLogger(ctx).WithError(err).Error("userAPI.SetDisplayName failed")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if displayNameRes.Changed {
|
||||
profile.DisplayName = cfg.Matrix.ServerNotices.DisplayName
|
||||
}
|
||||
|
||||
// Check if we got existing devices
|
||||
deviceRes := &userapi.QueryDevicesResponse{}
|
||||
err = userAPI.QueryDevices(ctx, &userapi.QueryDevicesRequest{
|
||||
|
|
@ -309,7 +327,15 @@ func getSenderDevice(
|
|||
return nil, err
|
||||
}
|
||||
|
||||
// We've got an existing account, return the first device of it
|
||||
if len(deviceRes.Devices) > 0 {
|
||||
// If there were changes to the profile, create a new membership event
|
||||
if displayNameRes.Changed || avatarRes.Changed {
|
||||
_, err = updateProfile(ctx, rsAPI, &deviceRes.Devices[0], profile, accRes.Account.UserID, cfg, time.Now())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return &deviceRes.Devices[0], nil
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -215,7 +215,7 @@ func queryIDServerStoreInvite(
|
|||
}
|
||||
|
||||
var profile *authtypes.Profile
|
||||
if serverName == cfg.Matrix.ServerName {
|
||||
if cfg.Matrix.IsLocalServerName(serverName) {
|
||||
res := &userapi.QueryProfileResponse{}
|
||||
err = userAPI.QueryProfile(ctx, &userapi.QueryProfileRequest{UserID: device.UserID}, res)
|
||||
if err != nil {
|
||||
|
|
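This is one of several call sites in the change that swap a direct comparison with cfg.Matrix.ServerName for cfg.Matrix.IsLocalServerName, so a homeserver can answer for more than one local name. A minimal sketch of such a helper, assuming the global config keeps the primary ServerName plus a list of additional local names; the field name SecondaryServerNames is an assumption and the real config.Global may differ.

package config

import "github.com/matrix-org/gomatrixserverlib"

// Sketch only – not the actual config.Global definition.
type Global struct {
	ServerName           gomatrixserverlib.ServerName
	SecondaryServerNames []gomatrixserverlib.ServerName
	// remaining fields elided
}

// IsLocalServerName reports whether the given server name is one this
// homeserver considers its own.
func (c *Global) IsLocalServerName(name gomatrixserverlib.ServerName) bool {
	if name == c.ServerName {
		return true
	}
	for _, s := range c.SecondaryServerNames {
		if s == name {
			return true
		}
	}
	return false
}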
|
|||
|
|
@ -17,6 +17,7 @@ import (
|
|||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/matrix-org/dendrite/setup/config"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
)
|
||||
|
||||
|
|
@ -24,23 +25,23 @@ import (
|
|||
// usernameParam can either be a user ID or just the localpart/username.
|
||||
// If serverName is passed, it is verified against the domain obtained from usernameParam (if present)
|
||||
// Returns error in case of invalid usernameParam.
|
||||
func ParseUsernameParam(usernameParam string, expectedServerName *gomatrixserverlib.ServerName) (string, error) {
|
||||
func ParseUsernameParam(usernameParam string, cfg *config.Global) (string, gomatrixserverlib.ServerName, error) {
|
||||
localpart := usernameParam
|
||||
|
||||
if strings.HasPrefix(usernameParam, "@") {
|
||||
lp, domain, err := gomatrixserverlib.SplitID('@', usernameParam)
|
||||
|
||||
if err != nil {
|
||||
return "", errors.New("invalid username")
|
||||
return "", "", errors.New("invalid username")
|
||||
}
|
||||
|
||||
if expectedServerName != nil && domain != *expectedServerName {
|
||||
return "", errors.New("user ID does not belong to this server")
|
||||
if !cfg.IsLocalServerName(domain) {
|
||||
return "", "", errors.New("user ID does not belong to this server")
|
||||
}
|
||||
|
||||
localpart = lp
|
||||
return lp, domain, nil
|
||||
}
|
||||
return localpart, nil
|
||||
return localpart, cfg.ServerName, nil
|
||||
}
|
||||
|
||||
// MakeUserID generates user ID from localpart & server name
|
||||
|
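Illustrative usage of the new ParseUsernameParam signature above: the caller now receives the domain as well as the localpart, and validation goes through cfg.IsLocalServerName instead of a single expected server name. This is a sketch of the expected behaviour (import paths assumed from the package names in this diff), not a snippet from the repository:

package main

import (
	"fmt"
	"log"

	"github.com/matrix-org/dendrite/clientapi/userutil"
	"github.com/matrix-org/dendrite/setup/config"
)

func main() {
	cfg := &config.Global{ServerName: "example.com"}

	// Full user ID: the domain must be local according to cfg.IsLocalServerName.
	localpart, domain, err := userutil.ParseUsernameParam("@alice:example.com", cfg)
	if err != nil {
		log.Fatal(err) // "@alice:elsewhere.org" would fail with "user ID does not belong to this server"
	}
	fmt.Println(localpart, domain) // alice example.com

	// Bare localpart: the configured server name is returned as the domain.
	localpart, domain, _ = userutil.ParseUsernameParam("bob", cfg)
	fmt.Println(localpart, domain) // bob example.com
}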
|
|
|||
|
|
@ -15,6 +15,7 @@ package userutil
|
|||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/matrix-org/dendrite/setup/config"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
)
|
||||
|
||||
|
|
@ -28,7 +29,11 @@ var (
|
|||
|
||||
// TestGoodUserID checks that correct localpart is returned for a valid user ID.
|
||||
func TestGoodUserID(t *testing.T) {
|
||||
lp, err := ParseUsernameParam(goodUserID, &serverName)
|
||||
cfg := &config.Global{
|
||||
ServerName: serverName,
|
||||
}
|
||||
|
||||
lp, _, err := ParseUsernameParam(goodUserID, cfg)
|
||||
|
||||
if err != nil {
|
||||
t.Error("User ID Parsing failed for ", goodUserID, " with error: ", err.Error())
|
||||
|
|
@ -41,7 +46,11 @@ func TestGoodUserID(t *testing.T) {
|
|||
|
||||
// TestWithLocalpartOnly checks that localpart is returned when usernameParam contains only localpart.
|
||||
func TestWithLocalpartOnly(t *testing.T) {
|
||||
lp, err := ParseUsernameParam(localpart, &serverName)
|
||||
cfg := &config.Global{
|
||||
ServerName: serverName,
|
||||
}
|
||||
|
||||
lp, _, err := ParseUsernameParam(localpart, cfg)
|
||||
|
||||
if err != nil {
|
||||
t.Error("User ID Parsing failed for ", localpart, " with error: ", err.Error())
|
||||
|
|
@ -54,7 +63,11 @@ func TestWithLocalpartOnly(t *testing.T) {
|
|||
|
||||
// TestIncorrectDomain checks for error when there's server name mismatch.
|
||||
func TestIncorrectDomain(t *testing.T) {
|
||||
_, err := ParseUsernameParam(goodUserID, &invalidServerName)
|
||||
cfg := &config.Global{
|
||||
ServerName: invalidServerName,
|
||||
}
|
||||
|
||||
_, _, err := ParseUsernameParam(goodUserID, cfg)
|
||||
|
||||
if err == nil {
|
||||
t.Error("Invalid Domain should return an error")
|
||||
|
|
@ -63,7 +76,11 @@ func TestIncorrectDomain(t *testing.T) {
|
|||
|
||||
// TestBadUserID checks that ParseUsernameParam fails for invalid user ID
|
||||
func TestBadUserID(t *testing.T) {
|
||||
_, err := ParseUsernameParam(badUserID, &serverName)
|
||||
cfg := &config.Global{
|
||||
ServerName: serverName,
|
||||
}
|
||||
|
||||
_, _, err := ParseUsernameParam(badUserID, cfg)
|
||||
|
||||
if err == nil {
|
||||
t.Error("Illegal User ID should return an error")
|
||||
|
|
|
|||
|
|
@ -179,7 +179,10 @@ func sharedSecretRegister(sharedSecret, serverURL, localpart, password string, a
|
|||
body, _ = io.ReadAll(regResp.Body)
|
||||
return "", fmt.Errorf(gjson.GetBytes(body, "error").Str)
|
||||
}
|
||||
r, _ := io.ReadAll(regResp.Body)
|
||||
r, err := io.ReadAll(regResp.Body)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to read response body (HTTP %d): %w", regResp.StatusCode, err)
|
||||
}
|
||||
|
||||
return gjson.GetBytes(r, "access_token").Str, nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -20,6 +20,7 @@ import (
|
|||
"encoding/hex"
|
||||
"fmt"
|
||||
"net"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
|
|
@ -27,9 +28,9 @@ import (
|
|||
"github.com/sirupsen/logrus"
|
||||
|
||||
ironwoodtypes "github.com/Arceliar/ironwood/types"
|
||||
yggdrasilconfig "github.com/yggdrasil-network/yggdrasil-go/src/config"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/core"
|
||||
yggdrasilcore "github.com/yggdrasil-network/yggdrasil-go/src/core"
|
||||
yggdrasildefaults "github.com/yggdrasil-network/yggdrasil-go/src/defaults"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/multicast"
|
||||
yggdrasilmulticast "github.com/yggdrasil-network/yggdrasil-go/src/multicast"
|
||||
|
||||
gologme "github.com/gologme/log"
|
||||
|
|
@ -37,7 +38,6 @@ import (
|
|||
|
||||
type Node struct {
|
||||
core *yggdrasilcore.Core
|
||||
config *yggdrasilconfig.NodeConfig
|
||||
multicast *yggdrasilmulticast.Multicast
|
||||
log *gologme.Logger
|
||||
utpSocket *utp.Socket
|
||||
|
|
@ -57,43 +57,52 @@ func (n *Node) DialerContext(ctx context.Context, _, address string) (net.Conn,
|
|||
|
||||
func Setup(sk ed25519.PrivateKey, instanceName, storageDirectory, peerURI, listenURI string) (*Node, error) {
|
||||
n := &Node{
|
||||
core: &yggdrasilcore.Core{},
|
||||
config: yggdrasildefaults.GenerateConfig(),
|
||||
multicast: &yggdrasilmulticast.Multicast{},
|
||||
log: gologme.New(logrus.StandardLogger().Writer(), "", 0),
|
||||
incoming: make(chan net.Conn),
|
||||
log: gologme.New(logrus.StandardLogger().Writer(), "", 0),
|
||||
incoming: make(chan net.Conn),
|
||||
}
|
||||
|
||||
options := []yggdrasilcore.SetupOption{
|
||||
yggdrasilcore.AdminListenAddress("none"),
|
||||
}
|
||||
if listenURI != "" {
|
||||
options = append(options, yggdrasilcore.ListenAddress(listenURI))
|
||||
}
|
||||
if peerURI != "" {
|
||||
for _, uri := range strings.Split(peerURI, ",") {
|
||||
options = append(options, yggdrasilcore.Peer{
|
||||
URI: uri,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
var err error
|
||||
if n.core, err = yggdrasilcore.New(sk, options...); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
n.log.EnableLevel("error")
|
||||
n.log.EnableLevel("warn")
|
||||
n.log.EnableLevel("info")
|
||||
n.core.SetLogger(n.log)
|
||||
if n.utpSocket, err = utp.NewSocketFromPacketConnNoClose(n.core); err != nil {
|
||||
panic(err)
|
||||
|
||||
{
|
||||
var err error
|
||||
options := []yggdrasilcore.SetupOption{}
|
||||
if listenURI != "" {
|
||||
options = append(options, yggdrasilcore.ListenAddress(listenURI))
|
||||
}
|
||||
if peerURI != "" {
|
||||
for _, uri := range strings.Split(peerURI, ",") {
|
||||
options = append(options, yggdrasilcore.Peer{
|
||||
URI: uri,
|
||||
})
|
||||
}
|
||||
}
|
||||
if n.core, err = core.New(sk[:], n.log, options...); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
n.core.SetLogger(n.log)
|
||||
|
||||
if n.utpSocket, err = utp.NewSocketFromPacketConnNoClose(n.core); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
if err = n.multicast.Init(n.core, n.config, n.log, nil); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if err = n.multicast.Start(); err != nil {
|
||||
panic(err)
|
||||
|
||||
// Setup the multicast module.
|
||||
{
|
||||
var err error
|
||||
options := []multicast.SetupOption{
|
||||
multicast.MulticastInterface{
|
||||
Regex: regexp.MustCompile(".*"),
|
||||
Beacon: true,
|
||||
Listen: true,
|
||||
Port: 0,
|
||||
Priority: 0,
|
||||
},
|
||||
}
|
||||
if n.multicast, err = multicast.New(n.core, n.log, options...); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
n.log.Printf("Public key: %x", n.core.PublicKey())
|
||||
|
|
@ -114,14 +123,7 @@ func (n *Node) DerivedServerName() string {
|
|||
}
|
||||
|
||||
func (n *Node) PrivateKey() ed25519.PrivateKey {
|
||||
sk := make(ed25519.PrivateKey, ed25519.PrivateKeySize)
|
||||
sb, err := hex.DecodeString(n.config.PrivateKey)
|
||||
if err == nil {
|
||||
copy(sk, sb[:])
|
||||
} else {
|
||||
panic(err)
|
||||
}
|
||||
return sk
|
||||
return n.core.PrivateKey()
|
||||
}
|
||||
|
||||
func (n *Node) PublicKey() ed25519.PublicKey {
|
||||
|
|
|
|||
|
|
@ -178,7 +178,8 @@ client_api:
|
|||
ethereum:
|
||||
enabled: false
|
||||
version: 1
|
||||
chain_ids: []
|
||||
chain_id: 31337
|
||||
network_url: "http://127.0.0.1:8545"
|
||||
|
||||
# Whether to require reCAPTCHA for registration. If you have enabled registration
|
||||
# then this is HIGHLY RECOMMENDED to reduce the risk of your homeserver being used
|
||||
|
|
@ -189,7 +190,13 @@ client_api:
|
|||
recaptcha_public_key: ""
|
||||
recaptcha_private_key: ""
|
||||
recaptcha_bypass_secret: ""
|
||||
recaptcha_siteverify_api: ""
|
||||
|
||||
# To use hcaptcha.com instead of ReCAPTCHA, set the following parameters; otherwise just keep them empty.
|
||||
# recaptcha_siteverify_api: "https://hcaptcha.com/siteverify"
|
||||
# recaptcha_api_js_url: "https://js.hcaptcha.com/1/api.js"
|
||||
# recaptcha_form_field: "h-captcha-response"
|
||||
# recaptcha_sitekey_class: "h-captcha"
|
||||
|
||||
|
||||
# TURN server information that this homeserver should send to clients.
|
||||
turn:
|
||||
|
|
@ -320,6 +327,14 @@ user_api:
|
|||
# The default lifetime is 3600000ms (60 minutes).
|
||||
# openid_token_lifetime_ms: 3600000
|
||||
|
||||
# Users who register on this homeserver will automatically be joined to the rooms listed under the "auto_join_rooms" option.
|
||||
# By default, any room aliases included in this list will be created as a publicly joinable room
|
||||
# when the first user registers for the homeserver. If the room already exists,
|
||||
# make certain it is a publicly joinable room, i.e. the join rule of the room must be set to 'public'.
|
||||
# As Spaces are just rooms under the hood, Space aliases may also be used.
|
||||
auto_join_rooms:
|
||||
# - "#main:matrix.org"
|
||||
|
||||
# Configuration for Opentracing.
|
||||
# See https://github.com/matrix-org/dendrite/tree/master/docs/tracing for information on
|
||||
# how this works and how to set it up.
|
||||
|
|
|
|||
|
|
@ -174,7 +174,8 @@ client_api:
|
|||
ethereum:
|
||||
enabled: false
|
||||
version: 1
|
||||
chain_ids: []
|
||||
chain_id: 31337
|
||||
network_url: "http://127.0.0.1:8545"
|
||||
|
||||
# Whether to require reCAPTCHA for registration. If you have enabled registration
|
||||
# then this is HIGHLY RECOMMENDED to reduce the risk of your homeserver being used
|
||||
|
|
@ -185,7 +186,13 @@ client_api:
|
|||
recaptcha_public_key: ""
|
||||
recaptcha_private_key: ""
|
||||
recaptcha_bypass_secret: ""
|
||||
recaptcha_siteverify_api: ""
|
||||
|
||||
# To use hcaptcha.com instead of ReCAPTCHA, set the following parameters; otherwise just keep them empty.
|
||||
# recaptcha_siteverify_api: "https://hcaptcha.com/siteverify"
|
||||
# recaptcha_api_js_url: "https://js.hcaptcha.com/1/api.js"
|
||||
# recaptcha_form_field: "h-captcha-response"
|
||||
# recaptcha_sitekey_class: "h-captcha"
|
||||
|
||||
|
||||
# TURN server information that this homeserver should send to clients.
|
||||
turn:
|
||||
|
|
@ -385,6 +392,14 @@ user_api:
|
|||
# The default lifetime is 3600000ms (60 minutes).
|
||||
# openid_token_lifetime_ms: 3600000
|
||||
|
||||
# Users who register on this homeserver will automatically be joined to the rooms listed under the "auto_join_rooms" option.
|
||||
# By default, any room aliases included in this list will be created as a publicly joinable room
|
||||
# when the first user registers for the homeserver. If the room already exists,
|
||||
# make certain it is a publicly joinable room, i.e. the join rule of the room must be set to 'public'.
|
||||
# As Spaces are just rooms under the hood, Space aliases may also be used.
|
||||
auto_join_rooms:
|
||||
# - "#main:matrix.org"
|
||||
|
||||
# Configuration for Opentracing.
|
||||
# See https://github.com/matrix-org/dendrite/tree/master/docs/tracing for information on
|
||||
# how this works and how to set it up.
|
||||
|
|
|
|||
387
dendrite-zion.yaml
Normal file
|
|
@ -0,0 +1,387 @@
|
|||
# This is the Dendrite configuration file.
|
||||
#
|
||||
# The configuration is split up into sections - each Dendrite component has a
|
||||
# configuration section, in addition to the "global" section which applies to
|
||||
# all components.
|
||||
#
|
||||
# At a minimum, to get started, you will need to update the settings in the
|
||||
# "global" section for your deployment, and you will need to check that the
|
||||
# database "connection_string" line in each component section is correct.
|
||||
#
|
||||
# Each component with a "database" section can accept the following formats
|
||||
# for "connection_string":
|
||||
# SQLite: file:filename.db
|
||||
# file:///path/to/filename.db
|
||||
# PostgreSQL: postgresql://user:pass@hostname/database?params=...
|
||||
#
|
||||
# SQLite is embedded into Dendrite and therefore no further prerequisites are
|
||||
# needed for the database when using SQLite mode. However, performance with
|
||||
# PostgreSQL is significantly better and recommended for multi-user deployments.
|
||||
# SQLite is typically around 20-30% slower than PostgreSQL when tested with a
|
||||
# small number of users and likely will perform worse still with a higher volume
|
||||
# of users.
|
||||
#
|
||||
# The "max_open_conns" and "max_idle_conns" settings configure the maximum
|
||||
# number of open/idle database connections. The value 0 will use the database
|
||||
# engine default, and a negative value will use unlimited connections. The
|
||||
# "conn_max_lifetime" option controls the maximum length of time a database
|
||||
# connection can be idle in seconds - a negative value is unlimited.
|
||||
|
||||
# The version of the configuration file.
|
||||
version: 2
|
||||
|
||||
# Global Matrix configuration. This configuration applies to all components.
|
||||
global:
|
||||
# The domain name of this homeserver.
|
||||
server_name: ${SERVER_NAME}
|
||||
|
||||
# The path to the signing private key file, used to sign requests and events.
|
||||
# Note that this is NOT the same private key as used for TLS! To generate a
|
||||
# signing key, use "./bin/generate-keys --private-key matrix_key.pem".
|
||||
private_key: matrix_key.pem
|
||||
|
||||
# Global database connection pool, for PostgreSQL monolith deployments only. If
|
||||
# this section is populated then you can omit the "database" blocks in all other
|
||||
# sections. For polylith deployments, or monolith deployments using SQLite databases,
|
||||
# you must configure the "database" block for each component instead.
|
||||
database:
|
||||
connection_string: ${DATABASE_CONNECTION_STRING}
|
||||
max_open_conns: 10
|
||||
max_idle_conns: 5
|
||||
conn_max_lifetime: -1
|
||||
|
||||
# The paths and expiry timestamps (as a UNIX timestamp in millisecond precision)
|
||||
# to old signing private keys that were formerly in use on this domain. These
|
||||
# keys will not be used for federation request or event signing, but will be
|
||||
# provided to any other homeserver that asks when trying to verify old events.
|
||||
# old_private_keys:
|
||||
# - private_key: old_matrix_key.pem
|
||||
# expired_at: 1601024554498
|
||||
|
||||
# How long a remote server can cache our server signing key before requesting it
|
||||
# again. Increasing this number will reduce the number of requests made by other
|
||||
# servers for our key but increases the period that a compromised key will be
|
||||
# considered valid by other homeservers.
|
||||
key_validity_period: 168h0m0s
|
||||
|
||||
# The server name to delegate server-server communications to, with optional port
|
||||
# e.g. localhost:443
|
||||
well_known_server_name: ""
|
||||
|
||||
# Lists of domains that the server will trust as identity servers to verify third
|
||||
# party identifiers such as phone numbers and email addresses.
|
||||
trusted_third_party_id_servers:
|
||||
- matrix.org
|
||||
- vector.im
|
||||
|
||||
# Disables federation. Dendrite will not be able to make any outbound HTTP requests
|
||||
# to other servers and the federation API will not be exposed.
|
||||
disable_federation: false
|
||||
|
||||
# Configures the handling of presence events.
|
||||
presence:
|
||||
# Whether inbound presence events are allowed, e.g. receiving presence events from other servers
|
||||
enable_inbound: false
|
||||
# Whether outbound presence events are allowed, e.g. sending presence events to other servers
|
||||
enable_outbound: false
|
||||
|
||||
# Server notices allows server admins to send messages to all users.
|
||||
server_notices:
|
||||
enabled: false
|
||||
# The server localpart to be used when sending notices; ensure this is not already taken
|
||||
local_part: "_server"
|
||||
# The displayname to be used when sending notices
|
||||
display_name: "Server alerts"
|
||||
# The mxid of the avatar to use
|
||||
avatar_url: ""
|
||||
# The roomname to be used when creating messages
|
||||
room_name: "Server Alerts"
|
||||
|
||||
# Configuration for NATS JetStream
|
||||
jetstream:
|
||||
# A list of NATS Server addresses to connect to. If none are specified, an
|
||||
# internal NATS server will be started automatically when running Dendrite
|
||||
# in monolith mode. It is required to specify the address of at least one
|
||||
# NATS Server node if running in polylith mode.
|
||||
addresses:
|
||||
# - localhost:4222
|
||||
|
||||
# Keep all NATS streams in memory, rather than persisting them to the storage
|
||||
# path below. This option is present primarily for integration testing and
|
||||
# should not be used on a real world Dendrite deployment.
|
||||
in_memory: false
|
||||
|
||||
# Persistent directory to store JetStream streams in. This directory
|
||||
# should be preserved across Dendrite restarts.
|
||||
storage_path: ./
|
||||
|
||||
# The prefix to use for stream names for this homeserver - really only
|
||||
# useful if running more than one Dendrite on the same NATS deployment.
|
||||
topic_prefix: Dendrite
|
||||
|
||||
# Configuration for Prometheus metric collection.
|
||||
metrics:
|
||||
# Whether or not Prometheus metrics are enabled.
|
||||
enabled: false
|
||||
|
||||
# HTTP basic authentication to protect access to monitoring.
|
||||
basic_auth:
|
||||
username: metrics
|
||||
password: metrics
|
||||
|
||||
# DNS cache options. The DNS cache may reduce the load on DNS servers
|
||||
# if there is no local caching resolver available for use.
|
||||
dns_cache:
|
||||
# Whether or not the DNS cache is enabled.
|
||||
enabled: false
|
||||
|
||||
# Maximum number of entries to hold in the DNS cache, and
|
||||
# for how long those items should be considered valid in seconds.
|
||||
cache_size: 256
|
||||
cache_lifetime: "5m" # 5minutes; see https://pkg.go.dev/time@master#ParseDuration for more
|
||||
|
||||
# Configuration for the Appservice API.
|
||||
app_service_api:
|
||||
internal_api:
|
||||
listen: http://localhost:7777 # Only used in polylith deployments
|
||||
connect: http://localhost:7777 # Only used in polylith deployments
|
||||
database:
|
||||
max_open_conns: 10
|
||||
max_idle_conns: 2
|
||||
conn_max_lifetime: -1
|
||||
|
||||
# Disable the validation of TLS certificates of appservices. This is
|
||||
# not recommended in production since it may allow appservice traffic
|
||||
# to be sent to an unverified endpoint.
|
||||
disable_tls_validation: false
|
||||
|
||||
# Appservice configuration files to load into this homeserver.
|
||||
config_files: []
|
||||
|
||||
# Configuration for the Client API.
|
||||
client_api:
|
||||
internal_api:
|
||||
listen: http://localhost:7771 # Only used in polylith deployments
|
||||
connect: http://localhost:7771 # Only used in polylith deployments
|
||||
external_api:
|
||||
listen: http://[::]:8071
|
||||
|
||||
# Prevents new users from being able to register on this homeserver, except when
|
||||
# using the registration shared secret below.
|
||||
registration_disabled: false
|
||||
|
||||
# Prevents new guest accounts from being created. Guest registration is also
|
||||
# disabled implicitly by setting 'registration_disabled' above.
|
||||
guests_disabled: true
|
||||
|
||||
# If set, allows registration by anyone who knows the shared secret, regardless of
|
||||
# whether registration is otherwise disabled.
|
||||
registration_shared_secret: ""
|
||||
|
||||
# Disable password authentication.
|
||||
password_authentication_disabled: true # TODO: turn this into an environment variable - or create a separate dendrite.yaml for dev vs prod
|
||||
|
||||
# public key authentication
|
||||
public_key_authentication:
|
||||
ethereum:
|
||||
enabled: true
|
||||
version: 1
|
||||
chain_id: ${CHAIN_ID}
|
||||
network_url: ${BLOCKCHAIN_PROVIDER_URL}
|
||||
enable_authz: ${ENABLE_AUTHZ}
|
||||
|
||||
# Whether to require reCAPTCHA for registration.
|
||||
enable_registration_captcha: false
|
||||
|
||||
# Settings for ReCAPTCHA.
|
||||
recaptcha_public_key: ""
|
||||
recaptcha_private_key: ""
|
||||
recaptcha_bypass_secret: ""
|
||||
recaptcha_siteverify_api: ""
|
||||
|
||||
# TURN server information that this homeserver should send to clients.
|
||||
turn:
|
||||
turn_user_lifetime: ""
|
||||
turn_uris: []
|
||||
turn_shared_secret: ""
|
||||
turn_username: ""
|
||||
turn_password: ""
|
||||
|
||||
# Settings for rate-limited endpoints. Rate limiting will kick in after the
|
||||
# threshold number of "slots" have been taken by requests from a specific
|
||||
# host. Each "slot" will be released after the cooloff time in milliseconds.
|
||||
rate_limiting:
|
||||
enabled: true
|
||||
threshold: 5
|
||||
cooloff_ms: 500
|
||||
|
||||
# Configuration for the Federation API.
|
||||
federation_api:
|
||||
internal_api:
|
||||
listen: http://localhost:7772 # Only used in polylith deployments
|
||||
connect: http://localhost:7772 # Only used in polylith deployments
|
||||
external_api:
|
||||
listen: http://[::]:8072
|
||||
database:
|
||||
max_open_conns: 10
|
||||
max_idle_conns: 2
|
||||
conn_max_lifetime: -1
|
||||
|
||||
# How many times we will try to resend a failed transaction to a specific server. The
|
||||
# backoff is 2**x seconds, so 1 = 2 seconds, 2 = 4 seconds, 3 = 8 seconds etc.
|
||||
send_max_retries: 16
|
||||
|
||||
# Disable the validation of TLS certificates of remote federated homeservers. Do not
|
||||
# enable this option in production as it presents a security risk!
|
||||
disable_tls_validation: false
|
||||
|
||||
# Perspective keyservers to use as a backup when direct key fetches fail. This may
|
||||
# be required to satisfy key requests for servers that are no longer online when
|
||||
# joining some rooms.
|
||||
key_perspectives:
|
||||
- server_name: matrix.org
|
||||
keys:
|
||||
- key_id: ed25519:auto
|
||||
public_key: Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw
|
||||
- key_id: ed25519:a_RXGa
|
||||
public_key: l8Hft5qXKn1vfHrg3p4+W8gELQVo8N13JkluMfmn2sQ
|
||||
|
||||
# This option will control whether Dendrite will prefer to look up keys directly
|
||||
# or whether it should try perspective servers first, using direct fetches as a
|
||||
# last resort.
|
||||
prefer_direct_fetch: false
|
||||
|
||||
# Configuration for the Key Server (for end-to-end encryption).
|
||||
key_server:
|
||||
internal_api:
|
||||
listen: http://localhost:7779 # Only used in polylith deployments
|
||||
connect: http://localhost:7779 # Only used in polylith deployments
|
||||
database:
|
||||
max_open_conns: 10
|
||||
max_idle_conns: 2
|
||||
conn_max_lifetime: -1
|
||||
|
||||
# Configuration for the Media API.
|
||||
media_api:
|
||||
internal_api:
|
||||
listen: http://localhost:7774 # Only used in polylith deployments
|
||||
connect: http://localhost:7774 # Only used in polylith deployments
|
||||
external_api:
|
||||
listen: http://[::]:8074
|
||||
database:
|
||||
max_open_conns: 5
|
||||
max_idle_conns: 2
|
||||
conn_max_lifetime: -1
|
||||
|
||||
# Storage path for uploaded media. May be relative or absolute.
|
||||
base_path: ./media_store
|
||||
|
||||
# The maximum allowed file size (in bytes) for media uploads to this homeserver
|
||||
# (0 = unlimited). If using a reverse proxy, ensure it allows requests at
|
||||
# least this large (e.g. client_max_body_size in nginx.)
|
||||
max_file_size_bytes: 10485760
|
||||
|
||||
# Whether to dynamically generate thumbnails if needed.
|
||||
dynamic_thumbnails: false
|
||||
|
||||
# The maximum number of simultaneous thumbnail generators to run.
|
||||
max_thumbnail_generators: 10
|
||||
|
||||
# A list of thumbnail sizes to be generated for media content.
|
||||
thumbnail_sizes:
|
||||
- width: 32
|
||||
height: 32
|
||||
method: crop
|
||||
- width: 96
|
||||
height: 96
|
||||
method: crop
|
||||
- width: 640
|
||||
height: 480
|
||||
method: scale
|
||||
|
||||
# Configuration for experimental MSC's
|
||||
mscs:
|
||||
# A list of enabled MSC's
|
||||
# Currently valid values are:
|
||||
# - msc2836 (Threading, see https://github.com/matrix-org/matrix-doc/pull/2836)
|
||||
# - msc2946 (Spaces Summary, see https://github.com/matrix-org/matrix-doc/pull/2946)
|
||||
mscs: [msc2946]
|
||||
database:
|
||||
max_open_conns: 5
|
||||
max_idle_conns: 2
|
||||
conn_max_lifetime: -1
|
||||
|
||||
# Configuration for the Room Server.
|
||||
room_server:
|
||||
internal_api:
|
||||
listen: http://localhost:7770 # Only used in polylith deployments
|
||||
connect: http://localhost:7770 # Only used in polylith deployments
|
||||
database:
|
||||
max_open_conns: 10
|
||||
max_idle_conns: 2
|
||||
conn_max_lifetime: -1
|
||||
|
||||
# Configuration for the Sync API.
|
||||
sync_api:
|
||||
internal_api:
|
||||
listen: http://localhost:7773 # Only used in polylith deployments
|
||||
connect: http://localhost:7773 # Only used in polylith deployments
|
||||
external_api:
|
||||
listen: http://[::]:8073
|
||||
database:
|
||||
max_open_conns: 10
|
||||
max_idle_conns: 2
|
||||
conn_max_lifetime: -1
|
||||
|
||||
# This option controls which HTTP header to inspect to find the real remote IP
|
||||
# address of the client. This is likely required if Dendrite is running behind
|
||||
# a reverse proxy server.
|
||||
# real_ip_header: X-Real-IP
|
||||
|
||||
# Configuration for the User API.
|
||||
user_api:
|
||||
# The cost when hashing passwords on registration/login. Default: 10. Min: 4, Max: 31
|
||||
# See https://pkg.go.dev/golang.org/x/crypto/bcrypt for more information.
|
||||
# Setting this lower makes registration/login consume less CPU resources at the cost of security
|
||||
# should the database be compromised. Setting this higher makes registration/login consume more
|
||||
# CPU resources but makes it harder to brute force password hashes.
|
||||
# This value can be low if performing tests or on embedded Dendrite instances (e.g WASM builds)
|
||||
# bcrypt_cost: 10
|
||||
internal_api:
|
||||
listen: http://localhost:7781 # Only used in polylith deployments
|
||||
connect: http://localhost:7781 # Only used in polylith deployments
|
||||
account_database:
|
||||
max_open_conns: 10
|
||||
max_idle_conns: 2
|
||||
conn_max_lifetime: -1
|
||||
# The length of time that a token issued for a relying party from
|
||||
# /_matrix/client/r0/user/{userId}/openid/request_token endpoint
|
||||
# is considered to be valid in milliseconds.
|
||||
# The default lifetime is 3600000ms (60 minutes).
|
||||
# openid_token_lifetime_ms: 3600000
|
||||
|
||||
# Configuration for Opentracing.
|
||||
# See https://github.com/matrix-org/dendrite/tree/master/docs/tracing for information on
|
||||
# how this works and how to set it up.
|
||||
tracing:
|
||||
enabled: false
|
||||
jaeger:
|
||||
serviceName: ""
|
||||
disabled: false
|
||||
rpc_metrics: false
|
||||
tags: []
|
||||
sampler: null
|
||||
reporter: null
|
||||
headers: null
|
||||
baggage_restrictions: null
|
||||
throttler: null
|
||||
|
||||
# Logging configuration
|
||||
logging:
|
||||
- type: std
|
||||
level: info
|
||||
- type: file
|
||||
# The logging level, must be one of debug, info, warn, error, fatal, panic.
|
||||
level: info
|
||||
params:
|
||||
path: ./logs
|
||||
|
|
@ -231,9 +231,9 @@ GEM
|
|||
jekyll-seo-tag (~> 2.1)
|
||||
minitest (5.15.0)
|
||||
multipart-post (2.1.1)
|
||||
nokogiri (1.13.6-arm64-darwin)
|
||||
nokogiri (1.13.9-arm64-darwin)
|
||||
racc (~> 1.4)
|
||||
nokogiri (1.13.6-x86_64-linux)
|
||||
nokogiri (1.13.9-x86_64-linux)
|
||||
racc (~> 1.4)
|
||||
octokit (4.22.0)
|
||||
faraday (>= 0.9)
|
||||
|
|
|
|||
|
|
@ -74,7 +74,7 @@ matrix.example.com {
|
|||
# Change the end of each reverse_proxy line to the correct
|
||||
# address for your various services.
|
||||
@sync_api {
|
||||
path_regexp /_matrix/client/.*?/(sync|user/.*?/filter/?.*|keys/changes|rooms/.*?/(messages|context/.*?|relations/.*?|event/.*?))$
|
||||
path_regexp /_matrix/client/.*?/(sync|user/.*?/filter/?.*|keys/changes|rooms/.*?/(messages|.*?_?members|context/.*?|relations/.*?|event/.*?))$
|
||||
}
|
||||
reverse_proxy @sync_api sync_api:8073
|
||||
|
||||
|
|
|
|||
|
|
@ -23,8 +23,10 @@ VirtualHost {
|
|||
# /_matrix/client/.*/rooms/{roomId}/relations/{eventID}
|
||||
# /_matrix/client/.*/rooms/{roomId}/relations/{eventID}/{relType}
|
||||
# /_matrix/client/.*/rooms/{roomId}/relations/{eventID}/{relType}/{eventType}
|
||||
# /_matrix/client/.*/rooms/{roomId}/members
|
||||
# /_matrix/client/.*/rooms/{roomId}/joined_members
|
||||
# to sync_api
|
||||
ReverseProxy = /_matrix/client/.*?/(sync|user/.*?/filter/?.*|keys/changes|rooms/.*?/(messages|context/.*?|relations/.*?|event/.*?))$ http://localhost:8073 600
|
||||
ReverseProxy = /_matrix/client/.*?/(sync|user/.*?/filter/?.*|keys/changes|rooms/.*?/(messages|.*?_?members|context/.*?|relations/.*?|event/.*?))$ http://localhost:8073 600
|
||||
ReverseProxy = /_matrix/client http://localhost:8071 600
|
||||
ReverseProxy = /_matrix/federation http://localhost:8072 600
|
||||
ReverseProxy = /_matrix/key http://localhost:8072 600
|
||||
|
|
|
|||
|
|
@ -33,8 +33,10 @@ server {
|
|||
# /_matrix/client/.*/rooms/{roomId}/relations/{eventID}
|
||||
# /_matrix/client/.*/rooms/{roomId}/relations/{eventID}/{relType}
|
||||
# /_matrix/client/.*/rooms/{roomId}/relations/{eventID}/{relType}/{eventType}
|
||||
# /_matrix/client/.*/rooms/{roomId}/members
|
||||
# /_matrix/client/.*/rooms/{roomId}/joined_members
|
||||
# to sync_api
|
||||
location ~ /_matrix/client/.*?/(sync|user/.*?/filter/?.*|keys/changes|rooms/.*?/(messages|context/.*?|relations/.*?|event/.*?))$ {
|
||||
location ~ /_matrix/client/.*?/(sync|user/.*?/filter/?.*|keys/changes|rooms/.*?/(messages|.*?_?members|context/.*?|relations/.*?|event/.*?))$ {
|
||||
proxy_pass http://sync_api:8073;
|
||||
}
|
||||
|
||||
|
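The broadened sync-API regex used by all three reverse-proxy samples above now also captures the members and joined_members endpoints. Go's regexp syntax is close enough to these proxies' patterns that a quick standalone check confirms which paths match:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	re := regexp.MustCompile(`/_matrix/client/.*?/(sync|user/.*?/filter/?.*|keys/changes|rooms/.*?/(messages|.*?_?members|context/.*?|relations/.*?|event/.*?))$`)
	for _, p := range []string{
		"/_matrix/client/v3/rooms/!abc:example.com/members",
		"/_matrix/client/v3/rooms/!abc:example.com/joined_members",
		"/_matrix/client/v3/rooms/!abc:example.com/messages",
		"/_matrix/client/v3/rooms/!abc:example.com/state", // still routed to the client API
	} {
		fmt.Println(p, re.MatchString(p)) // first three print true, the last false
	}
}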
|
|
|||
|
|
@ -35,14 +35,14 @@ import (
|
|||
|
||||
// KeyChangeConsumer consumes events that originate in key server.
|
||||
type KeyChangeConsumer struct {
|
||||
ctx context.Context
|
||||
jetstream nats.JetStreamContext
|
||||
durable string
|
||||
db storage.Database
|
||||
queues *queue.OutgoingQueues
|
||||
serverName gomatrixserverlib.ServerName
|
||||
rsAPI roomserverAPI.FederationRoomserverAPI
|
||||
topic string
|
||||
ctx context.Context
|
||||
jetstream nats.JetStreamContext
|
||||
durable string
|
||||
db storage.Database
|
||||
queues *queue.OutgoingQueues
|
||||
isLocalServerName func(gomatrixserverlib.ServerName) bool
|
||||
rsAPI roomserverAPI.FederationRoomserverAPI
|
||||
topic string
|
||||
}
|
||||
|
||||
// NewKeyChangeConsumer creates a new KeyChangeConsumer. Call Start() to begin consuming from key servers.
|
||||
|
|
@ -55,14 +55,14 @@ func NewKeyChangeConsumer(
|
|||
rsAPI roomserverAPI.FederationRoomserverAPI,
|
||||
) *KeyChangeConsumer {
|
||||
return &KeyChangeConsumer{
|
||||
ctx: process.Context(),
|
||||
jetstream: js,
|
||||
durable: cfg.Matrix.JetStream.Prefixed("FederationAPIKeyChangeConsumer"),
|
||||
topic: cfg.Matrix.JetStream.Prefixed(jetstream.OutputKeyChangeEvent),
|
||||
queues: queues,
|
||||
db: store,
|
||||
serverName: cfg.Matrix.ServerName,
|
||||
rsAPI: rsAPI,
|
||||
ctx: process.Context(),
|
||||
jetstream: js,
|
||||
durable: cfg.Matrix.JetStream.Prefixed("FederationAPIKeyChangeConsumer"),
|
||||
topic: cfg.Matrix.JetStream.Prefixed(jetstream.OutputKeyChangeEvent),
|
||||
queues: queues,
|
||||
db: store,
|
||||
isLocalServerName: cfg.Matrix.IsLocalServerName,
|
||||
rsAPI: rsAPI,
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -112,7 +112,7 @@ func (t *KeyChangeConsumer) onDeviceKeyMessage(m api.DeviceMessage) bool {
|
|||
logger.WithError(err).Error("Failed to extract domain from key change event")
|
||||
return true
|
||||
}
|
||||
if originServerName != t.serverName {
|
||||
if !t.isLocalServerName(originServerName) {
|
||||
return true
|
||||
}
|
||||
|
||||
|
|
@ -141,7 +141,7 @@ func (t *KeyChangeConsumer) onDeviceKeyMessage(m api.DeviceMessage) bool {
|
|||
// Pack the EDU and marshal it
|
||||
edu := &gomatrixserverlib.EDU{
|
||||
Type: gomatrixserverlib.MDeviceListUpdate,
|
||||
Origin: string(t.serverName),
|
||||
Origin: string(originServerName),
|
||||
}
|
||||
event := gomatrixserverlib.DeviceListUpdateEvent{
|
||||
UserID: m.UserID,
|
||||
|
|
@ -159,7 +159,7 @@ func (t *KeyChangeConsumer) onDeviceKeyMessage(m api.DeviceMessage) bool {
|
|||
}
|
||||
|
||||
logger.Debugf("Sending device list update message to %q", destinations)
|
||||
err = t.queues.SendEDU(edu, t.serverName, destinations)
|
||||
err = t.queues.SendEDU(edu, originServerName, destinations)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
|
|
@ -171,7 +171,7 @@ func (t *KeyChangeConsumer) onCrossSigningMessage(m api.DeviceMessage) bool {
|
|||
logrus.WithError(err).Errorf("fedsender key change consumer: user ID parse failure")
|
||||
return true
|
||||
}
|
||||
if host != gomatrixserverlib.ServerName(t.serverName) {
|
||||
if !t.isLocalServerName(host) {
|
||||
// Ignore any messages that didn't originate locally, otherwise we'll
|
||||
// end up parroting information we received from other servers.
|
||||
return true
|
||||
|
|
@ -203,7 +203,7 @@ func (t *KeyChangeConsumer) onCrossSigningMessage(m api.DeviceMessage) bool {
|
|||
// Pack the EDU and marshal it
|
||||
edu := &gomatrixserverlib.EDU{
|
||||
Type: types.MSigningKeyUpdate,
|
||||
Origin: string(t.serverName),
|
||||
Origin: string(host),
|
||||
}
|
||||
if edu.Content, err = json.Marshal(output); err != nil {
|
||||
sentry.CaptureException(err)
|
||||
|
|
@ -212,7 +212,7 @@ func (t *KeyChangeConsumer) onCrossSigningMessage(m api.DeviceMessage) bool {
|
|||
}
|
||||
|
||||
logger.Debugf("Sending cross-signing update message to %q", destinations)
|
||||
err = t.queues.SendEDU(edu, t.serverName, destinations)
|
||||
err = t.queues.SendEDU(edu, host, destinations)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
|
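Every federation output consumer touched in this change follows the same shape: derive the origin server from the event's sender, drop anything that is not local to this homeserver, and use that origin (rather than a single fixed t.ServerName) when building and queueing the EDU. A condensed sketch of that pattern, with illustrative names and the EDU content elided:

package consumers

import (
	"github.com/matrix-org/dendrite/federationapi/queue"
	"github.com/matrix-org/gomatrixserverlib"
)

// forwardIfLocal mirrors the consumers above: returning true means "ack the
// message", whether it was forwarded or intentionally skipped.
func forwardIfLocal(
	userID string,
	isLocalServerName func(gomatrixserverlib.ServerName) bool,
	queues *queue.OutgoingQueues,
	destinations []gomatrixserverlib.ServerName,
) bool {
	_, origin, err := gomatrixserverlib.SplitID('@', userID)
	if err != nil {
		return true // malformed sender: skip it
	}
	if !isLocalServerName(origin) {
		return true // remote update: don't parrot it back out to federation
	}
	edu := &gomatrixserverlib.EDU{
		Type:   gomatrixserverlib.MDeviceListUpdate,
		Origin: string(origin),
		// Content would be marshalled here, as in the consumers above.
	}
	return queues.SendEDU(edu, origin, destinations) == nil
}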
|
|
|||
|
|
@ -38,7 +38,7 @@ type OutputPresenceConsumer struct {
|
|||
durable string
|
||||
db storage.Database
|
||||
queues *queue.OutgoingQueues
|
||||
ServerName gomatrixserverlib.ServerName
|
||||
isLocalServerName func(gomatrixserverlib.ServerName) bool
|
||||
topic string
|
||||
outboundPresenceEnabled bool
|
||||
}
|
||||
|
|
@ -56,7 +56,7 @@ func NewOutputPresenceConsumer(
|
|||
jetstream: js,
|
||||
queues: queues,
|
||||
db: store,
|
||||
ServerName: cfg.Matrix.ServerName,
|
||||
isLocalServerName: cfg.Matrix.IsLocalServerName,
|
||||
durable: cfg.Matrix.JetStream.Durable("FederationAPIPresenceConsumer"),
|
||||
topic: cfg.Matrix.JetStream.Prefixed(jetstream.OutputPresenceEvent),
|
||||
outboundPresenceEnabled: cfg.Matrix.Presence.EnableOutbound,
|
||||
|
|
@ -85,7 +85,7 @@ func (t *OutputPresenceConsumer) onMessage(ctx context.Context, msgs []*nats.Msg
|
|||
log.WithError(err).WithField("user_id", userID).Error("failed to extract domain from receipt sender")
|
||||
return true
|
||||
}
|
||||
if serverName != t.ServerName {
|
||||
if !t.isLocalServerName(serverName) {
|
||||
return true
|
||||
}
|
||||
|
||||
|
|
@ -127,7 +127,7 @@ func (t *OutputPresenceConsumer) onMessage(ctx context.Context, msgs []*nats.Msg
|
|||
|
||||
edu := &gomatrixserverlib.EDU{
|
||||
Type: gomatrixserverlib.MPresence,
|
||||
Origin: string(t.ServerName),
|
||||
Origin: string(serverName),
|
||||
}
|
||||
if edu.Content, err = json.Marshal(content); err != nil {
|
||||
log.WithError(err).Error("failed to marshal EDU JSON")
|
||||
|
|
@ -135,7 +135,7 @@ func (t *OutputPresenceConsumer) onMessage(ctx context.Context, msgs []*nats.Msg
|
|||
}
|
||||
|
||||
log.Tracef("sending presence EDU to %d servers", len(joined))
|
||||
if err = t.queues.SendEDU(edu, t.ServerName, joined); err != nil {
|
||||
if err = t.queues.SendEDU(edu, serverName, joined); err != nil {
|
||||
log.WithError(err).Error("failed to send EDU")
|
||||
return false
|
||||
}
|
||||
|
|
|
|||
|
|
@ -34,13 +34,13 @@ import (
|
|||
|
||||
// OutputReceiptConsumer consumes events that originate in the clientapi.
|
||||
type OutputReceiptConsumer struct {
|
||||
ctx context.Context
|
||||
jetstream nats.JetStreamContext
|
||||
durable string
|
||||
db storage.Database
|
||||
queues *queue.OutgoingQueues
|
||||
ServerName gomatrixserverlib.ServerName
|
||||
topic string
|
||||
ctx context.Context
|
||||
jetstream nats.JetStreamContext
|
||||
durable string
|
||||
db storage.Database
|
||||
queues *queue.OutgoingQueues
|
||||
isLocalServerName func(gomatrixserverlib.ServerName) bool
|
||||
topic string
|
||||
}
|
||||
|
||||
// NewOutputReceiptConsumer creates a new OutputReceiptConsumer. Call Start() to begin consuming typing events.
|
||||
|
|
@ -52,13 +52,13 @@ func NewOutputReceiptConsumer(
|
|||
store storage.Database,
|
||||
) *OutputReceiptConsumer {
|
||||
return &OutputReceiptConsumer{
|
||||
ctx: process.Context(),
|
||||
jetstream: js,
|
||||
queues: queues,
|
||||
db: store,
|
||||
ServerName: cfg.Matrix.ServerName,
|
||||
durable: cfg.Matrix.JetStream.Durable("FederationAPIReceiptConsumer"),
|
||||
topic: cfg.Matrix.JetStream.Prefixed(jetstream.OutputReceiptEvent),
|
||||
ctx: process.Context(),
|
||||
jetstream: js,
|
||||
queues: queues,
|
||||
db: store,
|
||||
isLocalServerName: cfg.Matrix.IsLocalServerName,
|
||||
durable: cfg.Matrix.JetStream.Durable("FederationAPIReceiptConsumer"),
|
||||
topic: cfg.Matrix.JetStream.Prefixed(jetstream.OutputReceiptEvent),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -95,7 +95,7 @@ func (t *OutputReceiptConsumer) onMessage(ctx context.Context, msgs []*nats.Msg)
|
|||
log.WithError(err).WithField("user_id", receipt.UserID).Error("failed to extract domain from receipt sender")
|
||||
return true
|
||||
}
|
||||
if receiptServerName != t.ServerName {
|
||||
if !t.isLocalServerName(receiptServerName) {
|
||||
return true
|
||||
}
|
||||
|
||||
|
|
@ -134,14 +134,14 @@ func (t *OutputReceiptConsumer) onMessage(ctx context.Context, msgs []*nats.Msg)
|
|||
|
||||
edu := &gomatrixserverlib.EDU{
|
||||
Type: gomatrixserverlib.MReceipt,
|
||||
Origin: string(t.ServerName),
|
||||
Origin: string(receiptServerName),
|
||||
}
|
||||
if edu.Content, err = json.Marshal(content); err != nil {
|
||||
log.WithError(err).Error("failed to marshal EDU JSON")
|
||||
return true
|
||||
}
|
||||
|
||||
if err := t.queues.SendEDU(edu, t.ServerName, names); err != nil {
|
||||
if err := t.queues.SendEDU(edu, receiptServerName, names); err != nil {
|
||||
log.WithError(err).Error("failed to send EDU")
|
||||
return false
|
||||
}
|
||||
|
|
|
|||
|
|
@ -34,13 +34,13 @@ import (
|
|||
|
||||
// OutputSendToDeviceConsumer consumes events that originate in the clientapi.
|
||||
type OutputSendToDeviceConsumer struct {
|
||||
ctx context.Context
|
||||
jetstream nats.JetStreamContext
|
||||
durable string
|
||||
db storage.Database
|
||||
queues *queue.OutgoingQueues
|
||||
ServerName gomatrixserverlib.ServerName
|
||||
topic string
|
||||
ctx context.Context
|
||||
jetstream nats.JetStreamContext
|
||||
durable string
|
||||
db storage.Database
|
||||
queues *queue.OutgoingQueues
|
||||
isLocalServerName func(gomatrixserverlib.ServerName) bool
|
||||
topic string
|
||||
}
|
||||
|
||||
// NewOutputSendToDeviceConsumer creates a new OutputSendToDeviceConsumer. Call Start() to begin consuming send-to-device events.
|
||||
|
|
@ -52,13 +52,13 @@ func NewOutputSendToDeviceConsumer(
|
|||
store storage.Database,
|
||||
) *OutputSendToDeviceConsumer {
|
||||
return &OutputSendToDeviceConsumer{
|
||||
ctx: process.Context(),
|
||||
jetstream: js,
|
||||
queues: queues,
|
||||
db: store,
|
||||
ServerName: cfg.Matrix.ServerName,
|
||||
durable: cfg.Matrix.JetStream.Durable("FederationAPIESendToDeviceConsumer"),
|
||||
topic: cfg.Matrix.JetStream.Prefixed(jetstream.OutputSendToDeviceEvent),
|
||||
ctx: process.Context(),
|
||||
jetstream: js,
|
||||
queues: queues,
|
||||
db: store,
|
||||
isLocalServerName: cfg.Matrix.IsLocalServerName,
|
||||
durable: cfg.Matrix.JetStream.Durable("FederationAPIESendToDeviceConsumer"),
|
||||
topic: cfg.Matrix.JetStream.Prefixed(jetstream.OutputSendToDeviceEvent),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -82,7 +82,7 @@ func (t *OutputSendToDeviceConsumer) onMessage(ctx context.Context, msgs []*nats
|
|||
log.WithError(err).WithField("user_id", sender).Error("Failed to extract domain from send-to-device sender")
|
||||
return true
|
||||
}
|
||||
if originServerName != t.ServerName {
|
||||
if !t.isLocalServerName(originServerName) {
|
||||
return true
|
||||
}
|
||||
// Extract the send-to-device event from msg.
|
||||
|
|
@ -101,14 +101,14 @@ func (t *OutputSendToDeviceConsumer) onMessage(ctx context.Context, msgs []*nats
|
|||
}
|
||||
|
||||
// The SyncAPI is already handling sendToDevice for the local server
|
||||
if destServerName == t.ServerName {
|
||||
if t.isLocalServerName(destServerName) {
|
||||
return true
|
||||
}
|
||||
|
||||
// Pack the EDU and marshal it
|
||||
edu := &gomatrixserverlib.EDU{
|
||||
Type: gomatrixserverlib.MDirectToDevice,
|
||||
Origin: string(t.ServerName),
|
||||
Origin: string(originServerName),
|
||||
}
|
||||
tdm := gomatrixserverlib.ToDeviceMessage{
|
||||
Sender: ote.Sender,
|
||||
|
|
@ -127,7 +127,7 @@ func (t *OutputSendToDeviceConsumer) onMessage(ctx context.Context, msgs []*nats
|
|||
}
|
||||
|
||||
log.Debugf("Sending send-to-device message into %q destination queue", destServerName)
|
||||
if err := t.queues.SendEDU(edu, t.ServerName, []gomatrixserverlib.ServerName{destServerName}); err != nil {
|
||||
if err := t.queues.SendEDU(edu, originServerName, []gomatrixserverlib.ServerName{destServerName}); err != nil {
|
||||
log.WithError(err).Error("failed to send EDU")
|
||||
return false
|
||||
}
|
||||
|
|
|
|||
|
|
@ -31,13 +31,13 @@ import (
|
|||
|
||||
// OutputTypingConsumer consumes events that originate in the clientapi.
|
||||
type OutputTypingConsumer struct {
|
||||
ctx context.Context
|
||||
jetstream nats.JetStreamContext
|
||||
durable string
|
||||
db storage.Database
|
||||
queues *queue.OutgoingQueues
|
||||
ServerName gomatrixserverlib.ServerName
|
||||
topic string
|
||||
ctx context.Context
|
||||
jetstream nats.JetStreamContext
|
||||
durable string
|
||||
db storage.Database
|
||||
queues *queue.OutgoingQueues
|
||||
isLocalServerName func(gomatrixserverlib.ServerName) bool
|
||||
topic string
|
||||
}
|
||||
|
||||
// NewOutputTypingConsumer creates a new OutputTypingConsumer. Call Start() to begin consuming typing events.
|
||||
|
|
@ -49,13 +49,13 @@ func NewOutputTypingConsumer(
|
|||
store storage.Database,
|
||||
) *OutputTypingConsumer {
|
||||
return &OutputTypingConsumer{
|
||||
ctx: process.Context(),
|
||||
jetstream: js,
|
||||
queues: queues,
|
||||
db: store,
|
||||
ServerName: cfg.Matrix.ServerName,
|
||||
durable: cfg.Matrix.JetStream.Durable("FederationAPITypingConsumer"),
|
||||
topic: cfg.Matrix.JetStream.Prefixed(jetstream.OutputTypingEvent),
|
||||
ctx: process.Context(),
|
||||
jetstream: js,
|
||||
queues: queues,
|
||||
db: store,
|
||||
isLocalServerName: cfg.Matrix.IsLocalServerName,
|
||||
durable: cfg.Matrix.JetStream.Durable("FederationAPITypingConsumer"),
|
||||
topic: cfg.Matrix.JetStream.Prefixed(jetstream.OutputTypingEvent),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -87,7 +87,7 @@ func (t *OutputTypingConsumer) onMessage(ctx context.Context, msgs []*nats.Msg)
|
|||
_ = msg.Ack()
|
||||
return true
|
||||
}
|
||||
if typingServerName != t.ServerName {
|
||||
if !t.isLocalServerName(typingServerName) {
|
||||
return true
|
||||
}
|
||||
|
||||
|
|
@ -111,7 +111,7 @@ func (t *OutputTypingConsumer) onMessage(ctx context.Context, msgs []*nats.Msg)
|
|||
log.WithError(err).Error("failed to marshal EDU JSON")
|
||||
return true
|
||||
}
|
||||
if err := t.queues.SendEDU(edu, t.ServerName, names); err != nil {
|
||||
if err := t.queues.SendEDU(edu, typingServerName, names); err != nil {
|
||||
log.WithError(err).Error("failed to send EDU")
|
||||
return false
|
||||
}
|
||||
|
|
|
|||
|
|
@ -69,7 +69,7 @@ func AddPublicRoutes(
|
|||
TopicPresenceEvent: cfg.Matrix.JetStream.Prefixed(jetstream.OutputPresenceEvent),
|
||||
TopicDeviceListUpdate: cfg.Matrix.JetStream.Prefixed(jetstream.InputDeviceListUpdate),
|
||||
TopicSigningKeyUpdate: cfg.Matrix.JetStream.Prefixed(jetstream.InputSigningKeyUpdate),
|
||||
ServerName: cfg.Matrix.ServerName,
|
||||
Config: cfg,
|
||||
UserAPI: userAPI,
|
||||
}
|
||||
|
||||
|
|
@ -107,7 +107,7 @@ func NewInternalAPI(
|
|||
) api.FederationInternalAPI {
|
||||
cfg := &base.Cfg.FederationAPI
|
||||
|
||||
federationDB, err := storage.NewDatabase(base, &cfg.Database, base.Caches, base.Cfg.Global.ServerName)
|
||||
federationDB, err := storage.NewDatabase(base, &cfg.Database, base.Caches, base.Cfg.Global.IsLocalServerName)
|
||||
if err != nil {
|
||||
logrus.WithError(err).Panic("failed to connect to federation sender db")
|
||||
}
|
||||
|
|
@ -116,17 +116,14 @@ func NewInternalAPI(
|
|||
_ = federationDB.RemoveAllServersFromBlacklist()
|
||||
}
|
||||
|
||||
stats := &statistics.Statistics{
|
||||
DB: federationDB,
|
||||
FailuresUntilBlacklist: cfg.FederationMaxRetries,
|
||||
}
|
||||
stats := statistics.NewStatistics(federationDB, cfg.FederationMaxRetries+1)
|
||||
|
||||
js, _ := base.NATS.Prepare(base.ProcessContext, &cfg.Matrix.JetStream)
|
||||
|
||||
queues := queue.NewOutgoingQueues(
|
||||
federationDB, base.ProcessContext,
|
||||
cfg.Matrix.DisableFederation,
|
||||
cfg.Matrix.ServerName, federation, rsAPI, stats,
|
||||
cfg.Matrix.ServerName, federation, rsAPI, &stats,
|
||||
&queue.SigningInfo{
|
||||
KeyID: cfg.Matrix.KeyID,
|
||||
PrivateKey: cfg.Matrix.PrivateKey,
|
||||
|
|
@ -183,5 +180,5 @@ func NewInternalAPI(
|
|||
}
|
||||
time.AfterFunc(time.Minute, cleanExpiredEDUs)
|
||||
|
||||
return internal.NewFederationInternalAPI(federationDB, cfg, rsAPI, federation, stats, caches, queues, keyRing)
|
||||
return internal.NewFederationInternalAPI(federationDB, cfg, rsAPI, federation, &stats, caches, queues, keyRing)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -87,6 +87,7 @@ func TestMain(m *testing.M) {
|
|||
cfg.Global.JetStream.StoragePath = config.Path(d)
|
||||
cfg.Global.KeyID = serverKeyID
|
||||
cfg.Global.KeyValidityPeriod = s.validity
|
||||
cfg.FederationAPI.KeyPerspectives = nil
|
||||
f, err := os.CreateTemp(d, "federation_keys_test*.db")
|
||||
if err != nil {
|
||||
return -1
|
||||
|
|
@ -207,7 +208,6 @@ func TestRenewalBehaviour(t *testing.T) {
|
|||
// happy at this point that the key that we already have is from the past
|
||||
// then repeating a key fetch should cause us to try and renew the key.
|
||||
// If so, then the new key will end up in our cache.
|
||||
|
||||
serverC.renew()
|
||||
|
||||
res, err = serverA.api.FetchKeys(
|
||||
|
|
|
|||
|
|
@ -164,6 +164,7 @@ func TestFederationAPIJoinThenKeyUpdate(t *testing.T) {
|
|||
func testFederationAPIJoinThenKeyUpdate(t *testing.T, dbType test.DBType) {
|
||||
base, close := testrig.CreateBaseDendrite(t, dbType)
|
||||
base.Cfg.FederationAPI.PreferDirectFetch = true
|
||||
base.Cfg.FederationAPI.KeyPerspectives = nil
|
||||
defer close()
|
||||
jsctx, _ := base.NATS.Prepare(base.ProcessContext, &base.Cfg.Global.JetStream)
|
||||
defer jetstream.DeleteAllStreams(jsctx, &base.Cfg.Global.JetStream)
|
||||
|
|
|
|||
|
|
@ -44,7 +44,7 @@ func (a *FederationInternalAPI) ClaimKeys(
|
|||
) (gomatrixserverlib.RespClaimKeys, error) {
|
||||
ctx, cancel := context.WithTimeout(ctx, time.Second*30)
|
||||
defer cancel()
|
||||
ires, err := a.doRequestIfNotBackingOffOrBlacklisted(s, func() (interface{}, error) {
|
||||
ires, err := a.doRequestIfNotBlacklisted(s, func() (interface{}, error) {
|
||||
return a.federation.ClaimKeys(ctx, s, oneTimeKeys)
|
||||
})
|
||||
if err != nil {
|
||||
|
|
|
|||
|
|
@ -99,7 +99,7 @@ func (s *FederationInternalAPI) handleLocalKeys(
|
|||
results map[gomatrixserverlib.PublicKeyLookupRequest]gomatrixserverlib.PublicKeyLookupResult,
|
||||
) {
|
||||
for req := range requests {
|
||||
if req.ServerName != s.cfg.Matrix.ServerName {
|
||||
if !s.cfg.Matrix.IsLocalServerName(req.ServerName) {
|
||||
continue
|
||||
}
|
||||
if req.KeyID == s.cfg.Matrix.KeyID {
|
||||
|
|
|
|||
|
|
@ -77,7 +77,7 @@ func (r *FederationInternalAPI) PerformJoin(
|
|||
seenSet := make(map[gomatrixserverlib.ServerName]bool)
|
||||
var uniqueList []gomatrixserverlib.ServerName
|
||||
for _, srv := range request.ServerNames {
|
||||
if seenSet[srv] || srv == r.cfg.Matrix.ServerName {
|
||||
if seenSet[srv] || r.cfg.Matrix.IsLocalServerName(srv) {
|
||||
continue
|
||||
}
|
||||
seenSet[srv] = true
|
||||
|
|
|
|||
|
|
@ -25,6 +25,7 @@ import (
|
|||
"github.com/nats-io/nats.go"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/matrix-org/dendrite/setup/config"
|
||||
"github.com/matrix-org/dendrite/setup/jetstream"
|
||||
"github.com/matrix-org/dendrite/syncapi/types"
|
||||
userapi "github.com/matrix-org/dendrite/userapi/api"
|
||||
|
|
@ -39,7 +40,7 @@ type SyncAPIProducer struct {
|
|||
TopicDeviceListUpdate string
|
||||
TopicSigningKeyUpdate string
|
||||
JetStream nats.JetStreamContext
|
||||
ServerName gomatrixserverlib.ServerName
|
||||
Config *config.FederationAPI
|
||||
UserAPI userapi.UserInternalAPI
|
||||
}
|
||||
|
||||
|
|
@ -77,7 +78,7 @@ func (p *SyncAPIProducer) SendToDevice(
|
|||
// device. If the event isn't targeted locally then we can't expand the
|
||||
// wildcard as we don't know about the remote devices, so instead we leave it
|
||||
// as-is, so that the federation sender can send it on with the wildcard intact.
|
||||
if domain == p.ServerName && deviceID == "*" {
|
||||
if p.Config.Matrix.IsLocalServerName(domain) && deviceID == "*" {
|
||||
var res userapi.QueryDevicesResponse
|
||||
err = p.UserAPI.QueryDevices(context.TODO(), &userapi.QueryDevicesRequest{
|
||||
UserID: userID,
|
||||
|
|
|
|||
|
|
@ -21,21 +21,22 @@ import (
|
|||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/matrix-org/gomatrix"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.uber.org/atomic"
|
||||
|
||||
fedapi "github.com/matrix-org/dendrite/federationapi/api"
|
||||
"github.com/matrix-org/dendrite/federationapi/statistics"
|
||||
"github.com/matrix-org/dendrite/federationapi/storage"
|
||||
"github.com/matrix-org/dendrite/federationapi/storage/shared"
|
||||
"github.com/matrix-org/dendrite/roomserver/api"
|
||||
"github.com/matrix-org/dendrite/setup/process"
|
||||
"github.com/matrix-org/gomatrix"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.uber.org/atomic"
|
||||
)
|
||||
|
||||
const (
|
||||
maxPDUsPerTransaction = 50
|
||||
maxEDUsPerTransaction = 50
|
||||
maxEDUsPerTransaction = 100
|
||||
maxPDUsInMemory = 128
|
||||
maxEDUsInMemory = 128
|
||||
queueIdleTimeout = time.Second * 30
|
||||
|
|
@ -64,7 +65,6 @@ type destinationQueue struct {
|
|||
pendingPDUs []*queuedPDU // PDUs waiting to be sent
|
||||
pendingEDUs []*queuedEDU // EDUs waiting to be sent
|
||||
pendingMutex sync.RWMutex // protects pendingPDUs and pendingEDUs
|
||||
interruptBackoff chan bool // interrupts backoff
|
||||
}
|
||||
|
||||
// Send event adds the event to the pending queue for the destination.
|
||||
|
|
@ -75,18 +75,7 @@ func (oq *destinationQueue) sendEvent(event *gomatrixserverlib.HeaderedEvent, re
|
|||
logrus.Errorf("attempt to send nil PDU with destination %q", oq.destination)
|
||||
return
|
||||
}
|
||||
// Create a database entry that associates the given PDU NID with
|
||||
// this destination queue. We'll then be able to retrieve the PDU
|
||||
// later.
|
||||
if err := oq.db.AssociatePDUWithDestination(
|
||||
oq.process.Context(),
|
||||
"", // TODO: remove this, as we don't need to persist the transaction ID
|
||||
oq.destination, // the destination server name
|
||||
receipt, // NIDs from federationapi_queue_json table
|
||||
); err != nil {
|
||||
logrus.WithError(err).Errorf("failed to associate PDU %q with destination %q", event.EventID(), oq.destination)
|
||||
return
|
||||
}
|
||||
|
||||
// Check if the destination is blacklisted. If it isn't then wake
|
||||
// up the queue.
|
||||
if !oq.statistics.Blacklisted() {
|
||||
|
|
@ -102,11 +91,9 @@ func (oq *destinationQueue) sendEvent(event *gomatrixserverlib.HeaderedEvent, re
|
|||
oq.overflowed.Store(true)
|
||||
}
|
||||
oq.pendingMutex.Unlock()
|
||||
// Wake up the queue if it's asleep.
|
||||
oq.wakeQueueIfNeeded()
|
||||
select {
|
||||
case oq.notify <- struct{}{}:
|
||||
default:
|
||||
|
||||
if !oq.backingOff.Load() {
|
||||
oq.wakeQueueAndNotify()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -119,19 +106,7 @@ func (oq *destinationQueue) sendEDU(event *gomatrixserverlib.EDU, receipt *share
|
|||
logrus.Errorf("attempt to send nil EDU with destination %q", oq.destination)
|
||||
return
|
||||
}
|
||||
// Create a database entry that associates the given PDU NID with
|
||||
// this destination queue. We'll then be able to retrieve the PDU
|
||||
// later.
|
||||
if err := oq.db.AssociateEDUWithDestination(
|
||||
oq.process.Context(),
|
||||
oq.destination, // the destination server name
|
||||
receipt, // NIDs from federationapi_queue_json table
|
||||
event.Type,
|
||||
nil, // this will use the default expireEDUTypes map
|
||||
); err != nil {
|
||||
logrus.WithError(err).Errorf("failed to associate EDU with destination %q", oq.destination)
|
||||
return
|
||||
}
|
||||
|
||||
// Check if the destination is blacklisted. If it isn't then wake
|
||||
// up the queue.
|
||||
if !oq.statistics.Blacklisted() {
|
||||
|
|
@ -147,24 +122,48 @@ func (oq *destinationQueue) sendEDU(event *gomatrixserverlib.EDU, receipt *share
|
|||
oq.overflowed.Store(true)
|
||||
}
|
||||
oq.pendingMutex.Unlock()
|
||||
// Wake up the queue if it's asleep.
|
||||
oq.wakeQueueIfNeeded()
|
||||
select {
|
||||
case oq.notify <- struct{}{}:
|
||||
default:
|
||||
|
||||
if !oq.backingOff.Load() {
|
||||
oq.wakeQueueAndNotify()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// handleBackoffNotifier is registered as the backoff notification
|
||||
// callback with Statistics. It will wakeup and notify the queue
|
||||
// if the queue is currently backing off.
|
||||
func (oq *destinationQueue) handleBackoffNotifier() {
|
||||
// Only wake up the queue if it is backing off.
|
||||
// Otherwise there is no pending work for the queue to handle
|
||||
// so waking the queue would be a waste of resources.
|
||||
if oq.backingOff.Load() {
|
||||
oq.wakeQueueAndNotify()
|
||||
}
|
||||
}
|
||||
|
||||
// wakeQueueAndNotify ensures the destination queue is running and notifies it
|
||||
// that there is pending work.
|
||||
func (oq *destinationQueue) wakeQueueAndNotify() {
|
||||
// Wake up the queue if it's asleep.
|
||||
oq.wakeQueueIfNeeded()
|
||||
|
||||
// Notify the queue that there are events ready to send.
|
||||
select {
|
||||
case oq.notify <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
// wakeQueueIfNeeded will wake up the destination queue if it is
|
||||
// not already running. If it is running but it is backing off
|
||||
// then we will interrupt the backoff, causing any federation
|
||||
// requests to retry.
|
||||
func (oq *destinationQueue) wakeQueueIfNeeded() {
|
||||
// If we are backing off then interrupt the backoff.
|
||||
// Clear the backingOff flag and update the backoff metrics if it was set.
|
||||
if oq.backingOff.CompareAndSwap(true, false) {
|
||||
oq.interruptBackoff <- true
|
||||
destinationQueueBackingOff.Dec()
|
||||
}
|
||||
|
||||
// If we aren't running then wake up the queue.
|
||||
if !oq.running.Load() {
|
||||
// Start the queue.
|
||||
|
|
@ -196,38 +195,54 @@ func (oq *destinationQueue) getPendingFromDatabase() {
|
|||
gotEDUs[edu.receipt.String()] = struct{}{}
|
||||
}
|
||||
|
||||
overflowed := false
|
||||
if pduCapacity := maxPDUsInMemory - len(oq.pendingPDUs); pduCapacity > 0 {
|
||||
// We have room in memory for some PDUs - let's request no more than that.
|
||||
if pdus, err := oq.db.GetPendingPDUs(ctx, oq.destination, pduCapacity); err == nil {
|
||||
if pdus, err := oq.db.GetPendingPDUs(ctx, oq.destination, maxPDUsInMemory); err == nil {
|
||||
if len(pdus) == maxPDUsInMemory {
|
||||
overflowed = true
|
||||
}
|
||||
for receipt, pdu := range pdus {
|
||||
if _, ok := gotPDUs[receipt.String()]; ok {
|
||||
continue
|
||||
}
|
||||
oq.pendingPDUs = append(oq.pendingPDUs, &queuedPDU{receipt, pdu})
|
||||
retrieved = true
|
||||
if len(oq.pendingPDUs) == maxPDUsInMemory {
|
||||
break
|
||||
}
|
||||
}
|
||||
} else {
|
||||
logrus.WithError(err).Errorf("Failed to get pending PDUs for %q", oq.destination)
|
||||
}
|
||||
}
|
||||
|
||||
if eduCapacity := maxEDUsInMemory - len(oq.pendingEDUs); eduCapacity > 0 {
|
||||
// We have room in memory for some EDUs - let's request no more than that.
|
||||
if edus, err := oq.db.GetPendingEDUs(ctx, oq.destination, eduCapacity); err == nil {
|
||||
if edus, err := oq.db.GetPendingEDUs(ctx, oq.destination, maxEDUsInMemory); err == nil {
|
||||
if len(edus) == maxEDUsInMemory {
|
||||
overflowed = true
|
||||
}
|
||||
for receipt, edu := range edus {
|
||||
if _, ok := gotEDUs[receipt.String()]; ok {
|
||||
continue
|
||||
}
|
||||
oq.pendingEDUs = append(oq.pendingEDUs, &queuedEDU{receipt, edu})
|
||||
retrieved = true
|
||||
if len(oq.pendingEDUs) == maxEDUsInMemory {
|
||||
break
|
||||
}
|
||||
}
|
||||
} else {
|
||||
logrus.WithError(err).Errorf("Failed to get pending EDUs for %q", oq.destination)
|
||||
}
|
||||
}
|
||||
|
||||
// If we've retrieved all of the events from the database with room to spare
|
||||
// in memory then we'll no longer consider this queue to be overflowed.
|
||||
if len(oq.pendingPDUs) < maxPDUsInMemory && len(oq.pendingEDUs) < maxEDUsInMemory {
|
||||
if !overflowed {
|
||||
oq.overflowed.Store(false)
|
||||
} else {
|
||||
}
|
||||
// If we've retrieved some events then notify the destination queue goroutine.
|
||||
if retrieved {
|
||||
|
|
@ -238,6 +253,24 @@ func (oq *destinationQueue) getPendingFromDatabase() {
|
|||
}
|
||||
}
|
||||
|
||||
// checkNotificationsOnClose checks for any remaining notifications
|
||||
// and starts a new backgroundSend goroutine if any exist.
|
||||
func (oq *destinationQueue) checkNotificationsOnClose() {
|
||||
// NOTE : If we are stopping the queue due to blacklist then it
|
||||
// doesn't matter if we have been notified of new work since
|
||||
// this queue instance will be deleted anyway.
|
||||
if !oq.statistics.Blacklisted() {
|
||||
select {
|
||||
case <-oq.notify:
|
||||
// We received a new notification in between the
|
||||
// idle timeout firing and stopping the goroutine.
|
||||
// Immediately restart the queue.
|
||||
oq.wakeQueueAndNotify()
|
||||
default:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// backgroundSend is the worker goroutine for sending events.
|
||||
func (oq *destinationQueue) backgroundSend() {
|
||||
// Check if a worker is already running, and if it isn't, then
|
||||
|
|
@ -245,10 +278,17 @@ func (oq *destinationQueue) backgroundSend() {
|
|||
if !oq.running.CompareAndSwap(false, true) {
|
||||
return
|
||||
}
|
||||
|
||||
// Register queue cleanup functions.
|
||||
// NOTE : The ordering here is very intentional.
|
||||
defer oq.checkNotificationsOnClose()
|
||||
defer oq.running.Store(false)
|
||||
|
||||
destinationQueueRunning.Inc()
|
||||
defer destinationQueueRunning.Dec()
|
||||
defer oq.queues.clearQueue(oq)
|
||||
defer oq.running.Store(false)
|
||||
|
||||
idleTimeout := time.NewTimer(queueIdleTimeout)
|
||||
defer idleTimeout.Stop()
|
||||
|
||||
// Mark the queue as overflowed, so we will consult the database
|
||||
// to see if there's anything new to send.
|
||||
|
|
@ -261,59 +301,33 @@ func (oq *destinationQueue) backgroundSend() {
|
|||
oq.getPendingFromDatabase()
|
||||
}
|
||||
|
||||
// Reset the queue idle timeout.
|
||||
if !idleTimeout.Stop() {
|
||||
select {
|
||||
case <-idleTimeout.C:
|
||||
default:
|
||||
}
|
||||
}
|
||||
idleTimeout.Reset(queueIdleTimeout)
|
||||
|
||||
// If we have nothing to do then wait either for incoming events, or
|
||||
// until we hit an idle timeout.
|
||||
select {
|
||||
case <-oq.notify:
|
||||
// There's work to do, either because getPendingFromDatabase
|
||||
// told us there is, or because a new event has come in via
|
||||
// sendEvent/sendEDU.
|
||||
case <-time.After(queueIdleTimeout):
|
||||
// told us there is, a new event has come in via sendEvent/sendEDU,
|
||||
// or we are backing off and it is time to retry.
|
||||
case <-idleTimeout.C:
|
||||
// The worker is idle so stop the goroutine. It'll get
|
||||
// restarted automatically the next time we have an event to
|
||||
// send.
|
||||
return
|
||||
case <-oq.process.Context().Done():
|
||||
// The parent process is shutting down, so stop.
|
||||
oq.statistics.ClearBackoff()
|
||||
return
|
||||
}
|
||||
|
||||
// If we are backing off this server then wait for the
|
||||
// backoff duration to complete first, or until explicitly
|
||||
// told to retry.
|
||||
until, blacklisted := oq.statistics.BackoffInfo()
|
||||
if blacklisted {
|
||||
// It's been suggested that we should give up because the backoff
|
||||
// has exceeded a maximum allowable value. Clean up the in-memory
|
||||
// buffers at this point. The PDU clean-up is already on a defer.
|
||||
logrus.Warnf("Blacklisting %q due to exceeding backoff threshold", oq.destination)
|
||||
oq.pendingMutex.Lock()
|
||||
for i := range oq.pendingPDUs {
|
||||
oq.pendingPDUs[i] = nil
|
||||
}
|
||||
for i := range oq.pendingEDUs {
|
||||
oq.pendingEDUs[i] = nil
|
||||
}
|
||||
oq.pendingPDUs = nil
|
||||
oq.pendingEDUs = nil
|
||||
oq.pendingMutex.Unlock()
|
||||
return
|
||||
}
|
||||
if until != nil && until.After(time.Now()) {
|
||||
// We haven't backed off yet, so wait for the suggested amount of
|
||||
// time.
|
||||
duration := time.Until(*until)
|
||||
logrus.Debugf("Backing off %q for %s", oq.destination, duration)
|
||||
oq.backingOff.Store(true)
|
||||
destinationQueueBackingOff.Inc()
|
||||
select {
|
||||
case <-time.After(duration):
|
||||
case <-oq.interruptBackoff:
|
||||
}
|
||||
destinationQueueBackingOff.Dec()
|
||||
oq.backingOff.Store(false)
|
||||
}
|
||||
|
||||
// Work out which PDUs/EDUs to include in the next transaction.
|
||||
oq.pendingMutex.RLock()
|
||||
pduCount := len(oq.pendingPDUs)
|
||||
|
|
@ -328,99 +342,52 @@ func (oq *destinationQueue) backgroundSend() {
|
|||
toSendEDUs := oq.pendingEDUs[:eduCount]
|
||||
oq.pendingMutex.RUnlock()
|
||||
|
||||
// If we didn't get anything from the database and there are no
|
||||
// pending EDUs then there's nothing to do - stop here.
|
||||
if pduCount == 0 && eduCount == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// If we have pending PDUs or EDUs then construct a transaction.
|
||||
// Try sending the next transaction and see what happens.
|
||||
transaction, pc, ec, terr := oq.nextTransaction(toSendPDUs, toSendEDUs)
|
||||
terr := oq.nextTransaction(toSendPDUs, toSendEDUs)
|
||||
if terr != nil {
|
||||
// We failed to send the transaction. Mark it as a failure.
|
||||
oq.statistics.Failure()
|
||||
|
||||
} else if transaction {
|
||||
// If we successfully sent the transaction then clear out
|
||||
// the pending events and EDUs, and wipe our transaction ID.
|
||||
oq.statistics.Success()
|
||||
oq.pendingMutex.Lock()
|
||||
for i := range oq.pendingPDUs[:pc] {
|
||||
oq.pendingPDUs[i] = nil
|
||||
_, blacklisted := oq.statistics.Failure()
|
||||
if !blacklisted {
|
||||
// Register the backoff state and exit the goroutine.
|
||||
// It'll get restarted automatically when the backoff
|
||||
// completes.
|
||||
oq.backingOff.Store(true)
|
||||
destinationQueueBackingOff.Inc()
|
||||
return
|
||||
} else {
|
||||
// Immediately trigger the blacklist logic.
|
||||
oq.blacklistDestination()
|
||||
return
|
||||
}
|
||||
for i := range oq.pendingEDUs[:ec] {
|
||||
oq.pendingEDUs[i] = nil
|
||||
}
|
||||
oq.pendingPDUs = oq.pendingPDUs[pc:]
|
||||
oq.pendingEDUs = oq.pendingEDUs[ec:]
|
||||
oq.pendingMutex.Unlock()
|
||||
} else {
|
||||
oq.handleTransactionSuccess(pduCount, eduCount)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// nextTransaction creates a new transaction from the pending event
|
||||
// queue and sends it. Returns true if a transaction was sent or
|
||||
// false otherwise.
|
||||
// queue and sends it.
|
||||
// Returns an error if the transaction wasn't sent.
|
||||
func (oq *destinationQueue) nextTransaction(
|
||||
pdus []*queuedPDU,
|
||||
edus []*queuedEDU,
|
||||
) (bool, int, int, error) {
|
||||
// If there's no projected transaction ID then generate one. If
|
||||
// the transaction succeeds then we'll set it back to "" so that
|
||||
// we generate a new one next time. If it fails, we'll preserve
|
||||
// it so that we retry with the same transaction ID.
|
||||
oq.transactionIDMutex.Lock()
|
||||
if oq.transactionID == "" {
|
||||
now := gomatrixserverlib.AsTimestamp(time.Now())
|
||||
oq.transactionID = gomatrixserverlib.TransactionID(fmt.Sprintf("%d-%d", now, oq.statistics.SuccessCount()))
|
||||
}
|
||||
oq.transactionIDMutex.Unlock()
|
||||
|
||||
) error {
|
||||
// Create the transaction.
|
||||
t := gomatrixserverlib.Transaction{
|
||||
PDUs: []json.RawMessage{},
|
||||
EDUs: []gomatrixserverlib.EDU{},
|
||||
}
|
||||
t.Origin = oq.origin
|
||||
t.Destination = oq.destination
|
||||
t.OriginServerTS = gomatrixserverlib.AsTimestamp(time.Now())
|
||||
t.TransactionID = oq.transactionID
|
||||
|
||||
// If we didn't get anything from the database and there are no
|
||||
// pending EDUs then there's nothing to do - stop here.
|
||||
if len(pdus) == 0 && len(edus) == 0 {
|
||||
return false, 0, 0, nil
|
||||
}
|
||||
|
||||
var pduReceipts []*shared.Receipt
|
||||
var eduReceipts []*shared.Receipt
|
||||
|
||||
// Go through PDUs that we retrieved from the database, if any,
|
||||
// and add them into the transaction.
|
||||
for _, pdu := range pdus {
|
||||
if pdu == nil || pdu.pdu == nil {
|
||||
continue
|
||||
}
|
||||
// Append the JSON of the event, since this is a json.RawMessage type in the
|
||||
// gomatrixserverlib.Transaction struct
|
||||
t.PDUs = append(t.PDUs, pdu.pdu.JSON())
|
||||
pduReceipts = append(pduReceipts, pdu.receipt)
|
||||
}
|
||||
|
||||
// Do the same for pending EDUS in the queue.
|
||||
for _, edu := range edus {
|
||||
if edu == nil || edu.edu == nil {
|
||||
continue
|
||||
}
|
||||
t.EDUs = append(t.EDUs, *edu.edu)
|
||||
eduReceipts = append(eduReceipts, edu.receipt)
|
||||
}
|
||||
|
||||
t, pduReceipts, eduReceipts := oq.createTransaction(pdus, edus)
|
||||
logrus.WithField("server_name", oq.destination).Debugf("Sending transaction %q containing %d PDUs, %d EDUs", t.TransactionID, len(t.PDUs), len(t.EDUs))
|
||||
|
||||
// Try to send the transaction to the destination server.
|
||||
// TODO: we should check for 500-ish fails vs 400-ish here,
|
||||
// since we shouldn't queue things indefinitely in response
|
||||
// to a 400-ish error
|
||||
ctx, cancel := context.WithTimeout(oq.process.Context(), time.Minute*5)
|
||||
defer cancel()
|
||||
_, err := oq.client.SendTransaction(ctx, t)
|
||||
switch err.(type) {
|
||||
switch errResponse := err.(type) {
|
||||
case nil:
|
||||
// Clean up the transaction in the database.
|
||||
if pduReceipts != nil {
|
||||
|
|
@ -439,16 +406,129 @@ func (oq *destinationQueue) nextTransaction(
|
|||
oq.transactionIDMutex.Lock()
|
||||
oq.transactionID = ""
|
||||
oq.transactionIDMutex.Unlock()
|
||||
return true, len(t.PDUs), len(t.EDUs), nil
|
||||
return nil
|
||||
case gomatrix.HTTPError:
|
||||
// Report that we failed to send the transaction and we
|
||||
// will retry again, subject to backoff.
|
||||
return false, 0, 0, err
|
||||
|
||||
// TODO: we should check for 500-ish fails vs 400-ish here,
|
||||
// since we shouldn't queue things indefinitely in response
|
||||
// to a 400-ish error
|
||||
code := errResponse.Code
|
||||
logrus.Debug("Transaction failed with HTTP", code)
|
||||
return err
|
||||
default:
|
||||
logrus.WithFields(logrus.Fields{
|
||||
"destination": oq.destination,
|
||||
logrus.ErrorKey: err,
|
||||
}).Debugf("Failed to send transaction %q", t.TransactionID)
|
||||
return false, 0, 0, err
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// createTransaction generates a gomatrixserverlib.Transaction from the provided pdus and edus.
|
||||
// It also returns the associated event receipts so they can be cleaned from the database in
|
||||
// the case of a successful transaction.
|
||||
func (oq *destinationQueue) createTransaction(
|
||||
pdus []*queuedPDU,
|
||||
edus []*queuedEDU,
|
||||
) (gomatrixserverlib.Transaction, []*shared.Receipt, []*shared.Receipt) {
|
||||
// If there's no projected transaction ID then generate one. If
|
||||
// the transaction succeeds then we'll set it back to "" so that
|
||||
// we generate a new one next time. If it fails, we'll preserve
|
||||
// it so that we retry with the same transaction ID.
|
||||
oq.transactionIDMutex.Lock()
|
||||
if oq.transactionID == "" {
|
||||
now := gomatrixserverlib.AsTimestamp(time.Now())
|
||||
oq.transactionID = gomatrixserverlib.TransactionID(fmt.Sprintf("%d-%d", now, oq.statistics.SuccessCount()))
|
||||
}
|
||||
oq.transactionIDMutex.Unlock()
|
||||
|
||||
t := gomatrixserverlib.Transaction{
|
||||
PDUs: []json.RawMessage{},
|
||||
EDUs: []gomatrixserverlib.EDU{},
|
||||
}
|
||||
t.Origin = oq.origin
|
||||
t.Destination = oq.destination
|
||||
t.OriginServerTS = gomatrixserverlib.AsTimestamp(time.Now())
|
||||
t.TransactionID = oq.transactionID
|
||||
|
||||
var pduReceipts []*shared.Receipt
|
||||
var eduReceipts []*shared.Receipt
|
||||
|
||||
// Go through PDUs that we retrieved from the database, if any,
|
||||
// and add them into the transaction.
|
||||
for _, pdu := range pdus {
|
||||
// These should never be nil.
|
||||
if pdu == nil || pdu.pdu == nil {
|
||||
continue
|
||||
}
|
||||
// Append the JSON of the event, since this is a json.RawMessage type in the
|
||||
// gomatrixserverlib.Transaction struct
|
||||
t.PDUs = append(t.PDUs, pdu.pdu.JSON())
|
||||
pduReceipts = append(pduReceipts, pdu.receipt)
|
||||
}
|
||||
|
||||
// Do the same for pending EDUS in the queue.
|
||||
for _, edu := range edus {
|
||||
// These should never be nil.
|
||||
if edu == nil || edu.edu == nil {
|
||||
continue
|
||||
}
|
||||
t.EDUs = append(t.EDUs, *edu.edu)
|
||||
eduReceipts = append(eduReceipts, edu.receipt)
|
||||
}
|
||||
|
||||
return t, pduReceipts, eduReceipts
|
||||
}
|
||||
|
||||
// blacklistDestination removes all pending PDUs and EDUs that have been cached
|
||||
// and deletes this queue.
|
||||
func (oq *destinationQueue) blacklistDestination() {
|
||||
// It's been suggested that we should give up because the backoff
|
||||
// has exceeded a maximum allowable value. Clean up the in-memory
|
||||
// buffers at this point. The PDU clean-up is already on a defer.
|
||||
logrus.Warnf("Blacklisting %q due to exceeding backoff threshold", oq.destination)
|
||||
|
||||
oq.pendingMutex.Lock()
|
||||
for i := range oq.pendingPDUs {
|
||||
oq.pendingPDUs[i] = nil
|
||||
}
|
||||
for i := range oq.pendingEDUs {
|
||||
oq.pendingEDUs[i] = nil
|
||||
}
|
||||
oq.pendingPDUs = nil
|
||||
oq.pendingEDUs = nil
|
||||
oq.pendingMutex.Unlock()
|
||||
|
||||
// Delete this queue as no more messages will be sent to this
|
||||
// destination until it is no longer blacklisted.
|
||||
oq.statistics.AssignBackoffNotifier(nil)
|
||||
oq.queues.clearQueue(oq)
|
||||
}
|
||||
|
||||
// handleTransactionSuccess updates the cached event queues as well as the success and
|
||||
// backoff information for this server.
|
||||
func (oq *destinationQueue) handleTransactionSuccess(pduCount int, eduCount int) {
|
||||
// If we successfully sent the transaction then clear out
|
||||
// the pending events and EDUs, and wipe our transaction ID.
|
||||
oq.statistics.Success()
|
||||
oq.pendingMutex.Lock()
|
||||
defer oq.pendingMutex.Unlock()
|
||||
|
||||
for i := range oq.pendingPDUs[:pduCount] {
|
||||
oq.pendingPDUs[i] = nil
|
||||
}
|
||||
for i := range oq.pendingEDUs[:eduCount] {
|
||||
oq.pendingEDUs[i] = nil
|
||||
}
|
||||
oq.pendingPDUs = oq.pendingPDUs[pduCount:]
|
||||
oq.pendingEDUs = oq.pendingEDUs[eduCount:]
|
||||
|
||||
if len(oq.pendingPDUs) > 0 || len(oq.pendingEDUs) > 0 {
|
||||
select {
|
||||
case oq.notify <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -24,6 +24,7 @@ import (
|
|||
"github.com/getsentry/sentry-go"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/sirupsen/logrus"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/tidwall/gjson"
|
||||
|
||||
|
|
@ -162,23 +163,25 @@ func (oqs *OutgoingQueues) getQueue(destination gomatrixserverlib.ServerName) *d
|
|||
if !ok || oq == nil {
|
||||
destinationQueueTotal.Inc()
|
||||
oq = &destinationQueue{
|
||||
queues: oqs,
|
||||
db: oqs.db,
|
||||
process: oqs.process,
|
||||
rsAPI: oqs.rsAPI,
|
||||
origin: oqs.origin,
|
||||
destination: destination,
|
||||
client: oqs.client,
|
||||
statistics: oqs.statistics.ForServer(destination),
|
||||
notify: make(chan struct{}, 1),
|
||||
interruptBackoff: make(chan bool),
|
||||
signing: oqs.signing,
|
||||
queues: oqs,
|
||||
db: oqs.db,
|
||||
process: oqs.process,
|
||||
rsAPI: oqs.rsAPI,
|
||||
origin: oqs.origin,
|
||||
destination: destination,
|
||||
client: oqs.client,
|
||||
statistics: oqs.statistics.ForServer(destination),
|
||||
notify: make(chan struct{}, 1),
|
||||
signing: oqs.signing,
|
||||
}
|
||||
oq.statistics.AssignBackoffNotifier(oq.handleBackoffNotifier)
|
||||
oqs.queues[destination] = oq
|
||||
}
|
||||
return oq
|
||||
}
|
||||
|
||||
// clearQueue removes the queue for the provided destination from the
|
||||
// set of destination queues.
|
||||
func (oqs *OutgoingQueues) clearQueue(oq *destinationQueue) {
|
||||
oqs.queuesMutex.Lock()
|
||||
defer oqs.queuesMutex.Unlock()
|
||||
|
|
@ -244,12 +247,35 @@ func (oqs *OutgoingQueues) SendEvent(
|
|||
return fmt.Errorf("sendevent: oqs.db.StoreJSON: %w", err)
|
||||
}
|
||||
|
||||
destQueues := make([]*destinationQueue, 0, len(destmap))
|
||||
for destination := range destmap {
|
||||
if queue := oqs.getQueue(destination); queue != nil {
|
||||
queue.sendEvent(ev, nid)
|
||||
destQueues = append(destQueues, queue)
|
||||
} else {
|
||||
delete(destmap, destination)
|
||||
}
|
||||
}
|
||||
|
||||
// Create a database entry that associates the given PDU NID with
|
||||
// this destinations queue. We'll then be able to retrieve the PDU
|
||||
// later.
|
||||
if err := oqs.db.AssociatePDUWithDestinations(
|
||||
oqs.process.Context(),
|
||||
destmap,
|
||||
nid, // NIDs from federationapi_queue_json table
|
||||
); err != nil {
|
||||
logrus.WithError(err).Errorf("failed to associate PDUs %q with destinations", nid)
|
||||
return err
|
||||
}
|
||||
|
||||
// NOTE : PDUs should be associated with destinations before sending
|
||||
// them, otherwise this is technically a race.
|
||||
// If the send completes before they are associated then they won't
|
||||
// get properly cleaned up in the database.
|
||||
for _, queue := range destQueues {
|
||||
queue.sendEvent(ev, nid)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
@ -318,12 +344,37 @@ func (oqs *OutgoingQueues) SendEDU(
|
|||
return fmt.Errorf("sendevent: oqs.db.StoreJSON: %w", err)
|
||||
}
|
||||
|
||||
destQueues := make([]*destinationQueue, 0, len(destmap))
|
||||
for destination := range destmap {
|
||||
if queue := oqs.getQueue(destination); queue != nil {
|
||||
queue.sendEDU(e, nid)
|
||||
destQueues = append(destQueues, queue)
|
||||
} else {
|
||||
delete(destmap, destination)
|
||||
}
|
||||
}
|
||||
|
||||
// Create a database entry that associates the given PDU NID with
|
||||
// these destination queues. We'll then be able to retrieve the PDU
|
||||
// later.
|
||||
if err := oqs.db.AssociateEDUWithDestinations(
|
||||
oqs.process.Context(),
|
||||
destmap, // the destination server names
|
||||
nid, // NIDs from federationapi_queue_json table
|
||||
e.Type,
|
||||
nil, // this will use the default expireEDUTypes map
|
||||
); err != nil {
|
||||
logrus.WithError(err).Errorf("failed to associate EDU with destinations")
|
||||
return err
|
||||
}
|
||||
|
||||
// NOTE : EDUs should be associated with destinations before sending
|
||||
// them, otherwise this is technically a race.
|
||||
// If the send completes before they are associated then they won't
|
||||
// get properly cleaned up in the database.
|
||||
for _, queue := range destQueues {
|
||||
queue.sendEDU(e, nid)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
@ -332,7 +383,9 @@ func (oqs *OutgoingQueues) RetryServer(srv gomatrixserverlib.ServerName) {
|
|||
if oqs.disabled {
|
||||
return
|
||||
}
|
||||
oqs.statistics.ForServer(srv).RemoveBlacklist()
|
||||
if queue := oqs.getQueue(srv); queue != nil {
|
||||
queue.statistics.ClearBackoff()
|
||||
queue.wakeQueueIfNeeded()
|
||||
}
|
||||
}
|
||||
|
|
|
|||
1060
federationapi/queue/queue_test.go
Normal file
1060
federationapi/queue/queue_test.go
Normal file
File diff suppressed because it is too large
Load diff
|
|
@ -2,24 +2,29 @@ package routing
|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strconv"
|
||||
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
"github.com/matrix-org/util"
|
||||
|
||||
"github.com/matrix-org/dendrite/clientapi/httputil"
|
||||
"github.com/matrix-org/dendrite/clientapi/jsonerror"
|
||||
roomserverAPI "github.com/matrix-org/dendrite/roomserver/api"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
"github.com/matrix-org/util"
|
||||
)
|
||||
|
||||
type PublicRoomReq struct {
|
||||
Since string `json:"since,omitempty"`
|
||||
Limit int16 `json:"limit,omitempty"`
|
||||
Filter filter `json:"filter,omitempty"`
|
||||
Since string `json:"since,omitempty"`
|
||||
Limit int16 `json:"limit,omitempty"`
|
||||
Filter filter `json:"filter,omitempty"`
|
||||
IncludeAllNetworks bool `json:"include_all_networks,omitempty"`
|
||||
NetworkID string `json:"third_party_instance_id,omitempty"`
|
||||
}
|
||||
|
||||
type filter struct {
|
||||
SearchTerms string `json:"generic_search_term,omitempty"`
|
||||
SearchTerms string `json:"generic_search_term,omitempty"`
|
||||
RoomTypes []string `json:"room_types,omitempty"`
|
||||
}
|
||||
|
||||
// GetPostPublicRooms implements GET and POST /publicRooms
|
||||
|
|
@ -57,8 +62,14 @@ func publicRooms(
|
|||
return nil, err
|
||||
}
|
||||
|
||||
if request.IncludeAllNetworks && request.NetworkID != "" {
|
||||
return nil, fmt.Errorf("include_all_networks and third_party_instance_id can not be used together")
|
||||
}
|
||||
|
||||
var queryRes roomserverAPI.QueryPublishedRoomsResponse
|
||||
err = rsAPI.QueryPublishedRooms(ctx, &roomserverAPI.QueryPublishedRoomsRequest{}, &queryRes)
|
||||
err = rsAPI.QueryPublishedRooms(ctx, &roomserverAPI.QueryPublishedRoomsRequest{
|
||||
NetworkID: request.NetworkID,
|
||||
}, &queryRes)
|
||||
if err != nil {
|
||||
util.GetLogger(ctx).WithError(err).Error("QueryPublishedRooms failed")
|
||||
return nil, err
|
||||
|
|
|
|||
|
|
@ -124,7 +124,7 @@ func Setup(
|
|||
|
||||
mu := internal.NewMutexByRoom()
|
||||
v1fedmux.Handle("/send/{txnID}", MakeFedAPI(
|
||||
"federation_send", cfg.Matrix.ServerName, keys, wakeup,
|
||||
"federation_send", cfg.Matrix.ServerName, cfg.Matrix.IsLocalServerName, keys, wakeup,
|
||||
func(httpReq *http.Request, request *gomatrixserverlib.FederationRequest, vars map[string]string) util.JSONResponse {
|
||||
return Send(
|
||||
httpReq, request, gomatrixserverlib.TransactionID(vars["txnID"]),
|
||||
|
|
@ -134,7 +134,7 @@ func Setup(
|
|||
)).Methods(http.MethodPut, http.MethodOptions)
|
||||
|
||||
v1fedmux.Handle("/invite/{roomID}/{eventID}", MakeFedAPI(
|
||||
"federation_invite", cfg.Matrix.ServerName, keys, wakeup,
|
||||
"federation_invite", cfg.Matrix.ServerName, cfg.Matrix.IsLocalServerName, keys, wakeup,
|
||||
func(httpReq *http.Request, request *gomatrixserverlib.FederationRequest, vars map[string]string) util.JSONResponse {
|
||||
if roomserverAPI.IsServerBannedFromRoom(httpReq.Context(), rsAPI, vars["roomID"], request.Origin()) {
|
||||
return util.JSONResponse{
|
||||
|
|
@ -150,7 +150,7 @@ func Setup(
|
|||
)).Methods(http.MethodPut, http.MethodOptions)
|
||||
|
||||
v2fedmux.Handle("/invite/{roomID}/{eventID}", MakeFedAPI(
|
||||
"federation_invite", cfg.Matrix.ServerName, keys, wakeup,
|
||||
"federation_invite", cfg.Matrix.ServerName, cfg.Matrix.IsLocalServerName, keys, wakeup,
|
||||
func(httpReq *http.Request, request *gomatrixserverlib.FederationRequest, vars map[string]string) util.JSONResponse {
|
||||
if roomserverAPI.IsServerBannedFromRoom(httpReq.Context(), rsAPI, vars["roomID"], request.Origin()) {
|
||||
return util.JSONResponse{
|
||||
|
|
@ -172,7 +172,7 @@ func Setup(
|
|||
)).Methods(http.MethodPost, http.MethodOptions)
|
||||
|
||||
v1fedmux.Handle("/exchange_third_party_invite/{roomID}", MakeFedAPI(
|
||||
"exchange_third_party_invite", cfg.Matrix.ServerName, keys, wakeup,
|
||||
"exchange_third_party_invite", cfg.Matrix.ServerName, cfg.Matrix.IsLocalServerName, keys, wakeup,
|
||||
func(httpReq *http.Request, request *gomatrixserverlib.FederationRequest, vars map[string]string) util.JSONResponse {
|
||||
return ExchangeThirdPartyInvite(
|
||||
httpReq, request, vars["roomID"], rsAPI, cfg, federation,
|
||||
|
|
@ -181,7 +181,7 @@ func Setup(
|
|||
)).Methods(http.MethodPut, http.MethodOptions)
|
||||
|
||||
v1fedmux.Handle("/event/{eventID}", MakeFedAPI(
|
||||
"federation_get_event", cfg.Matrix.ServerName, keys, wakeup,
|
||||
"federation_get_event", cfg.Matrix.ServerName, cfg.Matrix.IsLocalServerName, keys, wakeup,
|
||||
func(httpReq *http.Request, request *gomatrixserverlib.FederationRequest, vars map[string]string) util.JSONResponse {
|
||||
return GetEvent(
|
||||
httpReq.Context(), request, rsAPI, vars["eventID"], cfg.Matrix.ServerName,
|
||||
|
|
@ -190,7 +190,7 @@ func Setup(
|
|||
)).Methods(http.MethodGet)
|
||||
|
||||
v1fedmux.Handle("/state/{roomID}", MakeFedAPI(
|
||||
"federation_get_state", cfg.Matrix.ServerName, keys, wakeup,
|
||||
"federation_get_state", cfg.Matrix.ServerName, cfg.Matrix.IsLocalServerName, keys, wakeup,
|
||||
func(httpReq *http.Request, request *gomatrixserverlib.FederationRequest, vars map[string]string) util.JSONResponse {
|
||||
if roomserverAPI.IsServerBannedFromRoom(httpReq.Context(), rsAPI, vars["roomID"], request.Origin()) {
|
||||
return util.JSONResponse{
|
||||
|
|
@ -205,7 +205,7 @@ func Setup(
|
|||
)).Methods(http.MethodGet)
|
||||
|
||||
v1fedmux.Handle("/state_ids/{roomID}", MakeFedAPI(
|
||||
"federation_get_state_ids", cfg.Matrix.ServerName, keys, wakeup,
|
||||
"federation_get_state_ids", cfg.Matrix.ServerName, cfg.Matrix.IsLocalServerName, keys, wakeup,
|
||||
func(httpReq *http.Request, request *gomatrixserverlib.FederationRequest, vars map[string]string) util.JSONResponse {
|
||||
if roomserverAPI.IsServerBannedFromRoom(httpReq.Context(), rsAPI, vars["roomID"], request.Origin()) {
|
||||
return util.JSONResponse{
|
||||
|
|
@ -220,7 +220,7 @@ func Setup(
|
|||
)).Methods(http.MethodGet)
|
||||
|
||||
v1fedmux.Handle("/event_auth/{roomID}/{eventID}", MakeFedAPI(
|
||||
"federation_get_event_auth", cfg.Matrix.ServerName, keys, wakeup,
|
||||
"federation_get_event_auth", cfg.Matrix.ServerName, cfg.Matrix.IsLocalServerName, keys, wakeup,
|
||||
func(httpReq *http.Request, request *gomatrixserverlib.FederationRequest, vars map[string]string) util.JSONResponse {
|
||||
if roomserverAPI.IsServerBannedFromRoom(httpReq.Context(), rsAPI, vars["roomID"], request.Origin()) {
|
||||
return util.JSONResponse{
|
||||
|
|
@ -235,7 +235,7 @@ func Setup(
|
|||
)).Methods(http.MethodGet)
|
||||
|
||||
v1fedmux.Handle("/query/directory", MakeFedAPI(
|
||||
"federation_query_room_alias", cfg.Matrix.ServerName, keys, wakeup,
|
||||
"federation_query_room_alias", cfg.Matrix.ServerName, cfg.Matrix.IsLocalServerName, keys, wakeup,
|
||||
func(httpReq *http.Request, request *gomatrixserverlib.FederationRequest, vars map[string]string) util.JSONResponse {
|
||||
return RoomAliasToID(
|
||||
httpReq, federation, cfg, rsAPI, fsAPI,
|
||||
|
|
@ -244,7 +244,7 @@ func Setup(
|
|||
)).Methods(http.MethodGet)
|
||||
|
||||
v1fedmux.Handle("/query/profile", MakeFedAPI(
|
||||
"federation_query_profile", cfg.Matrix.ServerName, keys, wakeup,
|
||||
"federation_query_profile", cfg.Matrix.ServerName, cfg.Matrix.IsLocalServerName, keys, wakeup,
|
||||
func(httpReq *http.Request, request *gomatrixserverlib.FederationRequest, vars map[string]string) util.JSONResponse {
|
||||
return GetProfile(
|
||||
httpReq, userAPI, cfg,
|
||||
|
|
@ -253,7 +253,7 @@ func Setup(
|
|||
)).Methods(http.MethodGet)
|
||||
|
||||
v1fedmux.Handle("/user/devices/{userID}", MakeFedAPI(
|
||||
"federation_user_devices", cfg.Matrix.ServerName, keys, wakeup,
|
||||
"federation_user_devices", cfg.Matrix.ServerName, cfg.Matrix.IsLocalServerName, keys, wakeup,
|
||||
func(httpReq *http.Request, request *gomatrixserverlib.FederationRequest, vars map[string]string) util.JSONResponse {
|
||||
return GetUserDevices(
|
||||
httpReq, keyAPI, vars["userID"],
|
||||
|
|
@ -263,7 +263,7 @@ func Setup(
|
|||
|
||||
if mscCfg.Enabled("msc2444") {
|
||||
v1fedmux.Handle("/peek/{roomID}/{peekID}", MakeFedAPI(
|
||||
"federation_peek", cfg.Matrix.ServerName, keys, wakeup,
|
||||
"federation_peek", cfg.Matrix.ServerName, cfg.Matrix.IsLocalServerName, keys, wakeup,
|
||||
func(httpReq *http.Request, request *gomatrixserverlib.FederationRequest, vars map[string]string) util.JSONResponse {
|
||||
if roomserverAPI.IsServerBannedFromRoom(httpReq.Context(), rsAPI, vars["roomID"], request.Origin()) {
|
||||
return util.JSONResponse{
|
||||
|
|
@ -294,7 +294,7 @@ func Setup(
|
|||
}
|
||||
|
||||
v1fedmux.Handle("/make_join/{roomID}/{userID}", MakeFedAPI(
|
||||
"federation_make_join", cfg.Matrix.ServerName, keys, wakeup,
|
||||
"federation_make_join", cfg.Matrix.ServerName, cfg.Matrix.IsLocalServerName, keys, wakeup,
|
||||
func(httpReq *http.Request, request *gomatrixserverlib.FederationRequest, vars map[string]string) util.JSONResponse {
|
||||
if roomserverAPI.IsServerBannedFromRoom(httpReq.Context(), rsAPI, vars["roomID"], request.Origin()) {
|
||||
return util.JSONResponse{
|
||||
|
|
@ -325,7 +325,7 @@ func Setup(
|
|||
)).Methods(http.MethodGet)
|
||||
|
||||
v1fedmux.Handle("/send_join/{roomID}/{eventID}", MakeFedAPI(
|
||||
"federation_send_join", cfg.Matrix.ServerName, keys, wakeup,
|
||||
"federation_send_join", cfg.Matrix.ServerName, cfg.Matrix.IsLocalServerName, keys, wakeup,
|
||||
func(httpReq *http.Request, request *gomatrixserverlib.FederationRequest, vars map[string]string) util.JSONResponse {
|
||||
if roomserverAPI.IsServerBannedFromRoom(httpReq.Context(), rsAPI, vars["roomID"], request.Origin()) {
|
||||
return util.JSONResponse{
|
||||
|
|
@ -357,7 +357,7 @@ func Setup(
|
|||
)).Methods(http.MethodPut)
|
||||
|
||||
v2fedmux.Handle("/send_join/{roomID}/{eventID}", MakeFedAPI(
|
||||
"federation_send_join", cfg.Matrix.ServerName, keys, wakeup,
|
||||
"federation_send_join", cfg.Matrix.ServerName, cfg.Matrix.IsLocalServerName, keys, wakeup,
|
||||
func(httpReq *http.Request, request *gomatrixserverlib.FederationRequest, vars map[string]string) util.JSONResponse {
|
||||
if roomserverAPI.IsServerBannedFromRoom(httpReq.Context(), rsAPI, vars["roomID"], request.Origin()) {
|
||||
return util.JSONResponse{
|
||||
|
|
@ -374,7 +374,7 @@ func Setup(
|
|||
)).Methods(http.MethodPut)
|
||||
|
||||
v1fedmux.Handle("/make_leave/{roomID}/{eventID}", MakeFedAPI(
|
||||
"federation_make_leave", cfg.Matrix.ServerName, keys, wakeup,
|
||||
"federation_make_leave", cfg.Matrix.ServerName, cfg.Matrix.IsLocalServerName, keys, wakeup,
|
||||
func(httpReq *http.Request, request *gomatrixserverlib.FederationRequest, vars map[string]string) util.JSONResponse {
|
||||
if roomserverAPI.IsServerBannedFromRoom(httpReq.Context(), rsAPI, vars["roomID"], request.Origin()) {
|
||||
return util.JSONResponse{
|
||||
|
|
@ -391,7 +391,7 @@ func Setup(
|
|||
)).Methods(http.MethodGet)
|
||||
|
||||
v1fedmux.Handle("/send_leave/{roomID}/{eventID}", MakeFedAPI(
|
||||
"federation_send_leave", cfg.Matrix.ServerName, keys, wakeup,
|
||||
"federation_send_leave", cfg.Matrix.ServerName, cfg.Matrix.IsLocalServerName, keys, wakeup,
|
||||
func(httpReq *http.Request, request *gomatrixserverlib.FederationRequest, vars map[string]string) util.JSONResponse {
|
||||
if roomserverAPI.IsServerBannedFromRoom(httpReq.Context(), rsAPI, vars["roomID"], request.Origin()) {
|
||||
return util.JSONResponse{
|
||||
|
|
@ -423,7 +423,7 @@ func Setup(
|
|||
)).Methods(http.MethodPut)
|
||||
|
||||
v2fedmux.Handle("/send_leave/{roomID}/{eventID}", MakeFedAPI(
|
||||
"federation_send_leave", cfg.Matrix.ServerName, keys, wakeup,
|
||||
"federation_send_leave", cfg.Matrix.ServerName, cfg.Matrix.IsLocalServerName, keys, wakeup,
|
||||
func(httpReq *http.Request, request *gomatrixserverlib.FederationRequest, vars map[string]string) util.JSONResponse {
|
||||
if roomserverAPI.IsServerBannedFromRoom(httpReq.Context(), rsAPI, vars["roomID"], request.Origin()) {
|
||||
return util.JSONResponse{
|
||||
|
|
@ -447,7 +447,7 @@ func Setup(
|
|||
)).Methods(http.MethodGet)
|
||||
|
||||
v1fedmux.Handle("/get_missing_events/{roomID}", MakeFedAPI(
|
||||
"federation_get_missing_events", cfg.Matrix.ServerName, keys, wakeup,
|
||||
"federation_get_missing_events", cfg.Matrix.ServerName, cfg.Matrix.IsLocalServerName, keys, wakeup,
|
||||
func(httpReq *http.Request, request *gomatrixserverlib.FederationRequest, vars map[string]string) util.JSONResponse {
|
||||
if roomserverAPI.IsServerBannedFromRoom(httpReq.Context(), rsAPI, vars["roomID"], request.Origin()) {
|
||||
return util.JSONResponse{
|
||||
|
|
@ -460,7 +460,7 @@ func Setup(
|
|||
)).Methods(http.MethodPost)
|
||||
|
||||
v1fedmux.Handle("/backfill/{roomID}", MakeFedAPI(
|
||||
"federation_backfill", cfg.Matrix.ServerName, keys, wakeup,
|
||||
"federation_backfill", cfg.Matrix.ServerName, cfg.Matrix.IsLocalServerName, keys, wakeup,
|
||||
func(httpReq *http.Request, request *gomatrixserverlib.FederationRequest, vars map[string]string) util.JSONResponse {
|
||||
if roomserverAPI.IsServerBannedFromRoom(httpReq.Context(), rsAPI, vars["roomID"], request.Origin()) {
|
||||
return util.JSONResponse{
|
||||
|
|
@ -479,14 +479,14 @@ func Setup(
|
|||
).Methods(http.MethodGet, http.MethodPost)
|
||||
|
||||
v1fedmux.Handle("/user/keys/claim", MakeFedAPI(
|
||||
"federation_keys_claim", cfg.Matrix.ServerName, keys, wakeup,
|
||||
"federation_keys_claim", cfg.Matrix.ServerName, cfg.Matrix.IsLocalServerName, keys, wakeup,
|
||||
func(httpReq *http.Request, request *gomatrixserverlib.FederationRequest, vars map[string]string) util.JSONResponse {
|
||||
return ClaimOneTimeKeys(httpReq, request, keyAPI, cfg.Matrix.ServerName)
|
||||
},
|
||||
)).Methods(http.MethodPost)
|
||||
|
||||
v1fedmux.Handle("/user/keys/query", MakeFedAPI(
|
||||
"federation_keys_query", cfg.Matrix.ServerName, keys, wakeup,
|
||||
"federation_keys_query", cfg.Matrix.ServerName, cfg.Matrix.IsLocalServerName, keys, wakeup,
|
||||
func(httpReq *http.Request, request *gomatrixserverlib.FederationRequest, vars map[string]string) util.JSONResponse {
|
||||
return QueryDeviceKeys(httpReq, request, keyAPI, cfg.Matrix.ServerName)
|
||||
},
|
||||
|
|
@ -525,15 +525,15 @@ func ErrorIfLocalServerNotInRoom(
|
|||
|
||||
// MakeFedAPI makes an http.Handler that checks matrix federation authentication.
|
||||
func MakeFedAPI(
|
||||
metricsName string,
|
||||
serverName gomatrixserverlib.ServerName,
|
||||
metricsName string, serverName gomatrixserverlib.ServerName,
|
||||
isLocalServerName func(gomatrixserverlib.ServerName) bool,
|
||||
keyRing gomatrixserverlib.JSONVerifier,
|
||||
wakeup *FederationWakeups,
|
||||
f func(*http.Request, *gomatrixserverlib.FederationRequest, map[string]string) util.JSONResponse,
|
||||
) http.Handler {
|
||||
h := func(req *http.Request) util.JSONResponse {
|
||||
fedReq, errResp := gomatrixserverlib.VerifyHTTPRequest(
|
||||
req, time.Now(), serverName, keyRing,
|
||||
req, time.Now(), serverName, isLocalServerName, keyRing,
|
||||
)
|
||||
if fedReq == nil {
|
||||
return errResp
|
||||
|
|
|
|||
|
|
@ -2,6 +2,7 @@ package statistics
|
|||
|
||||
import (
|
||||
"math"
|
||||
"math/rand"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
|
|
@ -20,12 +21,23 @@ type Statistics struct {
|
|||
servers map[gomatrixserverlib.ServerName]*ServerStatistics
|
||||
mutex sync.RWMutex
|
||||
|
||||
backoffTimers map[gomatrixserverlib.ServerName]*time.Timer
|
||||
backoffMutex sync.RWMutex
|
||||
|
||||
// How many times should we tolerate consecutive failures before we
|
||||
// just blacklist the host altogether? The backoff is exponential,
|
||||
// so the max time here to attempt is 2**failures seconds.
|
||||
FailuresUntilBlacklist uint32
|
||||
}
|
||||
|
||||
func NewStatistics(db storage.Database, failuresUntilBlacklist uint32) Statistics {
|
||||
return Statistics{
|
||||
DB: db,
|
||||
FailuresUntilBlacklist: failuresUntilBlacklist,
|
||||
backoffTimers: make(map[gomatrixserverlib.ServerName]*time.Timer),
|
||||
}
|
||||
}
|
||||
|
||||
// ForServer returns server statistics for the given server name. If it
|
||||
// does not exist, it will create empty statistics and return those.
|
||||
func (s *Statistics) ForServer(serverName gomatrixserverlib.ServerName) *ServerStatistics {
|
||||
|
|
@ -45,7 +57,6 @@ func (s *Statistics) ForServer(serverName gomatrixserverlib.ServerName) *ServerS
|
|||
server = &ServerStatistics{
|
||||
statistics: s,
|
||||
serverName: serverName,
|
||||
interrupt: make(chan struct{}),
|
||||
}
|
||||
s.servers[serverName] = server
|
||||
s.mutex.Unlock()
|
||||
|
|
@ -64,29 +75,43 @@ func (s *Statistics) ForServer(serverName gomatrixserverlib.ServerName) *ServerS
|
|||
// many times we failed etc. It also manages the backoff time and black-
|
||||
// listing a remote host if it remains uncooperative.
|
||||
type ServerStatistics struct {
|
||||
statistics *Statistics //
|
||||
serverName gomatrixserverlib.ServerName //
|
||||
blacklisted atomic.Bool // is the node blacklisted
|
||||
backoffStarted atomic.Bool // is the backoff started
|
||||
backoffUntil atomic.Value // time.Time until this backoff interval ends
|
||||
backoffCount atomic.Uint32 // number of times BackoffDuration has been called
|
||||
interrupt chan struct{} // interrupts the backoff goroutine
|
||||
successCounter atomic.Uint32 // how many times have we succeeded?
|
||||
statistics *Statistics //
|
||||
serverName gomatrixserverlib.ServerName //
|
||||
blacklisted atomic.Bool // is the node blacklisted
|
||||
backoffStarted atomic.Bool // is the backoff started
|
||||
backoffUntil atomic.Value // time.Time until this backoff interval ends
|
||||
backoffCount atomic.Uint32 // number of times BackoffDuration has been called
|
||||
successCounter atomic.Uint32 // how many times have we succeeded?
|
||||
backoffNotifier func() // notifies destination queue when backoff completes
|
||||
notifierMutex sync.Mutex
|
||||
}
|
||||
|
||||
const maxJitterMultiplier = 1.4
|
||||
const minJitterMultiplier = 0.8
|
||||
|
||||
// duration returns how long the next backoff interval should be.
|
||||
func (s *ServerStatistics) duration(count uint32) time.Duration {
|
||||
return time.Second * time.Duration(math.Exp2(float64(count)))
|
||||
// Add some jitter to minimise the chance of having multiple backoffs
|
||||
// ending at the same time.
|
||||
jitter := rand.Float64()*(maxJitterMultiplier-minJitterMultiplier) + minJitterMultiplier
|
||||
duration := time.Millisecond * time.Duration(math.Exp2(float64(count))*jitter*1000)
|
||||
return duration
|
||||
}
|
||||
|
||||
// cancel will interrupt the currently active backoff.
|
||||
func (s *ServerStatistics) cancel() {
|
||||
s.blacklisted.Store(false)
|
||||
s.backoffUntil.Store(time.Time{})
|
||||
select {
|
||||
case s.interrupt <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
|
||||
s.ClearBackoff()
|
||||
}
|
||||
|
||||
// AssignBackoffNotifier configures the channel to send to when
|
||||
// a backoff completes.
|
||||
func (s *ServerStatistics) AssignBackoffNotifier(notifier func()) {
|
||||
s.notifierMutex.Lock()
|
||||
defer s.notifierMutex.Unlock()
|
||||
s.backoffNotifier = notifier
|
||||
}
|
||||
|
||||
// Success updates the server statistics with a new successful
|
||||
|
|
@ -95,8 +120,8 @@ func (s *ServerStatistics) cancel() {
|
|||
// we will unblacklist it.
|
||||
func (s *ServerStatistics) Success() {
|
||||
s.cancel()
|
||||
s.successCounter.Inc()
|
||||
s.backoffCount.Store(0)
|
||||
s.successCounter.Inc()
|
||||
if s.statistics.DB != nil {
|
||||
if err := s.statistics.DB.RemoveServerFromBlacklist(s.serverName); err != nil {
|
||||
logrus.WithError(err).Errorf("Failed to remove %q from blacklist", s.serverName)
|
||||
|
|
@ -105,13 +130,17 @@ func (s *ServerStatistics) Success() {
|
|||
}
|
||||
|
||||
// Failure marks a failure and starts backing off if needed.
|
||||
// The next call to BackoffIfRequired will do the right thing
|
||||
// after this. It will return the time that the current failure
|
||||
// It will return the time that the current failure
|
||||
// will result in backoff waiting until, and a bool signalling
|
||||
// whether we have blacklisted and therefore to give up.
|
||||
func (s *ServerStatistics) Failure() (time.Time, bool) {
|
||||
// Return immediately if we have blacklisted this node.
|
||||
if s.blacklisted.Load() {
|
||||
return time.Time{}, true
|
||||
}
|
||||
|
||||
// If we aren't already backing off, this call will start
|
||||
// a new backoff period. Increase the failure counter and
|
||||
// a new backoff period, increase the failure counter and
|
||||
// start a goroutine which will wait out the backoff and
|
||||
// unset the backoffStarted flag when done.
|
||||
if s.backoffStarted.CompareAndSwap(false, true) {
|
||||
|
|
@@ -122,40 +151,48 @@ func (s *ServerStatistics) Failure() (time.Time, bool) {
logrus.WithError(err).Errorf("Failed to add %q to blacklist", s.serverName)
}
}
s.ClearBackoff()
return time.Time{}, true
}

go func() {
until, ok := s.backoffUntil.Load().(time.Time)
if ok && !until.IsZero() {
select {
case <-time.After(time.Until(until)):
case <-s.interrupt:
}
s.backoffStarted.Store(false)
}
}()
// We're starting a new back off so work out what the next interval
// will be.
count := s.backoffCount.Load()
until := time.Now().Add(s.duration(count))
s.backoffUntil.Store(until)

s.statistics.backoffMutex.Lock()
defer s.statistics.backoffMutex.Unlock()
s.statistics.backoffTimers[s.serverName] = time.AfterFunc(time.Until(until), s.backoffFinished)
}

// Check if we have blacklisted this node.
if s.blacklisted.Load() {
return time.Now(), true
}
return s.backoffUntil.Load().(time.Time), false
}

// If we're already backing off and we haven't yet surpassed
// the deadline then return that. Repeated calls to Failure
// within a single backoff interval will have no side effects.
if until, ok := s.backoffUntil.Load().(time.Time); ok && !time.Now().After(until) {
return until, false
// ClearBackoff stops the backoff timer for this destination if it is running
// and removes the timer from the backoffTimers map.
func (s *ServerStatistics) ClearBackoff() {
// If the timer is still running then stop it so its memory is cleaned up sooner.
s.statistics.backoffMutex.Lock()
defer s.statistics.backoffMutex.Unlock()
if timer, ok := s.statistics.backoffTimers[s.serverName]; ok {
timer.Stop()
}
delete(s.statistics.backoffTimers, s.serverName)

// We're either backing off and have passed the deadline, or
// we aren't backing off, so work out what the next interval
// will be.
count := s.backoffCount.Load()
until := time.Now().Add(s.duration(count))
s.backoffUntil.Store(until)
return until, false
s.backoffStarted.Store(false)
}

// backoffFinished will clear the previous backoff and notify the destination queue.
func (s *ServerStatistics) backoffFinished() {
s.ClearBackoff()

// Notify the destinationQueue if one is currently running.
s.notifierMutex.Lock()
defer s.notifierMutex.Unlock()
if s.backoffNotifier != nil {
s.backoffNotifier()
}
}

// BackoffInfo returns information about the current or previous backoff.
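Below is a minimal, self-contained sketch of the timer-driven bookkeeping this hunk introduces: per-destination time.AfterFunc timers held in a mutex-guarded map, a CompareAndSwap guard so only the first failure in a window arms a timer, and a notifier callback fired when the window ends. The type and method names (`backoffState`, `fail`, `finished`) are illustrative, not dendrite's; only the overall shape follows the diff.

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

// backoffState holds one timer per destination, firing a callback when the
// backoff window for that destination ends.
type backoffState struct {
	mu       sync.Mutex
	timers   map[string]*time.Timer
	started  atomic.Bool
	notifier func()
}

// fail records a failure and, if no backoff is already in flight, arms a
// timer that will call finished() once the window has elapsed.
func (b *backoffState) fail(dest string, wait time.Duration) time.Time {
	until := time.Now().Add(wait)
	if b.started.CompareAndSwap(false, true) { // only the first failure arms the timer
		b.mu.Lock()
		b.timers[dest] = time.AfterFunc(wait, func() { b.finished(dest) })
		b.mu.Unlock()
	}
	return until
}

// finished clears the timer, resets the started flag and wakes the queue.
func (b *backoffState) finished(dest string) {
	b.mu.Lock()
	if t, ok := b.timers[dest]; ok {
		t.Stop()
	}
	delete(b.timers, dest)
	b.mu.Unlock()
	b.started.Store(false)
	if b.notifier != nil {
		b.notifier() // wake the destination queue
	}
}

func main() {
	done := make(chan struct{})
	b := &backoffState{timers: map[string]*time.Timer{}, notifier: func() { close(done) }}
	until := b.fail("remote.example", 50*time.Millisecond)
	fmt.Println("backing off until", until)
	<-done
	fmt.Println("backoff finished, queue notified")
}
```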
|
||||
|
|
@@ -174,6 +211,12 @@ func (s *ServerStatistics) Blacklisted() bool {
return s.blacklisted.Load()
}

// RemoveBlacklist removes the blacklisted status from the server.
func (s *ServerStatistics) RemoveBlacklist() {
s.cancel()
s.backoffCount.Store(0)
}

// SuccessCount returns the number of successful requests. This is
// usually useful in constructing transaction IDs.
func (s *ServerStatistics) SuccessCount() uint32 {
|
||||
|
|
|
|||
|
|
@ -7,9 +7,7 @@ import (
|
|||
)
|
||||
|
||||
func TestBackoff(t *testing.T) {
|
||||
stats := Statistics{
|
||||
FailuresUntilBlacklist: 7,
|
||||
}
|
||||
stats := NewStatistics(nil, 7)
|
||||
server := ServerStatistics{
|
||||
statistics: &stats,
|
||||
serverName: "test.com",
|
||||
|
|
@ -36,7 +34,7 @@ func TestBackoff(t *testing.T) {
|
|||
|
||||
// Get the duration.
|
||||
_, blacklist := server.BackoffInfo()
|
||||
duration := time.Until(until).Round(time.Second)
|
||||
duration := time.Until(until)
|
||||
|
||||
// Unset the backoff, or otherwise our next call will think that
|
||||
// there's a backoff in progress and return the same result.
|
||||
|
|
@@ -57,8 +55,17 @@ func TestBackoff(t *testing.T) {

// Check if the duration is what we expect.
t.Logf("Backoff %d is for %s", i, duration)
if wanted := time.Second * time.Duration(math.Exp2(float64(i))); !blacklist && duration != wanted {
t.Fatalf("Backoff %d should have been %s but was %s", i, wanted, duration)
roundingAllowance := 0.01
minDuration := time.Millisecond * time.Duration(math.Exp2(float64(i))*minJitterMultiplier*1000-roundingAllowance)
maxDuration := time.Millisecond * time.Duration(math.Exp2(float64(i))*maxJitterMultiplier*1000+roundingAllowance)
var inJitterRange bool
if duration >= minDuration && duration <= maxDuration {
inJitterRange = true
} else {
inJitterRange = false
}
if !blacklist && !inJitterRange {
t.Fatalf("Backoff %d should have been between %s and %s but was %s", i, minDuration, maxDuration, duration)
}
}
}
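The updated test accepts any duration inside a jitter window of [2^i * minJitterMultiplier, 2^i * maxJitterMultiplier] seconds instead of an exact power of two. Below is a sketch of a generator that would satisfy that window; the 0.8 and 1.4 multiplier values are placeholder assumptions, not the package's actual constants, and this is not dendrite's duration() implementation.

```go
package main

import (
	"fmt"
	"math"
	"math/rand"
	"time"
)

// Assumed multiplier values, for illustration only.
const (
	minJitterMultiplier = 0.8
	maxJitterMultiplier = 1.4
)

// jitteredBackoff returns a duration inside the window the updated test
// asserts on: [2^count*min, 2^count*max] seconds.
func jitteredBackoff(count uint32) time.Duration {
	base := math.Exp2(float64(count))
	factor := minJitterMultiplier + rand.Float64()*(maxJitterMultiplier-minJitterMultiplier)
	return time.Duration(base * factor * float64(time.Second))
}

func main() {
	for i := uint32(0); i < 5; i++ {
		fmt.Printf("attempt %d: wait %s\n", i, jitteredBackoff(i))
	}
}
```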
|
||||
|
|
|
|||
|
|
@ -18,9 +18,10 @@ import (
|
|||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
|
||||
"github.com/matrix-org/dendrite/federationapi/storage/shared"
|
||||
"github.com/matrix-org/dendrite/federationapi/types"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
)
|
||||
|
||||
type Database interface {
|
||||
|
|
@@ -38,8 +39,8 @@ type Database interface {
GetPendingPDUs(ctx context.Context, serverName gomatrixserverlib.ServerName, limit int) (pdus map[*shared.Receipt]*gomatrixserverlib.HeaderedEvent, err error)
GetPendingEDUs(ctx context.Context, serverName gomatrixserverlib.ServerName, limit int) (edus map[*shared.Receipt]*gomatrixserverlib.EDU, err error)

AssociatePDUWithDestination(ctx context.Context, transactionID gomatrixserverlib.TransactionID, serverName gomatrixserverlib.ServerName, receipt *shared.Receipt) error
AssociateEDUWithDestination(ctx context.Context, serverName gomatrixserverlib.ServerName, receipt *shared.Receipt, eduType string, expireEDUTypes map[string]time.Duration) error
AssociatePDUWithDestinations(ctx context.Context, destinations map[gomatrixserverlib.ServerName]struct{}, receipt *shared.Receipt) error
AssociateEDUWithDestinations(ctx context.Context, destinations map[gomatrixserverlib.ServerName]struct{}, receipt *shared.Receipt, eduType string, expireEDUTypes map[string]time.Duration) error

CleanPDUs(ctx context.Context, serverName gomatrixserverlib.ServerName, receipts []*shared.Receipt) error
CleanEDUs(ctx context.Context, serverName gomatrixserverlib.ServerName, receipts []*shared.Receipt) error
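A hypothetical caller of the new batched PDU variant, queuing one stored JSON blob for several destinations in a single call rather than once per server. It assumes StoreJSON returns a receipt usable with AssociatePDUWithDestinations, as the tests later in this diff do; the helper name and its arguments are illustrative.

```go
package example

import (
	"context"

	"github.com/matrix-org/dendrite/federationapi/storage"
	"github.com/matrix-org/gomatrixserverlib"
)

// queueForAll stores one JSON blob and associates it with every destination
// in a single call, instead of one AssociatePDUWithDestination call per server.
func queueForAll(ctx context.Context, db storage.Database, servers []gomatrixserverlib.ServerName, json string) error {
	destinations := make(map[gomatrixserverlib.ServerName]struct{}, len(servers))
	for _, s := range servers {
		destinations[s] = struct{}{}
	}
	receipt, err := db.StoreJSON(ctx, json)
	if err != nil {
		return err
	}
	return db.AssociatePDUWithDestinations(ctx, destinations, receipt)
}
```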
|
||||
|
|
|
|||
|
|
@ -36,7 +36,7 @@ type Database struct {
|
|||
}
|
||||
|
||||
// NewDatabase opens a new database
|
||||
func NewDatabase(base *base.BaseDendrite, dbProperties *config.DatabaseOptions, cache caching.FederationCache, serverName gomatrixserverlib.ServerName) (*Database, error) {
|
||||
func NewDatabase(base *base.BaseDendrite, dbProperties *config.DatabaseOptions, cache caching.FederationCache, isLocalServerName func(gomatrixserverlib.ServerName) bool) (*Database, error) {
|
||||
var d Database
|
||||
var err error
|
||||
if d.db, d.writer, err = base.DatabaseConnection(dbProperties, sqlutil.NewDummyWriter()); err != nil {
|
||||
|
|
@ -96,7 +96,7 @@ func NewDatabase(base *base.BaseDendrite, dbProperties *config.DatabaseOptions,
|
|||
}
|
||||
d.Database = shared.Database{
|
||||
DB: d.db,
|
||||
ServerName: serverName,
|
||||
IsLocalServerName: isLocalServerName,
|
||||
Cache: cache,
|
||||
Writer: d.writer,
|
||||
FederationJoinedHosts: joinedHosts,
|
||||
|
|
|
|||
|
|
@ -29,7 +29,7 @@ import (
|
|||
|
||||
type Database struct {
|
||||
DB *sql.DB
|
||||
ServerName gomatrixserverlib.ServerName
|
||||
IsLocalServerName func(gomatrixserverlib.ServerName) bool
|
||||
Cache caching.FederationCache
|
||||
Writer sqlutil.Writer
|
||||
FederationQueuePDUs tables.FederationQueuePDUs
|
||||
|
|
@@ -52,6 +52,10 @@ type Receipt struct {
nid int64
}

func NewReceipt(nid int64) Receipt {
return Receipt{nid: nid}
}

func (r *Receipt) String() string {
return fmt.Sprintf("%d", r.nid)
}

@@ -120,7 +124,7 @@ func (d *Database) GetJoinedHostsForRooms(ctx context.Context, roomIDs []string,
}
if excludeSelf {
for i, server := range servers {
if server == d.ServerName {
if d.IsLocalServerName(server) {
servers = append(servers[:i], servers[i+1:]...)
}
}
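Since the database now carries an IsLocalServerName callback instead of a single ServerName, a closure over a set of local (virtual) server names is one plausible way to build it. The helper below is illustrative; the test further down in this diff simply compares against "localhost".

```go
package example

import "github.com/matrix-org/gomatrixserverlib"

// makeIsLocalServerName returns a predicate that reports whether a server name
// is one of the given local names, which equality against a single ServerName
// can no longer express once several local names are in play.
func makeIsLocalServerName(local ...gomatrixserverlib.ServerName) func(gomatrixserverlib.ServerName) bool {
	set := make(map[gomatrixserverlib.ServerName]struct{}, len(local))
	for _, name := range local {
		set[name] = struct{}{}
	}
	return func(server gomatrixserverlib.ServerName) bool {
		_, ok := set[server]
		return ok
	}
}
```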
|
||||
|
|
|
|||
|
|
@ -38,9 +38,9 @@ var defaultExpireEDUTypes = map[string]time.Duration{
|
|||
// AssociateEDUWithDestination creates an association that the
|
||||
// destination queues will use to determine which JSON blobs to send
|
||||
// to which servers.
|
||||
func (d *Database) AssociateEDUWithDestination(
|
||||
func (d *Database) AssociateEDUWithDestinations(
|
||||
ctx context.Context,
|
||||
serverName gomatrixserverlib.ServerName,
|
||||
destinations map[gomatrixserverlib.ServerName]struct{},
|
||||
receipt *Receipt,
|
||||
eduType string,
|
||||
expireEDUTypes map[string]time.Duration,
|
||||
|
|
@ -59,17 +59,18 @@ func (d *Database) AssociateEDUWithDestination(
|
|||
expiresAt = 0
|
||||
}
|
||||
return d.Writer.Do(d.DB, nil, func(txn *sql.Tx) error {
|
||||
if err := d.FederationQueueEDUs.InsertQueueEDU(
|
||||
ctx, // context
|
||||
txn, // SQL transaction
|
||||
eduType, // EDU type for coalescing
|
||||
serverName, // destination server name
|
||||
receipt.nid, // NID from the federationapi_queue_json table
|
||||
expiresAt, // The timestamp this EDU will expire
|
||||
); err != nil {
|
||||
return fmt.Errorf("InsertQueueEDU: %w", err)
|
||||
var err error
|
||||
for destination := range destinations {
|
||||
err = d.FederationQueueEDUs.InsertQueueEDU(
|
||||
ctx, // context
|
||||
txn, // SQL transaction
|
||||
eduType, // EDU type for coalescing
|
||||
destination, // destination server name
|
||||
receipt.nid, // NID from the federationapi_queue_json table
|
||||
expiresAt, // The timestamp this EDU will expire
|
||||
)
|
||||
}
|
||||
return nil
|
||||
return err
|
||||
})
|
||||
}
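A hypothetical EDU-side caller mirroring the updated tests: store a blob once, then associate it with every destination along with a per-type expiry map. The concrete durations below are assumptions for illustration and not the package's defaultExpireEDUTypes values.

```go
package example

import (
	"context"
	"time"

	"github.com/matrix-org/dendrite/federationapi/storage"
	"github.com/matrix-org/gomatrixserverlib"
)

// Assumed per-type expiry durations, for illustration only.
var expireEDUTypes = map[string]time.Duration{
	gomatrixserverlib.MReceipt:        time.Hour,
	gomatrixserverlib.MDirectToDevice: 24 * time.Hour,
}

// queueReceiptForAll queues one receipt EDU for a whole set of destinations
// in a single AssociateEDUWithDestinations call.
func queueReceiptForAll(ctx context.Context, db storage.Database, destinations map[gomatrixserverlib.ServerName]struct{}, json string) error {
	receipt, err := db.StoreJSON(ctx, json)
	if err != nil {
		return err
	}
	return db.AssociateEDUWithDestinations(ctx, destinations, receipt, gomatrixserverlib.MReceipt, expireEDUTypes)
}
```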
|
||||
|
||||
|
|
|
|||
|
|
@ -27,23 +27,23 @@ import (
|
|||
// AssociatePDUWithDestination creates an association that the
|
||||
// destination queues will use to determine which JSON blobs to send
|
||||
// to which servers.
|
||||
func (d *Database) AssociatePDUWithDestination(
|
||||
func (d *Database) AssociatePDUWithDestinations(
|
||||
ctx context.Context,
|
||||
transactionID gomatrixserverlib.TransactionID,
|
||||
serverName gomatrixserverlib.ServerName,
|
||||
destinations map[gomatrixserverlib.ServerName]struct{},
|
||||
receipt *Receipt,
|
||||
) error {
|
||||
return d.Writer.Do(d.DB, nil, func(txn *sql.Tx) error {
|
||||
if err := d.FederationQueuePDUs.InsertQueuePDU(
|
||||
ctx, // context
|
||||
txn, // SQL transaction
|
||||
transactionID, // transaction ID
|
||||
serverName, // destination server name
|
||||
receipt.nid, // NID from the federationapi_queue_json table
|
||||
); err != nil {
|
||||
return fmt.Errorf("InsertQueuePDU: %w", err)
|
||||
var err error
|
||||
for destination := range destinations {
|
||||
err = d.FederationQueuePDUs.InsertQueuePDU(
|
||||
ctx, // context
|
||||
txn, // SQL transaction
|
||||
"", // transaction ID
|
||||
destination, // destination server name
|
||||
receipt.nid, // NID from the federationapi_queue_json table
|
||||
)
|
||||
}
|
||||
return nil
|
||||
return err
|
||||
})
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -35,7 +35,7 @@ type Database struct {
|
|||
}
|
||||
|
||||
// NewDatabase opens a new database
|
||||
func NewDatabase(base *base.BaseDendrite, dbProperties *config.DatabaseOptions, cache caching.FederationCache, serverName gomatrixserverlib.ServerName) (*Database, error) {
|
||||
func NewDatabase(base *base.BaseDendrite, dbProperties *config.DatabaseOptions, cache caching.FederationCache, isLocalServerName func(gomatrixserverlib.ServerName) bool) (*Database, error) {
|
||||
var d Database
|
||||
var err error
|
||||
if d.db, d.writer, err = base.DatabaseConnection(dbProperties, sqlutil.NewExclusiveWriter()); err != nil {
|
||||
|
|
@ -95,7 +95,7 @@ func NewDatabase(base *base.BaseDendrite, dbProperties *config.DatabaseOptions,
|
|||
}
|
||||
d.Database = shared.Database{
|
||||
DB: d.db,
|
||||
ServerName: serverName,
|
||||
IsLocalServerName: isLocalServerName,
|
||||
Cache: cache,
|
||||
Writer: d.writer,
|
||||
FederationJoinedHosts: joinedHosts,
|
||||
|
|
|
|||
|
|
@@ -29,12 +29,12 @@ import (
)

// NewDatabase opens a new database
func NewDatabase(base *base.BaseDendrite, dbProperties *config.DatabaseOptions, cache caching.FederationCache, serverName gomatrixserverlib.ServerName) (Database, error) {
func NewDatabase(base *base.BaseDendrite, dbProperties *config.DatabaseOptions, cache caching.FederationCache, isLocalServerName func(gomatrixserverlib.ServerName) bool) (Database, error) {
switch {
case dbProperties.ConnectionString.IsSQLite():
return sqlite3.NewDatabase(base, dbProperties, cache, serverName)
return sqlite3.NewDatabase(base, dbProperties, cache, isLocalServerName)
case dbProperties.ConnectionString.IsPostgres():
return postgres.NewDatabase(base, dbProperties, cache, serverName)
return postgres.NewDatabase(base, dbProperties, cache, isLocalServerName)
default:
return nil, fmt.Errorf("unexpected database type")
}
|
||||
|
|
|
|||
|
|
@ -19,7 +19,7 @@ func mustCreateFederationDatabase(t *testing.T, dbType test.DBType) (storage.Dat
|
|||
connStr, dbClose := test.PrepareDBConnectionString(t, dbType)
|
||||
db, err := storage.NewDatabase(b, &config.DatabaseOptions{
|
||||
ConnectionString: config.DataSource(connStr),
|
||||
}, b.Caches, b.Cfg.Global.ServerName)
|
||||
}, b.Caches, func(server gomatrixserverlib.ServerName) bool { return server == "localhost" })
|
||||
if err != nil {
|
||||
t.Fatalf("NewDatabase returned %s", err)
|
||||
}
|
||||
|
|
@ -35,6 +35,7 @@ func TestExpireEDUs(t *testing.T) {
|
|||
}
|
||||
|
||||
ctx := context.Background()
|
||||
destinations := map[gomatrixserverlib.ServerName]struct{}{"localhost": {}}
|
||||
test.WithAllDatabases(t, func(t *testing.T, dbType test.DBType) {
|
||||
db, close := mustCreateFederationDatabase(t, dbType)
|
||||
defer close()
|
||||
|
|
@ -43,7 +44,7 @@ func TestExpireEDUs(t *testing.T) {
|
|||
receipt, err := db.StoreJSON(ctx, "{}")
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = db.AssociateEDUWithDestination(ctx, "localhost", receipt, gomatrixserverlib.MReceipt, expireEDUTypes)
|
||||
err = db.AssociateEDUWithDestinations(ctx, destinations, receipt, gomatrixserverlib.MReceipt, expireEDUTypes)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
// add data without expiry
|
||||
|
|
@ -51,7 +52,7 @@ func TestExpireEDUs(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
|
||||
// m.read_marker gets the default expiry of 24h, so won't be deleted further down in this test
|
||||
err = db.AssociateEDUWithDestination(ctx, "localhost", receipt, "m.read_marker", expireEDUTypes)
|
||||
err = db.AssociateEDUWithDestinations(ctx, destinations, receipt, "m.read_marker", expireEDUTypes)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Delete expired EDUs
|
||||
|
|
@ -67,7 +68,7 @@ func TestExpireEDUs(t *testing.T) {
|
|||
receipt, err = db.StoreJSON(ctx, "{}")
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = db.AssociateEDUWithDestination(ctx, "localhost", receipt, gomatrixserverlib.MDirectToDevice, expireEDUTypes)
|
||||
err = db.AssociateEDUWithDestinations(ctx, destinations, receipt, gomatrixserverlib.MDirectToDevice, expireEDUTypes)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = db.DeleteExpiredEDUs(ctx)
|
||||
|
|
|
|||
28
go.mod
28
go.mod
|
|
@ -1,10 +1,8 @@
|
|||
module github.com/matrix-org/dendrite
|
||||
|
||||
go 1.19
|
||||
|
||||
require (
|
||||
github.com/Arceliar/ironwood v0.0.0-20220924160422-ed4b6d4750b6
|
||||
github.com/Arceliar/phony v0.0.0-20220903101357-530938a4b13d
|
||||
github.com/Arceliar/ironwood v0.0.0-20221025225125-45b4281814c2
|
||||
github.com/Arceliar/phony v0.0.0-20210209235338-dde1a8dca979
|
||||
github.com/DATA-DOG/go-sqlmock v1.5.0
|
||||
github.com/MFAshby/stdemuxerhook v1.0.0
|
||||
github.com/Masterminds/semver/v3 v3.1.1
|
||||
|
|
@ -15,7 +13,7 @@ require (
|
|||
github.com/docker/docker v20.10.19+incompatible
|
||||
github.com/docker/go-connections v0.4.0
|
||||
github.com/ethereum/go-ethereum v1.10.25
|
||||
github.com/getsentry/sentry-go v0.13.0
|
||||
github.com/getsentry/sentry-go v0.14.0
|
||||
github.com/gologme/log v1.3.0
|
||||
github.com/google/go-cmp v0.5.9
|
||||
github.com/google/uuid v1.3.0
|
||||
|
|
@ -27,8 +25,8 @@ require (
|
|||
github.com/matrix-org/dugong v0.0.0-20210921133753-66e6b1c67e2e
|
||||
github.com/matrix-org/go-sqlite3-js v0.0.0-20220419092513-28aa791a1c91
|
||||
github.com/matrix-org/gomatrix v0.0.0-20220926102614-ceba4d9f7530
|
||||
github.com/matrix-org/gomatrixserverlib v0.0.0-20221014061925-a132619fa241
|
||||
github.com/matrix-org/pinecone v0.0.0-20221007145426-3adc85477dd3
|
||||
github.com/matrix-org/gomatrixserverlib v0.0.0-20221031151122-0885c35ebe74
|
||||
github.com/matrix-org/pinecone v0.0.0-20221026160848-639feeff74d6
|
||||
github.com/matrix-org/util v0.0.0-20200807132607-55161520e1d4
|
||||
github.com/mattn/go-sqlite3 v1.14.15
|
||||
github.com/nats-io/nats-server/v2 v2.9.3
|
||||
|
|
@ -47,7 +45,7 @@ require (
|
|||
github.com/tidwall/sjson v1.2.5
|
||||
github.com/uber/jaeger-client-go v2.30.0+incompatible
|
||||
github.com/uber/jaeger-lib v2.4.1+incompatible
|
||||
github.com/yggdrasil-network/yggdrasil-go v0.4.5-0.20220901155642-4f2abece817c
|
||||
github.com/yggdrasil-network/yggdrasil-go v0.4.6
|
||||
go.uber.org/atomic v1.10.0
|
||||
golang.org/x/crypto v0.0.0-20221012134737-56aed061732a
|
||||
golang.org/x/image v0.0.0-20220902085622-e7cb96979f69
|
||||
|
|
@ -56,6 +54,7 @@ require (
|
|||
golang.org/x/term v0.0.0-20220919170432-7a66f970e087
|
||||
gopkg.in/h2non/bimg.v1 v1.1.9
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
gotest.tools/v3 v3.4.0
|
||||
nhooyr.io/websocket v1.8.7
|
||||
)
|
||||
|
||||
|
|
@ -64,10 +63,8 @@ require (
|
|||
github.com/Microsoft/go-winio v0.6.0 // indirect
|
||||
github.com/RoaringBitmap/roaring v1.2.1 // indirect
|
||||
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 // indirect
|
||||
github.com/anacrolix/envpprof v1.2.1 // indirect
|
||||
github.com/anacrolix/missinggo v1.3.0 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/bits-and-blooms/bitset v1.2.0 // indirect
|
||||
github.com/bits-and-blooms/bitset v1.3.3 // indirect
|
||||
github.com/blevesearch/bleve_index_api v1.0.3 // indirect
|
||||
github.com/blevesearch/geo v0.1.15 // indirect
|
||||
github.com/blevesearch/go-porterstemmer v1.0.3 // indirect
|
||||
|
|
@ -83,7 +80,7 @@ require (
|
|||
github.com/blevesearch/zapx/v13 v13.3.5 // indirect
|
||||
github.com/blevesearch/zapx/v14 v14.3.5 // indirect
|
||||
github.com/blevesearch/zapx/v15 v15.3.5 // indirect
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.2.1 // indirect
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/dchest/uniuri v0.0.0-20200228104902-7aecb25e1fe5 // indirect
|
||||
|
|
@ -96,9 +93,9 @@ require (
|
|||
github.com/go-ole/go-ole v1.2.1 // indirect
|
||||
github.com/go-stack/stack v1.8.0 // indirect
|
||||
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect
|
||||
github.com/gogo/protobuf v1.2.0 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/geo v0.0.0-20210211234256-740aa86cb551 // indirect
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect
|
||||
github.com/golang/glog v1.0.0 // indirect
|
||||
github.com/golang/mock v1.6.0 // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/golang/snappy v0.0.4 // indirect
|
||||
|
|
@ -148,5 +145,6 @@ require (
|
|||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
gotest.tools/v3 v3.4.0 // indirect
|
||||
)
|
||||
|
||||
go 1.19
|
||||
|
|
|
|||
124
go.sum
124
go.sum
|
|
@ -32,19 +32,16 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo
|
|||
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
|
||||
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
|
||||
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
|
||||
crawshaw.io/iox v0.0.0-20181124134642-c51c3df30797/go.mod h1:sXBiorCo8c46JlQV3oXPKINnZ8mcqnye1EkVkqsectk=
|
||||
crawshaw.io/sqlite v0.3.2/go.mod h1:igAO5JulrQ1DbdZdtVq48mnZUBAPOeFzer7VhDWNtW4=
|
||||
dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=
|
||||
dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4=
|
||||
dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
|
||||
git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
|
||||
github.com/Arceliar/ironwood v0.0.0-20220924160422-ed4b6d4750b6 h1:iwL6nm2ibyuHXYimRNtFof7RJfe8JB+6CPDskV7K7gA=
|
||||
github.com/Arceliar/ironwood v0.0.0-20220924160422-ed4b6d4750b6/go.mod h1:RP72rucOFm5udrnEzTmIWLRVGQiV/fSUAQXJ0RST/nk=
|
||||
github.com/Arceliar/ironwood v0.0.0-20221025225125-45b4281814c2 h1:Usab30pNT2i/vZvpXcN9uOr5IO1RZPcUqoGH0DIAPnU=
|
||||
github.com/Arceliar/ironwood v0.0.0-20221025225125-45b4281814c2/go.mod h1:RP72rucOFm5udrnEzTmIWLRVGQiV/fSUAQXJ0RST/nk=
|
||||
github.com/Arceliar/phony v0.0.0-20210209235338-dde1a8dca979 h1:WndgpSW13S32VLQ3ugUxx2EnnWmgba1kCqPkd4Gk1yQ=
|
||||
github.com/Arceliar/phony v0.0.0-20210209235338-dde1a8dca979/go.mod h1:6Lkn+/zJilRMsKmbmG1RPoamiArC6HS73xbwRyp3UyI=
|
||||
github.com/Arceliar/phony v0.0.0-20220903101357-530938a4b13d h1:UK9fsWbWqwIQkMCz1CP+v5pGbsGoWAw6g4AyvMpm1EM=
|
||||
github.com/Arceliar/phony v0.0.0-20220903101357-530938a4b13d/go.mod h1:BCnxhRf47C/dy/e/D2pmB8NkB3dQVIrkD98b220rx5Q=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
|
|
@ -60,15 +57,11 @@ github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0
|
|||
github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg=
|
||||
github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE=
|
||||
github.com/RoaringBitmap/roaring v0.4.7/go.mod h1:8khRDP4HmeXns4xIj9oGrKSz7XTQiJx2zgh7AcNke4w=
|
||||
github.com/RoaringBitmap/roaring v0.4.17/go.mod h1:D3qVegWTmfCaX4Bl5CrBE9hfrSrrXIr8KVNvRsDi1NI=
|
||||
github.com/RoaringBitmap/roaring v0.4.23/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo=
|
||||
github.com/RoaringBitmap/roaring v0.9.4/go.mod h1:icnadbWcNyfEHlYdr+tDlOTih1Bf/h+rzPpv4sbomAA=
|
||||
github.com/RoaringBitmap/roaring v1.2.1 h1:58/LJlg/81wfEHd5L9qsHduznOIhyv4qb1yWcSvVq9A=
|
||||
github.com/RoaringBitmap/roaring v1.2.1/go.mod h1:icnadbWcNyfEHlYdr+tDlOTih1Bf/h+rzPpv4sbomAA=
|
||||
github.com/RyanCarrier/dijkstra v1.1.0/go.mod h1:5agGUBNEtUAGIANmbw09fuO3a2htPEkc1jNH01qxCWA=
|
||||
github.com/RyanCarrier/dijkstra-1 v0.0.0-20170512020943-0e5801a26345/go.mod h1:OK4EvWJ441LQqGzed5NGB6vKBAE34n3z7iayPcEwr30=
|
||||
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
|
||||
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
|
||||
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8=
|
||||
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
|
||||
github.com/VictoriaMetrics/fastcache v1.6.0 h1:C/3Oi3EiBCqufydp1neRZkqcwmEiuRT9c3fqvvgKm5o=
|
||||
|
|
@ -81,36 +74,24 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF
|
|||
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
|
||||
github.com/anacrolix/envpprof v0.0.0-20180404065416-323002cec2fa/go.mod h1:KgHhUaQMc8cC0+cEflSgCFNFbKwi5h54gqtVn8yhP7c=
|
||||
github.com/anacrolix/envpprof v1.0.0/go.mod h1:KgHhUaQMc8cC0+cEflSgCFNFbKwi5h54gqtVn8yhP7c=
|
||||
github.com/anacrolix/envpprof v1.1.0/go.mod h1:My7T5oSqVfEn4MD4Meczkw/f5lSIndGAKu/0SM/rkf4=
|
||||
github.com/anacrolix/envpprof v1.1.1 h1:sHQCyj7HtiSfaZAzL2rJrQdyS7odLqlwO6nhk/tG/j8=
|
||||
github.com/anacrolix/envpprof v1.1.1/go.mod h1:My7T5oSqVfEn4MD4Meczkw/f5lSIndGAKu/0SM/rkf4=
|
||||
github.com/anacrolix/envpprof v1.2.1 h1:25TJe6t/i0AfzzldiGFKCpD+s+dk8lONBcacJZB2rdE=
|
||||
github.com/anacrolix/envpprof v1.2.1/go.mod h1:My7T5oSqVfEn4MD4Meczkw/f5lSIndGAKu/0SM/rkf4=
|
||||
github.com/anacrolix/log v0.3.0 h1:Btxh7GkT4JYWvWJ1uKOwgobf+7q/1eFQaDdCUXCtssw=
|
||||
github.com/anacrolix/log v0.3.0/go.mod h1:lWvLTqzAnCWPJA08T2HCstZi0L1y2Wyvm3FJgwU9jwU=
|
||||
github.com/anacrolix/log v0.6.0 h1:5y+wtTWoecbrAWWuoBCH7UuGFiD6q2jnQxrLK01RC+Q=
|
||||
github.com/anacrolix/log v0.6.0/go.mod h1:lWvLTqzAnCWPJA08T2HCstZi0L1y2Wyvm3FJgwU9jwU=
|
||||
github.com/anacrolix/missinggo v1.1.0/go.mod h1:MBJu3Sk/k3ZfGYcS7z18gwfu72Ey/xopPFJJbTi5yIo=
|
||||
github.com/anacrolix/missinggo v1.1.2-0.20190815015349-b888af804467/go.mod h1:MBJu3Sk/k3ZfGYcS7z18gwfu72Ey/xopPFJJbTi5yIo=
|
||||
github.com/anacrolix/missinggo v1.2.1 h1:0IE3TqX5y5D0IxeMwTyIgqdDew4QrzcXaaEnJQyjHvw=
|
||||
github.com/anacrolix/missinggo v1.2.1/go.mod h1:J5cMhif8jPmFoC3+Uvob3OXXNIhOUikzMt+uUjeM21Y=
|
||||
github.com/anacrolix/missinggo v1.3.0 h1:06HlMsudotL7BAELRZs0yDZ4yVXsHXGi323QBjAVASw=
|
||||
github.com/anacrolix/missinggo v1.3.0/go.mod h1:bqHm8cE8xr+15uVfMG3BFui/TxyB6//H5fwlq/TeqMc=
|
||||
github.com/anacrolix/missinggo/perf v1.0.0/go.mod h1:ljAFWkBuzkO12MQclXzZrosP5urunoLS0Cbvb4V0uMQ=
|
||||
github.com/anacrolix/missinggo/v2 v2.2.0/go.mod h1:o0jgJoYOyaoYQ4E2ZMISVa9c88BbUBVQQW4QeRkNCGY=
|
||||
github.com/anacrolix/missinggo/v2 v2.5.1 h1:aCQcBYPdUaABfXolqKyHMIh7K/xuBUnunxCfS4CeDzE=
|
||||
github.com/anacrolix/missinggo/v2 v2.5.1/go.mod h1:WEjqh2rmKECd0t1VhQkLGTdIWXO6f6NLjp5GlMZ+6FA=
|
||||
github.com/anacrolix/stm v0.2.0/go.mod h1:zoVQRvSiGjGoTmbM0vSLIiaKjWtNPeTvXUSdJQA4hsg=
|
||||
github.com/anacrolix/tagflag v0.0.0-20180109131632-2146c8d41bf0/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw=
|
||||
github.com/anacrolix/tagflag v1.0.0/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw=
|
||||
github.com/anacrolix/tagflag v1.1.0/go.mod h1:Scxs9CV10NQatSmbyjqmqmeQNwGzlNe0CMUMIxqHIG8=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
|
||||
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||
github.com/benbjohnson/immutable v0.2.0/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bits-and-blooms/bitset v1.2.0 h1:Kn4yilvwNtMACtf1eYDlG8H77R07mZSPbMjLyS07ChA=
|
||||
github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
|
||||
github.com/bits-and-blooms/bitset v1.3.3 h1:R1XWiopGiXf66xygsiLpzLo67xEYvMkHw3w+rCOSAwg=
|
||||
github.com/bits-and-blooms/bitset v1.3.3/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
|
||||
github.com/blevesearch/bleve/v2 v2.3.4 h1:SSb7/cwGzo85LWX1jchIsXM8ZiNNMX3shT5lROM63ew=
|
||||
github.com/blevesearch/bleve/v2 v2.3.4/go.mod h1:Ot0zYum8XQRfPcwhae8bZmNyYubynsoMjVvl1jPqL30=
|
||||
github.com/blevesearch/bleve_index_api v1.0.3 h1:DDSWaPXOZZJ2BB73ZTWjKxydAugjwywcqU+91AAqcAg=
|
||||
|
|
@ -154,8 +135,8 @@ github.com/bradfitz/iter v0.0.0-20140124041915-454541ec3da2/go.mod h1:PyRFw1Lt2w
|
|||
github.com/bradfitz/iter v0.0.0-20190303215204-33e6a9893b0c/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo=
|
||||
github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 h1:GKTyiRCL6zVf5wWaqKnf+7Qs6GbEPfd4iMOitWzXJx8=
|
||||
github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8/go.mod h1:spo1JLcs67NmW1aVLEgtA8Yy1elc+X8y5SRW1sFW4Og=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.2.1 h1:xP60mv8fvp+0khmrN0zTdPC3cNm24rfeE6lh2R/Yv3E=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.2.1/go.mod h1:9/CSmJxmuvqzX9Wh2fXMWToLOHhPd11lSPuIupwTkI8=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.2.0 h1:fzn1qaOt32TuLjFlkzYSsBC35Q3KUjT1SwPxiMSCF5k=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.2.0/go.mod h1:U7MHm051Al6XmscBQ0BoNydpOTsFAn707034b5nY8zU=
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U=
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
|
||||
github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
|
||||
|
|
@ -210,9 +191,6 @@ github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3
|
|||
github.com/dustin/go-humanize v0.0.0-20180421182945-02af3965c54e/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
|
||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
|
||||
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
|
||||
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
|
||||
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
|
|
@ -232,22 +210,18 @@ github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmV
|
|||
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
|
||||
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
|
||||
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
|
||||
github.com/getsentry/sentry-go v0.13.0 h1:20dgTiUSfxRB/EhMPtxcL9ZEbM1ZdR+W/7f7NWD+xWo=
|
||||
github.com/getsentry/sentry-go v0.13.0/go.mod h1:EOsfu5ZdvKPfeHYV6pTVQnsjfp30+XA7//UooKNumH0=
|
||||
github.com/getsentry/sentry-go v0.14.0 h1:rlOBkuFZRKKdUnKO+0U3JclRDQKlRu5vVQtkWSQvC70=
|
||||
github.com/getsentry/sentry-go v0.14.0/go.mod h1:RZPJKSw+adu8PBNygiri/A98FqVr2HtRckJk9XVxJ9I=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
|
||||
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
|
||||
github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M=
|
||||
github.com/gin-gonic/gin v1.7.7 h1:3DoBmSbJbZAWqXJC3SLjAPfutPJJRN1U5pALB7EeTTs=
|
||||
github.com/gin-gonic/gin v1.8.1 h1:4+fr/el88TOO3ewCmQr8cx/CtZ/umlIRIs5M4NTNjf8=
|
||||
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
|
||||
github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
|
||||
github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
|
||||
github.com/glycerine/go-unsnap-stream v0.0.0-20190901134440-81cf024a9e0a/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
|
||||
github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
|
||||
github.com/glycerine/goconvey v0.0.0-20190315024820-982ee783a72e/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
|
||||
github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
|
||||
github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w=
|
||||
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
|
||||
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
|
||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
|
|
@ -262,12 +236,12 @@ github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KE
|
|||
github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E=
|
||||
github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
|
||||
github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
|
||||
github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q=
|
||||
github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
|
||||
github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no=
|
||||
github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU=
|
||||
github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
|
||||
github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/jYrnRPArHwAcmLoJZxyho=
|
||||
github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI=
|
||||
github.com/go-playground/validator/v10 v10.4.1 h1:pH2c5ADXtd66mxoE0Zm9SUhxE20r7aM3F26W0hOn+GE=
|
||||
github.com/go-playground/validator/v10 v10.11.1 h1:prmOlTVv+YjZjmRmNSF3VmspqJIxJWXmqUsHwfTRRkQ=
|
||||
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
|
||||
|
|
@ -278,16 +252,18 @@ github.com/gobwas/pool v0.2.0 h1:QEmUOlnSjWtnpRGHF3SauEiOsy82Cup83Vf2LcMlnc8=
|
|||
github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
|
||||
github.com/gobwas/ws v1.0.2 h1:CoAavW/wd/kulfZmSIBt6p24n4j7tHgNVCjsfHVNUbo=
|
||||
github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM=
|
||||
github.com/goccy/go-json v0.9.11 h1:/pAaQDLHEoCq/5FFmSKBswWmK6H0e8g4159Kc/X/nqk=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.0 h1:xU6/SpYbvkNYiptHJYEDRseDLvYE7wSqhYYNy0QSUzI=
|
||||
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY=
|
||||
github.com/golang-jwt/jwt/v4 v4.3.0 h1:kHL1vqdqWNfATmA0FNMdmZNMyZI1U6O31X4rlIPoBog=
|
||||
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
|
||||
github.com/golang/geo v0.0.0-20210211234256-740aa86cb551 h1:gtexQ/VGyN+VVFRXSFiguSNcXmS6rkKT+X7FdIrTtfo=
|
||||
github.com/golang/geo v0.0.0-20210211234256-740aa86cb551/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ=
|
||||
github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
|
|
@ -360,10 +336,6 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+
|
|||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20190309154008-847fc94819f9/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
|
||||
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
|
||||
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
|
||||
github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
|
|
@ -382,10 +354,8 @@ github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T
|
|||
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
|
||||
github.com/holiman/uint256 v1.2.0 h1:gpSYcPLWGv4sG43I2mVLiDZCNDh/EpGjSk8tmtxitHM=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/huandu/xstrings v1.0.0 h1:pO2K/gKgKaat5LdpAhxhluX2GPQMaI3W5FUz/I/UnWk=
|
||||
github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo=
|
||||
github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4=
|
||||
github.com/huandu/xstrings v1.3.1 h1:4jgBlKK6tLKFvO8u5pmYjG91cqytmDCDvGh7ECVFfFs=
|
||||
github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
|
||||
github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
|
|
@ -404,7 +374,6 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm
|
|||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
|
||||
github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/juju/errors v1.0.0 h1:yiq7kjCLll1BiaRuNY53MGI0+EQ3rF6GB+wvboZDefM=
|
||||
github.com/juju/errors v1.0.0/go.mod h1:B5x9thDqx0wIMH3+aLIMP9HjItInYWObRovoCFM5Qe8=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
|
|
@ -412,6 +381,7 @@ github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8
|
|||
github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
|
||||
github.com/kardianos/minwinsvc v1.0.2 h1:JmZKFJQrmTGa/WiW+vkJXKmfzdjabuEW4Tirj5lLdR0=
|
||||
github.com/kardianos/minwinsvc v1.0.2/go.mod h1:LUZNYhNmxujx2tR7FbdxqYJ9XDDoCd3MQcl1o//FWl4=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
|
||||
github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
|
||||
|
|
@ -428,8 +398,8 @@ github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
|||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y=
|
||||
github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
|
||||
github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w=
|
||||
github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw=
|
||||
github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||
github.com/lucas-clemente/quic-go v0.28.1/go.mod h1:oGz5DKK41cJt5+773+BSO9BXDsREY4HLf7+0odGAPO0=
|
||||
|
|
@ -453,15 +423,15 @@ github.com/matrix-org/go-sqlite3-js v0.0.0-20220419092513-28aa791a1c91 h1:s7fexw
|
|||
github.com/matrix-org/go-sqlite3-js v0.0.0-20220419092513-28aa791a1c91/go.mod h1:e+cg2q7C7yE5QnAXgzo512tgFh1RbQLC0+jozuegKgo=
|
||||
github.com/matrix-org/gomatrix v0.0.0-20220926102614-ceba4d9f7530 h1:kHKxCOLcHH8r4Fzarl4+Y3K5hjothkVW5z7T1dUM11U=
|
||||
github.com/matrix-org/gomatrix v0.0.0-20220926102614-ceba4d9f7530/go.mod h1:/gBX06Kw0exX1HrwmoBibFA98yBk/jxKpGVeyQbff+s=
|
||||
github.com/matrix-org/gomatrixserverlib v0.0.0-20221014061925-a132619fa241 h1:e5o68MWeU7wjTvvNKmVo655oCYesoNRoPeBb1Xfz54g=
|
||||
github.com/matrix-org/gomatrixserverlib v0.0.0-20221014061925-a132619fa241/go.mod h1:Mtifyr8q8htcBeugvlDnkBcNUy5LO8OzUoplAf1+mb4=
|
||||
github.com/matrix-org/pinecone v0.0.0-20221007145426-3adc85477dd3 h1:lzkSQvBv8TuqKJCPoVwOVvEnARTlua5rrNy/Qw2Vxeo=
|
||||
github.com/matrix-org/pinecone v0.0.0-20221007145426-3adc85477dd3/go.mod h1:K0N1ixHQxXoCyqolDqVxPM3ArrDtcMs8yegOx2Lfv9k=
|
||||
github.com/matrix-org/gomatrixserverlib v0.0.0-20221031151122-0885c35ebe74 h1:I4LUlFqxZ72m3s9wIvUIV2FpprsxW28dO/0lAgepCZY=
|
||||
github.com/matrix-org/gomatrixserverlib v0.0.0-20221031151122-0885c35ebe74/go.mod h1:Mtifyr8q8htcBeugvlDnkBcNUy5LO8OzUoplAf1+mb4=
|
||||
github.com/matrix-org/pinecone v0.0.0-20221026160848-639feeff74d6 h1:nAT5w41Q9uWTSnpKW55/hBwP91j2IFYPDRs0jJ8TyFI=
|
||||
github.com/matrix-org/pinecone v0.0.0-20221026160848-639feeff74d6/go.mod h1:K0N1ixHQxXoCyqolDqVxPM3ArrDtcMs8yegOx2Lfv9k=
|
||||
github.com/matrix-org/util v0.0.0-20200807132607-55161520e1d4 h1:eCEHXWDv9Rm335MSuB49mFUK44bwZPFSDde3ORE3syk=
|
||||
github.com/matrix-org/util v0.0.0-20200807132607-55161520e1d4/go.mod h1:vVQlW/emklohkZnOPwD3LrZUBqdfsbiyO3p1lNV8F6U=
|
||||
github.com/mattn/go-colorable v0.1.11 h1:nQ+aFkoE2TMGc0b68U2OKSexC+eq46+XwZzWXHRmPYs=
|
||||
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
||||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||
github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
|
||||
github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
|
||||
github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU=
|
||||
github.com/mattn/go-sqlite3 v1.14.15 h1:vfoHhTN1af61xCRSWzFIWzx2YskyMTwHLrExkBOjvxI=
|
||||
github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
|
||||
|
|
@ -537,12 +507,12 @@ github.com/opencontainers/image-spec v1.1.0-rc2/go.mod h1:3OVijpioIKYWTqjiG0zfF6
|
|||
github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
|
||||
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
|
||||
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
|
||||
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
|
||||
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg=
|
||||
github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
|
||||
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
|
||||
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
|
|
@ -553,24 +523,19 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
|
|||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
|
||||
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
|
||||
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
|
||||
github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU=
|
||||
github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
|
||||
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
|
||||
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
||||
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
|
||||
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||
|
|
@ -578,17 +543,13 @@ github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8
|
|||
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
|
||||
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
|
||||
github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
|
||||
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
|
||||
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/relvacode/iso8601 v1.1.0 h1:2nV8sp0eOjpoKQ2vD3xSDygsjAx37NHG2UlZiCkDH4I=
|
||||
github.com/relvacode/iso8601 v1.1.0/go.mod h1:FlNp+jz+TXpyRqgmM7tnzHHzBnz776kmAH2h3sZCn0I=
|
||||
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
|
||||
|
|
@ -633,9 +594,7 @@ github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrf
|
|||
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
|
||||
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/assertions v0.0.0-20190215210624-980c5ac6f3ac/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
|
||||
github.com/smartystreets/goconvey v0.0.0-20190306220146-200a235640ff/go.mod h1:KSQcGKpxUMHk3nbYzs/tIBAM2iDooCn0BmttHOJEbLs=
|
||||
github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
|
||||
github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
|
||||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||
|
|
@ -673,8 +632,6 @@ github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhso
|
|||
github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY=
|
||||
github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28=
|
||||
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
|
||||
github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
|
||||
github.com/tinylib/msgp v1.1.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
|
||||
github.com/tklauser/go-sysconf v0.3.5 h1:uu3Xl4nkLzQfXNsWn15rPc/HQCJKObbt1dKJeWp3vU4=
|
||||
github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI=
|
||||
github.com/tklauser/numcpus v0.2.2 h1:oyhllyrScuYI6g+h/zUvNXNp1wy7x8qQy3t/piefldA=
|
||||
|
|
@ -687,8 +644,8 @@ github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6
|
|||
github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo=
|
||||
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
|
||||
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
|
||||
github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs=
|
||||
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
|
||||
github.com/ugorji/go/codec v1.2.7 h1:YPXUKf7fYbp/y8xloBqZOw2qaVggbfwMlI8WM3wZUJ0=
|
||||
github.com/urfave/cli/v2 v2.10.2 h1:x3p8awjp/2arX+Nl/G2040AZpOCHS/eMJJ1/a+mye4Y=
|
||||
github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
|
||||
github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
|
||||
|
|
@ -696,11 +653,10 @@ github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYp
|
|||
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
|
||||
github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
|
||||
github.com/willf/bitset v1.1.9/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
|
||||
github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
|
||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
|
||||
github.com/yggdrasil-network/yggdrasil-go v0.4.5-0.20220901155642-4f2abece817c h1:/cTmA6pV2Z20BT/FGSmnb5BmJ8eRbDP0HbCB5IO1aKw=
|
||||
github.com/yggdrasil-network/yggdrasil-go v0.4.5-0.20220901155642-4f2abece817c/go.mod h1:cIwhYwX9yT9Bcei59O0oOBSaj+kQP+9aVQUMWHh5R00=
|
||||
github.com/yggdrasil-network/yggdrasil-go v0.4.6 h1:GALUDV9QPz/5FVkbazpkTc9EABHufA556JwUJZr41j4=
|
||||
github.com/yggdrasil-network/yggdrasil-go v0.4.6/go.mod h1:PBMoAOvQjA9geNEeGyMXA9QgCS6Bu+9V+1VkWM84wpw=
|
||||
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
|
|
@ -712,8 +668,6 @@ go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
|
|||
go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
|
||||
go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
|
||||
go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
|
||||
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
||||
go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
|
|
@ -795,7 +749,6 @@ golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73r
|
|||
golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
|
|
@ -865,7 +818,6 @@ golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5h
|
|||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181221143128-b4a75ba826a6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
|
|
@ -897,7 +849,6 @@ golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200413165638-669c56c373c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
|
@ -995,10 +946,12 @@ golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roY
|
|||
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
||||
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.8-0.20211022200916-316ba0b74098/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
|
||||
|
|
@ -1018,7 +971,6 @@ gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZ
|
|||
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||
google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||
google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y=
|
||||
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=

@@ -13,6 +13,8 @@
package transactions

import (
    "net/url"
    "path/filepath"
    "sync"
    "time"

@@ -29,6 +31,7 @@ type txnsMap map[CacheKey]*util.JSONResponse
type CacheKey struct {
    AccessToken string
    TxnID       string
    Endpoint    string
}

// Cache represents a temporary store for response entries.

@@ -57,14 +60,14 @@ func NewWithCleanupPeriod(cleanupPeriod time.Duration) *Cache {
    return &t
}

// FetchTransaction looks up an entry for the (accessToken, txnID) tuple in Cache.
// FetchTransaction looks up an entry for the (accessToken, txnID, req.URL) tuple in Cache.
// Looks in both the txnMaps.
// Returns (JSON response, true) if txnID is found, else the returned bool is false.
func (t *Cache) FetchTransaction(accessToken, txnID string) (*util.JSONResponse, bool) {
func (t *Cache) FetchTransaction(accessToken, txnID string, u *url.URL) (*util.JSONResponse, bool) {
    t.RLock()
    defer t.RUnlock()
    for _, txns := range t.txnsMaps {
        res, ok := txns[CacheKey{accessToken, txnID}]
        res, ok := txns[CacheKey{accessToken, txnID, filepath.Dir(u.Path)}]
        if ok {
            return res, true
        }

@@ -72,13 +75,12 @@ func (t *Cache) FetchTransaction(accessToken, txnID string) (*util.JSONResponse,
    return nil, false
}

// AddTransaction adds an entry for the (accessToken, txnID) tuple in Cache.
// AddTransaction adds an entry for the (accessToken, txnID, req.URL) tuple in Cache.
// Adds to the front txnMap.
func (t *Cache) AddTransaction(accessToken, txnID string, res *util.JSONResponse) {
func (t *Cache) AddTransaction(accessToken, txnID string, u *url.URL, res *util.JSONResponse) {
    t.Lock()
    defer t.Unlock()

    t.txnsMaps[0][CacheKey{accessToken, txnID}] = res
    t.txnsMaps[0][CacheKey{accessToken, txnID, filepath.Dir(u.Path)}] = res
}

// cacheCleanService is responsible for cleaning up entries after cleanupPeriod.
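
The hunks above widen the idempotency cache key from (accessToken, txnID) to (accessToken, txnID, endpoint), where the endpoint is filepath.Dir of the request path so the trailing transaction ID segment is stripped. A stdlib-only sketch of the same keying scheme, assuming illustrative URLs and a simplified value type rather than util.JSONResponse:

// Sketch: the widened cache key in isolation. Only the key construction
// (accessToken, txnID, filepath.Dir of the request path) mirrors the diff
// above; the cache value type and the URLs are illustrative stand-ins.
package main

import (
    "fmt"
    "net/url"
    "path/filepath"
    "sync"
)

type cacheKey struct {
    AccessToken string
    TxnID       string
    Endpoint    string
}

type responseCache struct {
    mu      sync.RWMutex
    entries map[cacheKey]string // the real cache stores *util.JSONResponse
}

func (c *responseCache) fetch(accessToken, txnID string, u *url.URL) (string, bool) {
    c.mu.RLock()
    defer c.mu.RUnlock()
    res, ok := c.entries[cacheKey{accessToken, txnID, filepath.Dir(u.Path)}]
    return res, ok
}

func (c *responseCache) add(accessToken, txnID string, u *url.URL, res string) {
    c.mu.Lock()
    defer c.mu.Unlock()
    c.entries[cacheKey{accessToken, txnID, filepath.Dir(u.Path)}] = res
}

func main() {
    c := &responseCache{entries: map[cacheKey]string{}}
    send, _ := url.Parse("/_matrix/client/v3/rooms/!r:example.org/send/m.room.message/txn1")
    toDevice, _ := url.Parse("/_matrix/client/v3/sendToDevice/m.new_device/txn1")

    c.add("token", "txn1", send, "cached event response")

    // Same token and txnID but a different endpoint: no longer treated as a replay.
    if _, ok := c.fetch("token", "txn1", toDevice); !ok {
        fmt.Println("txn1 is scoped per endpoint")
    }
}
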
|
|
|
|||
|
|
@ -14,6 +14,9 @@ package transactions
|
|||
|
||||
import (
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
|
|
@ -24,6 +27,16 @@ type fakeType struct {
|
|||
ID string `json:"ID"`
|
||||
}
|
||||
|
||||
func TestCompare(t *testing.T) {
|
||||
u1, _ := url.Parse("/send/1?accessToken=123")
|
||||
u2, _ := url.Parse("/send/1")
|
||||
c1 := CacheKey{"1", "2", filepath.Dir(u1.Path)}
|
||||
c2 := CacheKey{"1", "2", filepath.Dir(u2.Path)}
|
||||
if !reflect.DeepEqual(c1, c2) {
|
||||
t.Fatalf("Cache keys differ: %+v <> %+v", c1, c2)
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
fakeAccessToken = "aRandomAccessToken"
|
||||
fakeAccessToken2 = "anotherRandomAccessToken"
|
||||
|
|
@ -34,23 +47,28 @@ var (
|
|||
fakeResponse2 = &util.JSONResponse{
|
||||
Code: http.StatusOK, JSON: fakeType{ID: "1"},
|
||||
}
|
||||
fakeResponse3 = &util.JSONResponse{
|
||||
Code: http.StatusOK, JSON: fakeType{ID: "2"},
|
||||
}
|
||||
)
|
||||
|
||||
// TestCache creates a New Cache and tests AddTransaction & FetchTransaction
|
||||
func TestCache(t *testing.T) {
|
||||
fakeTxnCache := New()
|
||||
fakeTxnCache.AddTransaction(fakeAccessToken, fakeTxnID, fakeResponse)
|
||||
u, _ := url.Parse("")
|
||||
fakeTxnCache.AddTransaction(fakeAccessToken, fakeTxnID, u, fakeResponse)
|
||||
|
||||
// Add entries for noise.
|
||||
for i := 1; i <= 100; i++ {
|
||||
fakeTxnCache.AddTransaction(
|
||||
fakeAccessToken,
|
||||
fakeTxnID+strconv.Itoa(i),
|
||||
u,
|
||||
&util.JSONResponse{Code: http.StatusOK, JSON: fakeType{ID: strconv.Itoa(i)}},
|
||||
)
|
||||
}
|
||||
|
||||
testResponse, ok := fakeTxnCache.FetchTransaction(fakeAccessToken, fakeTxnID)
|
||||
testResponse, ok := fakeTxnCache.FetchTransaction(fakeAccessToken, fakeTxnID, u)
|
||||
if !ok {
|
||||
t.Error("Failed to retrieve entry for txnID: ", fakeTxnID)
|
||||
} else if testResponse.JSON != fakeResponse.JSON {
|
||||
|
|
@ -59,20 +77,30 @@ func TestCache(t *testing.T) {
|
|||
}
|
||||
|
||||
// TestCacheScope ensures transactions with the same transaction ID are not shared
|
||||
// across multiple access tokens.
|
||||
// across multiple access tokens and endpoints.
|
||||
func TestCacheScope(t *testing.T) {
|
||||
cache := New()
|
||||
cache.AddTransaction(fakeAccessToken, fakeTxnID, fakeResponse)
|
||||
cache.AddTransaction(fakeAccessToken2, fakeTxnID, fakeResponse2)
|
||||
sendEndpoint, _ := url.Parse("/send/1?accessToken=test")
|
||||
sendToDeviceEndpoint, _ := url.Parse("/sendToDevice/1")
|
||||
cache.AddTransaction(fakeAccessToken, fakeTxnID, sendEndpoint, fakeResponse)
|
||||
cache.AddTransaction(fakeAccessToken2, fakeTxnID, sendEndpoint, fakeResponse2)
|
||||
cache.AddTransaction(fakeAccessToken2, fakeTxnID, sendToDeviceEndpoint, fakeResponse3)
|
||||
|
||||
if res, ok := cache.FetchTransaction(fakeAccessToken, fakeTxnID); !ok {
|
||||
if res, ok := cache.FetchTransaction(fakeAccessToken, fakeTxnID, sendEndpoint); !ok {
|
||||
t.Errorf("failed to retrieve entry for (%s, %s)", fakeAccessToken, fakeTxnID)
|
||||
} else if res.JSON != fakeResponse.JSON {
|
||||
t.Errorf("Wrong cache entry for (%s, %s). Expected: %v; got: %v", fakeAccessToken, fakeTxnID, fakeResponse.JSON, res.JSON)
|
||||
}
|
||||
if res, ok := cache.FetchTransaction(fakeAccessToken2, fakeTxnID); !ok {
|
||||
if res, ok := cache.FetchTransaction(fakeAccessToken2, fakeTxnID, sendEndpoint); !ok {
|
||||
t.Errorf("failed to retrieve entry for (%s, %s)", fakeAccessToken, fakeTxnID)
|
||||
} else if res.JSON != fakeResponse2.JSON {
|
||||
t.Errorf("Wrong cache entry for (%s, %s). Expected: %v; got: %v", fakeAccessToken, fakeTxnID, fakeResponse2.JSON, res.JSON)
|
||||
}
|
||||
|
||||
// Ensure the txnID is not shared across endpoints
|
||||
if res, ok := cache.FetchTransaction(fakeAccessToken2, fakeTxnID, sendToDeviceEndpoint); !ok {
|
||||
t.Errorf("failed to retrieve entry for (%s, %s)", fakeAccessToken, fakeTxnID)
|
||||
} else if res.JSON != fakeResponse3.JSON {
|
||||
t.Errorf("Wrong cache entry for (%s, %s). Expected: %v; got: %v", fakeAccessToken, fakeTxnID, fakeResponse2.JSON, res.JSON)
|
||||
}
|
||||
}
|

@@ -17,7 +17,7 @@ var build string
const (
    VersionMajor = 0
    VersionMinor = 10
    VersionPatch = 3
    VersionPatch = 5
    VersionTag   = "" // example: "rc1"
)

@ -128,58 +128,49 @@ func (a *KeyInternalAPI) PerformClaimKeys(ctx context.Context, req *api.PerformC
|
|||
func (a *KeyInternalAPI) claimRemoteKeys(
|
||||
ctx context.Context, timeout time.Duration, res *api.PerformClaimKeysResponse, domainToDeviceKeys map[string]map[string]map[string]string,
|
||||
) {
|
||||
resultCh := make(chan *gomatrixserverlib.RespClaimKeys, len(domainToDeviceKeys))
|
||||
// allows us to wait until all federation servers have been poked
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(len(domainToDeviceKeys))
|
||||
// mutex for failures
|
||||
var failMu sync.Mutex
|
||||
util.GetLogger(ctx).WithField("num_servers", len(domainToDeviceKeys)).Info("Claiming remote keys from servers")
|
||||
var wg sync.WaitGroup // Wait for fan-out goroutines to finish
|
||||
var mu sync.Mutex // Protects the response struct
|
||||
var claimed int // Number of keys claimed in total
|
||||
var failures int // Number of servers we failed to ask
|
||||
|
||||
util.GetLogger(ctx).Infof("Claiming remote keys from %d server(s)", len(domainToDeviceKeys))
|
||||
wg.Add(len(domainToDeviceKeys))
|
||||
|
||||
// fan out
|
||||
for d, k := range domainToDeviceKeys {
|
||||
go func(domain string, keysToClaim map[string]map[string]string) {
|
||||
defer wg.Done()
|
||||
fedCtx, cancel := context.WithTimeout(ctx, timeout)
|
||||
defer cancel()
|
||||
defer wg.Done()
|
||||
|
||||
claimKeyRes, err := a.FedClient.ClaimKeys(fedCtx, gomatrixserverlib.ServerName(domain), keysToClaim)
|
||||
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
|
||||
if err != nil {
|
||||
util.GetLogger(ctx).WithError(err).WithField("server", domain).Error("ClaimKeys failed")
|
||||
failMu.Lock()
|
||||
res.Failures[domain] = map[string]interface{}{
|
||||
"message": err.Error(),
|
||||
}
|
||||
failMu.Unlock()
|
||||
failures++
|
||||
return
|
||||
}
|
||||
resultCh <- &claimKeyRes
|
||||
|
||||
for userID, deviceIDToKeys := range claimKeyRes.OneTimeKeys {
|
||||
res.OneTimeKeys[userID] = make(map[string]map[string]json.RawMessage)
|
||||
for deviceID, keys := range deviceIDToKeys {
|
||||
res.OneTimeKeys[userID][deviceID] = keys
|
||||
claimed += len(keys)
|
||||
}
|
||||
}
|
||||
}(d, k)
|
||||
}
|
||||
|
||||
// Close the result channel when the goroutines have quit so the for .. range exits
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(resultCh)
|
||||
}()
|
||||
|
||||
keysClaimed := 0
|
||||
for result := range resultCh {
|
||||
for userID, nest := range result.OneTimeKeys {
|
||||
res.OneTimeKeys[userID] = make(map[string]map[string]json.RawMessage)
|
||||
for deviceID, nest2 := range nest {
|
||||
res.OneTimeKeys[userID][deviceID] = make(map[string]json.RawMessage)
|
||||
for keyIDWithAlgo, otk := range nest2 {
|
||||
keyJSON, err := json.Marshal(otk)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
res.OneTimeKeys[userID][deviceID][keyIDWithAlgo] = keyJSON
|
||||
keysClaimed++
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
util.GetLogger(ctx).WithField("num_keys", keysClaimed).Info("Claimed remote keys")
|
||||
wg.Wait()
|
||||
util.GetLogger(ctx).WithFields(logrus.Fields{
|
||||
"num_keys": claimed,
|
||||
"num_failures": failures,
|
||||
}).Infof("Claimed remote keys from %d server(s)", len(domainToDeviceKeys))
|
||||
}
|
||||
|
||||
func (a *KeyInternalAPI) PerformDeleteKeys(ctx context.Context, req *api.PerformDeleteKeysRequest, res *api.PerformDeleteKeysResponse) error {
|
||||
|
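
The rewritten claimRemoteKeys above drops the results channel and instead guards the shared response with one mutex while a WaitGroup tracks the per-server goroutines. A generic, stdlib-only sketch of that fan-out shape (server names and the fetch function are stand-ins, not Dendrite code):

// Sketch of the fan-out pattern used above: one goroutine per remote server,
// a WaitGroup to wait for all of them, and a single mutex protecting the
// shared result and failure maps.
package main

import (
    "fmt"
    "sync"
)

func fetchFromServer(server string) (map[string]string, error) {
    // Stand-in for the real federation call (FedClient.ClaimKeys).
    return map[string]string{server + "/key": "value"}, nil
}

func main() {
    servers := []string{"remote1.example.org", "remote2.example.org"}

    var (
        wg       sync.WaitGroup
        mu       sync.Mutex // protects keys and failures
        keys     = map[string]string{}
        failures = map[string]error{}
    )

    wg.Add(len(servers))
    for _, s := range servers {
        go func(server string) {
            defer wg.Done()
            res, err := fetchFromServer(server)

            mu.Lock()
            defer mu.Unlock()
            if err != nil {
                failures[server] = err
                return
            }
            for k, v := range res {
                keys[k] = v
            }
        }(s)
    }

    wg.Wait()
    fmt.Printf("claimed %d keys, %d failures\n", len(keys), len(failures))
}
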
|
@ -250,15 +241,13 @@ func (a *KeyInternalAPI) PerformMarkAsStaleIfNeeded(ctx context.Context, req *ap
|
|||
|
||||
// nolint:gocyclo
|
||||
func (a *KeyInternalAPI) QueryKeys(ctx context.Context, req *api.QueryKeysRequest, res *api.QueryKeysResponse) error {
|
||||
var respMu sync.Mutex
|
||||
res.DeviceKeys = make(map[string]map[string]json.RawMessage)
|
||||
res.MasterKeys = make(map[string]gomatrixserverlib.CrossSigningKey)
|
||||
res.SelfSigningKeys = make(map[string]gomatrixserverlib.CrossSigningKey)
|
||||
res.UserSigningKeys = make(map[string]gomatrixserverlib.CrossSigningKey)
|
||||
res.Failures = make(map[string]interface{})
|
||||
|
||||
// get cross-signing keys from the database
|
||||
a.crossSigningKeysFromDatabase(ctx, req, res)
|
||||
|
||||
// make a map from domain to device keys
|
||||
domainToDeviceKeys := make(map[string]map[string][]string)
|
||||
domainToCrossSigningKeys := make(map[string]map[string]struct{})
|
||||
|
|
@ -329,12 +318,16 @@ func (a *KeyInternalAPI) QueryKeys(ctx context.Context, req *api.QueryKeysReques
|
|||
}
|
||||
|
||||
// attempt to satisfy key queries from the local database first as we should get device updates pushed to us
|
||||
domainToDeviceKeys = a.remoteKeysFromDatabase(ctx, res, domainToDeviceKeys)
|
||||
domainToDeviceKeys = a.remoteKeysFromDatabase(ctx, res, &respMu, domainToDeviceKeys)
|
||||
if len(domainToDeviceKeys) > 0 || len(domainToCrossSigningKeys) > 0 {
|
||||
// perform key queries for remote devices
|
||||
a.queryRemoteKeys(ctx, req.Timeout, res, domainToDeviceKeys, domainToCrossSigningKeys)
|
||||
}
|
||||
|
||||
// Now that we've done the potentially expensive work of asking the federation,
|
||||
// try filling the cross-signing keys from the database that we know about.
|
||||
a.crossSigningKeysFromDatabase(ctx, req, res)
|
||||
|
||||
// Finally, append signatures that we know about
|
||||
// TODO: This is horrible because we need to round-trip the signature from
|
||||
// JSON, add the signatures and marshal it again, for some reason?
|
||||
|
|
@ -407,7 +400,7 @@ func (a *KeyInternalAPI) QueryKeys(ctx context.Context, req *api.QueryKeysReques
|
|||
}
|
||||
|
||||
func (a *KeyInternalAPI) remoteKeysFromDatabase(
|
||||
ctx context.Context, res *api.QueryKeysResponse, domainToDeviceKeys map[string]map[string][]string,
|
||||
ctx context.Context, res *api.QueryKeysResponse, respMu *sync.Mutex, domainToDeviceKeys map[string]map[string][]string,
|
||||
) map[string]map[string][]string {
|
||||
fetchRemote := make(map[string]map[string][]string)
|
||||
for domain, userToDeviceMap := range domainToDeviceKeys {
|
||||
|
|
@ -415,7 +408,7 @@ func (a *KeyInternalAPI) remoteKeysFromDatabase(
|
|||
// we can't safely return keys from the db when all devices are requested as we don't
|
||||
// know if one has just been added.
|
||||
if len(deviceIDs) > 0 {
|
||||
err := a.populateResponseWithDeviceKeysFromDatabase(ctx, res, userID, deviceIDs)
|
||||
err := a.populateResponseWithDeviceKeysFromDatabase(ctx, res, respMu, userID, deviceIDs)
|
||||
if err == nil {
|
||||
continue
|
||||
}
|
||||
|
|
@ -471,7 +464,9 @@ func (a *KeyInternalAPI) queryRemoteKeys(
|
|||
close(resultCh)
|
||||
}()
|
||||
|
||||
for result := range resultCh {
|
||||
processResult := func(result *gomatrixserverlib.RespQueryKeys) {
|
||||
respMu.Lock()
|
||||
defer respMu.Unlock()
|
||||
for userID, nest := range result.DeviceKeys {
|
||||
res.DeviceKeys[userID] = make(map[string]json.RawMessage)
|
||||
for deviceID, deviceKey := range nest {
|
||||
|
|
@ -494,6 +489,10 @@ func (a *KeyInternalAPI) queryRemoteKeys(
|
|||
// TODO: do we want to persist these somewhere now
|
||||
// that we have fetched them?
|
||||
}
|
||||
|
||||
for result := range resultCh {
|
||||
processResult(result)
|
||||
}
|
||||
}
|
||||
|
||||
func (a *KeyInternalAPI) queryRemoteKeysOnServer(
|
||||
|
|
@ -541,9 +540,7 @@ func (a *KeyInternalAPI) queryRemoteKeysOnServer(
|
|||
}
|
||||
// refresh entries from DB: unlike remoteKeysFromDatabase we know we previously had no device info for this
|
||||
// user so the fact that we're populating all devices here isn't a problem so long as we have devices.
|
||||
respMu.Lock()
|
||||
err = a.populateResponseWithDeviceKeysFromDatabase(ctx, res, userID, nil)
|
||||
respMu.Unlock()
|
||||
err = a.populateResponseWithDeviceKeysFromDatabase(ctx, res, respMu, userID, nil)
|
||||
if err != nil {
|
||||
logrus.WithFields(logrus.Fields{
|
||||
logrus.ErrorKey: err,
|
||||
|
|
@ -567,25 +564,26 @@ func (a *KeyInternalAPI) queryRemoteKeysOnServer(
|
|||
res.Failures[serverName] = map[string]interface{}{
|
||||
"message": err.Error(),
|
||||
}
|
||||
respMu.Unlock()
|
||||
|
||||
// last ditch, use the cache only. This is good for when clients hit /keys/query and the remote server
|
||||
// is down, better to return something than nothing at all. Clients can know about the failure by
|
||||
// inspecting the failures map though so they can know it's a cached response.
|
||||
for userID, dkeys := range devKeys {
|
||||
// drop the error as it's already a failure at this point
|
||||
_ = a.populateResponseWithDeviceKeysFromDatabase(ctx, res, userID, dkeys)
|
||||
_ = a.populateResponseWithDeviceKeysFromDatabase(ctx, res, respMu, userID, dkeys)
|
||||
}
|
||||
|
||||
// Sytest expects no failures, if we still could retrieve keys, e.g. from local cache
|
||||
respMu.Lock()
|
||||
if len(res.DeviceKeys) > 0 {
|
||||
delete(res.Failures, serverName)
|
||||
}
|
||||
respMu.Unlock()
|
||||
|
||||
}
|
||||
|
||||
func (a *KeyInternalAPI) populateResponseWithDeviceKeysFromDatabase(
|
||||
ctx context.Context, res *api.QueryKeysResponse, userID string, deviceIDs []string,
|
||||
ctx context.Context, res *api.QueryKeysResponse, respMu *sync.Mutex, userID string, deviceIDs []string,
|
||||
) error {
|
||||
keys, err := a.DB.DeviceKeysForUser(ctx, userID, deviceIDs, false)
|
||||
// if we can't query the db or there are fewer keys than requested, fetch from remote.
|
||||
|
|
@ -598,9 +596,11 @@ func (a *KeyInternalAPI) populateResponseWithDeviceKeysFromDatabase(
|
|||
if len(deviceIDs) == 0 && len(keys) == 0 {
|
||||
return fmt.Errorf("DeviceKeysForUser %s returned no keys but wanted all keys, falling back to remote", userID)
|
||||
}
|
||||
respMu.Lock()
|
||||
if res.DeviceKeys[userID] == nil {
|
||||
res.DeviceKeys[userID] = make(map[string]json.RawMessage)
|
||||
}
|
||||
respMu.Unlock()
|
||||
|
||||
for _, key := range keys {
|
||||
if len(key.KeyJSON) == 0 {
|
||||
|
|
@ -610,7 +610,9 @@ func (a *KeyInternalAPI) populateResponseWithDeviceKeysFromDatabase(
|
|||
key.KeyJSON, _ = sjson.SetBytes(key.KeyJSON, "unsigned", struct {
|
||||
DisplayName string `json:"device_display_name,omitempty"`
|
||||
}{key.DisplayName})
|
||||
respMu.Lock()
|
||||
res.DeviceKeys[userID][key.DeviceID] = key.KeyJSON
|
||||
respMu.Unlock()
|
||||
}
|
||||
return nil
|
||||
}

@@ -42,7 +42,7 @@ CREATE INDEX IF NOT EXISTS keyserver_cross_signing_sigs_idx ON keyserver_cross_s
const selectCrossSigningSigsForTargetSQL = "" +
    "SELECT origin_user_id, origin_key_id, signature FROM keyserver_cross_signing_sigs" +
    " WHERE (origin_user_id = $1 OR origin_user_id = target_user_id) AND target_user_id = $2 AND target_key_id = $3"
    " WHERE (origin_user_id = $1 OR origin_user_id = $2) AND target_user_id = $2 AND target_key_id = $3"

const upsertCrossSigningSigsForTargetSQL = "" +
    "INSERT INTO keyserver_cross_signing_sigs (origin_user_id, origin_key_id, target_user_id, target_key_id, signature)" +

@@ -42,7 +42,7 @@ CREATE INDEX IF NOT EXISTS keyserver_cross_signing_sigs_idx ON keyserver_cross_s
const selectCrossSigningSigsForTargetSQL = "" +
    "SELECT origin_user_id, origin_key_id, signature FROM keyserver_cross_signing_sigs" +
    " WHERE (origin_user_id = $1 OR origin_user_id = target_user_id) AND target_user_id = $2 AND target_key_id = $3"
    " WHERE (origin_user_id = $1 OR origin_user_id = $2) AND target_user_id = $3 AND target_key_id = $4"

const upsertCrossSigningSigsForTargetSQL = "" +
    "INSERT OR REPLACE INTO keyserver_cross_signing_sigs (origin_user_id, origin_key_id, target_user_id, target_key_id, signature)" +

@@ -85,7 +85,7 @@ func NewSqliteCrossSigningSigsTable(db *sql.DB) (tables.CrossSigningSigs, error)
func (s *crossSigningSigsStatements) SelectCrossSigningSigsForTarget(
    ctx context.Context, txn *sql.Tx, originUserID, targetUserID string, targetKeyID gomatrixserverlib.KeyID,
) (r types.CrossSigningSigMap, err error) {
    rows, err := sqlutil.TxStmt(txn, s.selectCrossSigningSigsForTargetStmt).QueryContext(ctx, originUserID, targetUserID, targetKeyID)
    rows, err := sqlutil.TxStmt(txn, s.selectCrossSigningSigsForTargetStmt).QueryContext(ctx, originUserID, targetUserID, targetUserID, targetKeyID)
    if err != nil {
        return nil, err
    }
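
In both backends the OR-arm now compares origin_user_id against the bound target user rather than the target_user_id column; PostgreSQL-style numbering can simply reuse $2, while the SQLite statement renumbers to $3/$4 and the caller binds targetUserID twice. A small database/sql sketch of that argument-count difference, with a placeholder table and no real driver wired up:

// Compile-only sketch: the same logical query, once with a reused placeholder
// (three arguments) and once with distinct placeholders (four arguments).
package main

import (
    "context"
    "database/sql"
)

const (
    // Reuses $2 for both conditions: three arguments.
    selectSigsReused = `SELECT signature FROM sigs
     WHERE (origin_user_id = $1 OR origin_user_id = $2)
       AND target_user_id = $2 AND target_key_id = $3`

    // Distinct placeholders, as in the SQLite variant above: four arguments,
    // with the target user ID passed twice.
    selectSigsRenumbered = `SELECT signature FROM sigs
     WHERE (origin_user_id = $1 OR origin_user_id = $2)
       AND target_user_id = $3 AND target_key_id = $4`
)

func querySigs(ctx context.Context, db *sql.DB, origin, target, keyID string, renumbered bool) (*sql.Rows, error) {
    if renumbered {
        return db.QueryContext(ctx, selectSigsRenumbered, origin, target, target, keyID)
    }
    return db.QueryContext(ctx, selectSigsReused, origin, target, keyID)
}

func main() { _ = querySigs } // no live database is opened in this sketch
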

@@ -150,6 +150,7 @@ type ClientRoomserverAPI interface {
    PerformRoomUpgrade(ctx context.Context, req *PerformRoomUpgradeRequest, resp *PerformRoomUpgradeResponse) error
    PerformAdminEvacuateRoom(ctx context.Context, req *PerformAdminEvacuateRoomRequest, res *PerformAdminEvacuateRoomResponse) error
    PerformAdminEvacuateUser(ctx context.Context, req *PerformAdminEvacuateUserRequest, res *PerformAdminEvacuateUserResponse) error
    PerformAdminDownloadState(ctx context.Context, req *PerformAdminDownloadStateRequest, res *PerformAdminDownloadStateResponse) error
    PerformPeek(ctx context.Context, req *PerformPeekRequest, res *PerformPeekResponse) error
    PerformUnpeek(ctx context.Context, req *PerformUnpeekRequest, res *PerformUnpeekResponse) error
    PerformInvite(ctx context.Context, req *PerformInviteRequest, res *PerformInviteResponse) error

@@ -167,6 +168,7 @@ type UserRoomserverAPI interface {
    QueryCurrentState(ctx context.Context, req *QueryCurrentStateRequest, res *QueryCurrentStateResponse) error
    QueryMembershipsForRoom(ctx context.Context, req *QueryMembershipsForRoomRequest, res *QueryMembershipsForRoomResponse) error
    PerformAdminEvacuateUser(ctx context.Context, req *PerformAdminEvacuateUserRequest, res *PerformAdminEvacuateUserResponse) error
    PerformJoin(ctx context.Context, req *PerformJoinRequest, res *PerformJoinResponse) error
}

type FederationRoomserverAPI interface {

@@ -131,6 +131,16 @@ func (t *RoomserverInternalAPITrace) PerformAdminEvacuateUser(
    return err
}

func (t *RoomserverInternalAPITrace) PerformAdminDownloadState(
    ctx context.Context,
    req *PerformAdminDownloadStateRequest,
    res *PerformAdminDownloadStateResponse,
) error {
    err := t.Impl.PerformAdminDownloadState(ctx, req, res)
    util.GetLogger(ctx).WithError(err).Infof("PerformAdminDownloadState req=%+v res=%+v", js(req), js(res))
    return err
}

func (t *RoomserverInternalAPITrace) PerformInboundPeek(
    ctx context.Context,
    req *PerformInboundPeekRequest,

@@ -168,8 +168,10 @@ type PerformBackfillResponse struct {
}

type PerformPublishRequest struct {
    RoomID     string
    Visibility string
    RoomID       string
    Visibility   string
    AppserviceID string
    NetworkID    string
}

type PerformPublishResponse struct {

@@ -235,3 +237,13 @@ type PerformAdminEvacuateUserResponse struct {
    Affected []string `json:"affected"`
    Error    *PerformError
}

type PerformAdminDownloadStateRequest struct {
    RoomID     string                       `json:"room_id"`
    UserID     string                       `json:"user_id"`
    ServerName gomatrixserverlib.ServerName `json:"server_name"`
}

type PerformAdminDownloadStateResponse struct {
    Error *PerformError `json:"error,omitempty"`
}
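
PerformPublishRequest now records which appservice and network a published room belongs to. A minimal sketch of filling the widened request; the struct is a local mirror of the type above and the IDs are invented examples:

package main

import "fmt"

// Local mirror of api.PerformPublishRequest after this change.
type PerformPublishRequest struct {
    RoomID       string
    Visibility   string
    AppserviceID string
    NetworkID    string
}

func main() {
    req := PerformPublishRequest{
        RoomID:       "!room:example.org",
        Visibility:   "public",
        AppserviceID: "my-bridge",       // illustrative appservice ID
        NetworkID:    "example-network", // scopes the room to one network's directory
    }
    fmt.Printf("%+v\n", req)
}
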

@@ -21,8 +21,9 @@ import (
    "fmt"
    "strings"

    "github.com/matrix-org/dendrite/clientapi/auth/authtypes"
    "github.com/matrix-org/gomatrixserverlib"

    "github.com/matrix-org/dendrite/clientapi/auth/authtypes"
)

// QueryLatestEventsAndStateRequest is a request to QueryLatestEventsAndState

@@ -257,7 +258,9 @@ type QueryRoomVersionForRoomResponse struct {

type QueryPublishedRoomsRequest struct {
    // Optional. If specified, returns whether this room is published or not.
    RoomID string
    RoomID             string
    NetworkID          string
    IncludeAllNetworks bool
}

type QueryPublishedRoomsResponse struct {
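
QueryPublishedRoomsRequest gains the matching filter fields. The sketch below mirrors the precedence applied by the storage layer later in this diff: a non-empty NetworkID selects that network's rooms, otherwise IncludeAllNetworks decides whether every network is returned. The type here is a local mirror, not an import of the real package:

package main

import "fmt"

// Local mirror of api.QueryPublishedRoomsRequest after this change.
type QueryPublishedRoomsRequest struct {
    RoomID             string
    NetworkID          string
    IncludeAllNetworks bool
}

// describeFilter mirrors the precedence used by SelectAllPublishedRooms further
// down: an explicit network ID wins, otherwise IncludeAllNetworks widens the result.
func describeFilter(req QueryPublishedRoomsRequest) string {
    switch {
    case req.NetworkID != "":
        return "published rooms for network " + req.NetworkID
    case req.IncludeAllNetworks:
        return "published rooms across all networks"
    default:
        return "published rooms on the default (empty) network"
    }
}

func main() {
    fmt.Println(describeFilter(QueryPublishedRoomsRequest{NetworkID: "example-network"}))
    fmt.Println(describeFilter(QueryPublishedRoomsRequest{IncludeAllNetworks: true}))
    fmt.Println(describeFilter(QueryPublishedRoomsRequest{}))
}
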
@ -117,6 +117,11 @@ func (r *Admin) PerformAdminEvacuateRoom(
|
|||
PrevEvents: prevEvents,
|
||||
}
|
||||
|
||||
_, senderDomain, err := gomatrixserverlib.SplitID('@', fledglingEvent.Sender)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if fledglingEvent.Content, err = json.Marshal(memberContent); err != nil {
|
||||
res.Error = &api.PerformError{
|
||||
Code: api.PerformErrorBadRequest,
|
||||
|
|
@ -146,8 +151,8 @@ func (r *Admin) PerformAdminEvacuateRoom(
|
|||
inputEvents = append(inputEvents, api.InputRoomEvent{
|
||||
Kind: api.KindNew,
|
||||
Event: event,
|
||||
Origin: r.Cfg.Matrix.ServerName,
|
||||
SendAsServer: string(r.Cfg.Matrix.ServerName),
|
||||
Origin: senderDomain,
|
||||
SendAsServer: string(senderDomain),
|
||||
})
|
||||
res.Affected = append(res.Affected, stateKey)
|
||||
prevEvents = []gomatrixserverlib.EventReference{
|
||||
|
|
@ -176,7 +181,7 @@ func (r *Admin) PerformAdminEvacuateUser(
|
|||
}
|
||||
return nil
|
||||
}
|
||||
if domain != r.Cfg.Matrix.ServerName {
|
||||
if !r.Cfg.Matrix.IsLocalServerName(domain) {
|
||||
res.Error = &api.PerformError{
|
||||
Code: api.PerformErrorBadRequest,
|
||||
Msg: "Can only evacuate local users using this endpoint",
|
||||
|
|
@ -231,3 +236,145 @@ func (r *Admin) PerformAdminEvacuateUser(
|
|||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *Admin) PerformAdminDownloadState(
|
||||
ctx context.Context,
|
||||
req *api.PerformAdminDownloadStateRequest,
|
||||
res *api.PerformAdminDownloadStateResponse,
|
||||
) error {
|
||||
roomInfo, err := r.DB.RoomInfo(ctx, req.RoomID)
|
||||
if err != nil {
|
||||
res.Error = &api.PerformError{
|
||||
Code: api.PerformErrorBadRequest,
|
||||
Msg: fmt.Sprintf("r.DB.RoomInfo: %s", err),
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if roomInfo == nil || roomInfo.IsStub() {
|
||||
res.Error = &api.PerformError{
|
||||
Code: api.PerformErrorBadRequest,
|
||||
Msg: fmt.Sprintf("room %q not found", req.RoomID),
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
fwdExtremities, _, depth, err := r.DB.LatestEventIDs(ctx, roomInfo.RoomNID)
|
||||
if err != nil {
|
||||
res.Error = &api.PerformError{
|
||||
Code: api.PerformErrorBadRequest,
|
||||
Msg: fmt.Sprintf("r.DB.LatestEventIDs: %s", err),
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
authEventMap := map[string]*gomatrixserverlib.Event{}
|
||||
stateEventMap := map[string]*gomatrixserverlib.Event{}
|
||||
|
||||
for _, fwdExtremity := range fwdExtremities {
|
||||
var state gomatrixserverlib.RespState
|
||||
state, err = r.Inputer.FSAPI.LookupState(ctx, req.ServerName, req.RoomID, fwdExtremity.EventID, roomInfo.RoomVersion)
|
||||
if err != nil {
|
||||
res.Error = &api.PerformError{
|
||||
Code: api.PerformErrorBadRequest,
|
||||
Msg: fmt.Sprintf("r.Inputer.FSAPI.LookupState (%q): %s", fwdExtremity.EventID, err),
|
||||
}
|
||||
return nil
|
||||
}
|
||||
for _, authEvent := range state.AuthEvents.UntrustedEvents(roomInfo.RoomVersion) {
|
||||
if err = authEvent.VerifyEventSignatures(ctx, r.Inputer.KeyRing); err != nil {
|
||||
continue
|
||||
}
|
||||
authEventMap[authEvent.EventID()] = authEvent
|
||||
}
|
||||
for _, stateEvent := range state.StateEvents.UntrustedEvents(roomInfo.RoomVersion) {
|
||||
if err = stateEvent.VerifyEventSignatures(ctx, r.Inputer.KeyRing); err != nil {
|
||||
continue
|
||||
}
|
||||
stateEventMap[stateEvent.EventID()] = stateEvent
|
||||
}
|
||||
}
|
||||
|
||||
authEvents := make([]*gomatrixserverlib.HeaderedEvent, 0, len(authEventMap))
|
||||
stateEvents := make([]*gomatrixserverlib.HeaderedEvent, 0, len(stateEventMap))
|
||||
stateIDs := make([]string, 0, len(stateEventMap))
|
||||
|
||||
for _, authEvent := range authEventMap {
|
||||
authEvents = append(authEvents, authEvent.Headered(roomInfo.RoomVersion))
|
||||
}
|
||||
for _, stateEvent := range stateEventMap {
|
||||
stateEvents = append(stateEvents, stateEvent.Headered(roomInfo.RoomVersion))
|
||||
stateIDs = append(stateIDs, stateEvent.EventID())
|
||||
}
|
||||
|
||||
builder := &gomatrixserverlib.EventBuilder{
|
||||
Type: "org.matrix.dendrite.state_download",
|
||||
Sender: req.UserID,
|
||||
RoomID: req.RoomID,
|
||||
Content: gomatrixserverlib.RawJSON("{}"),
|
||||
}
|
||||
|
||||
eventsNeeded, err := gomatrixserverlib.StateNeededForEventBuilder(builder)
|
||||
if err != nil {
|
||||
res.Error = &api.PerformError{
|
||||
Code: api.PerformErrorBadRequest,
|
||||
Msg: fmt.Sprintf("gomatrixserverlib.StateNeededForEventBuilder: %s", err),
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
queryRes := &api.QueryLatestEventsAndStateResponse{
|
||||
RoomExists: true,
|
||||
RoomVersion: roomInfo.RoomVersion,
|
||||
LatestEvents: fwdExtremities,
|
||||
StateEvents: stateEvents,
|
||||
Depth: depth,
|
||||
}
|
||||
|
||||
ev, err := eventutil.BuildEvent(ctx, builder, r.Cfg.Matrix, time.Now(), &eventsNeeded, queryRes)
|
||||
if err != nil {
|
||||
res.Error = &api.PerformError{
|
||||
Code: api.PerformErrorBadRequest,
|
||||
Msg: fmt.Sprintf("eventutil.BuildEvent: %s", err),
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
inputReq := &api.InputRoomEventsRequest{
|
||||
Asynchronous: false,
|
||||
}
|
||||
inputRes := &api.InputRoomEventsResponse{}
|
||||
|
||||
for _, authEvent := range append(authEvents, stateEvents...) {
|
||||
inputReq.InputRoomEvents = append(inputReq.InputRoomEvents, api.InputRoomEvent{
|
||||
Kind: api.KindOutlier,
|
||||
Event: authEvent,
|
||||
})
|
||||
}
|
||||
|
||||
inputReq.InputRoomEvents = append(inputReq.InputRoomEvents, api.InputRoomEvent{
|
||||
Kind: api.KindNew,
|
||||
Event: ev,
|
||||
Origin: r.Cfg.Matrix.ServerName,
|
||||
HasState: true,
|
||||
StateEventIDs: stateIDs,
|
||||
SendAsServer: string(r.Cfg.Matrix.ServerName),
|
||||
})
|
||||
|
||||
if err := r.Inputer.InputRoomEvents(ctx, inputReq, inputRes); err != nil {
|
||||
res.Error = &api.PerformError{
|
||||
Code: api.PerformErrorBadRequest,
|
||||
Msg: fmt.Sprintf("r.Inputer.InputRoomEvents: %s", err),
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if inputRes.ErrMsg != "" {
|
||||
res.Error = &api.PerformError{
|
||||
Code: api.PerformErrorBadRequest,
|
||||
Msg: inputRes.ErrMsg,
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
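
PerformAdminDownloadState, implemented above, asks a named remote server for a room's current state and feeds it back in as an org.matrix.dendrite.state_download event. Below is a sketch of the call shape seen by a consumer of the interface method added earlier; the types are simplified local mirrors (the real ones use gomatrixserverlib.ServerName and *PerformError) and the stub stands in for the roomserver:

package main

import (
    "context"
    "fmt"
)

// Simplified local mirrors of the request/response types added above.
type PerformAdminDownloadStateRequest struct {
    RoomID     string `json:"room_id"`
    UserID     string `json:"user_id"`
    ServerName string `json:"server_name"`
}

type PerformAdminDownloadStateResponse struct {
    Error error `json:"error,omitempty"`
}

type adminAPI interface {
    PerformAdminDownloadState(ctx context.Context, req *PerformAdminDownloadStateRequest, res *PerformAdminDownloadStateResponse) error
}

// stubRoomserver stands in for the real implementation.
type stubRoomserver struct{}

func (stubRoomserver) PerformAdminDownloadState(ctx context.Context, req *PerformAdminDownloadStateRequest, res *PerformAdminDownloadStateResponse) error {
    fmt.Printf("would fetch state for %s from %s as %s\n", req.RoomID, req.ServerName, req.UserID)
    return nil
}

func main() {
    var api adminAPI = stubRoomserver{}
    req := &PerformAdminDownloadStateRequest{
        RoomID:     "!broken:example.org",
        UserID:     "@admin:example.org",
        ServerName: "other.example.org",
    }
    res := &PerformAdminDownloadStateResponse{}
    if err := api.PerformAdminDownloadState(context.Background(), req, res); err != nil {
        fmt.Println("request failed:", err)
    }
}
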
|
|
|||
|
|
@ -18,6 +18,7 @@ import (
|
|||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/getsentry/sentry-go"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
"github.com/matrix-org/util"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
|
@ -322,6 +323,7 @@ FederationHit:
|
|||
b.eventIDToBeforeStateIDs[targetEvent.EventID()] = res
|
||||
return res, nil
|
||||
}
|
||||
sentry.CaptureException(lastErr) // temporary to see if we might need to raise the server limit
|
||||
return nil, lastErr
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -70,8 +70,8 @@ func (r *Inviter) PerformInvite(
|
|||
}
|
||||
return nil, nil
|
||||
}
|
||||
isTargetLocal := domain == r.Cfg.Matrix.ServerName
|
||||
isOriginLocal := senderDomain == r.Cfg.Matrix.ServerName
|
||||
isTargetLocal := r.Cfg.Matrix.IsLocalServerName(domain)
|
||||
isOriginLocal := r.Cfg.Matrix.IsLocalServerName(senderDomain)
|
||||
if !isOriginLocal && !isTargetLocal {
|
||||
res.Error = &api.PerformError{
|
||||
Code: api.PerformErrorBadRequest,
|
||||
|
|
|
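
From here on the perform functions stop comparing domains against cfg.Matrix.ServerName directly and ask cfg.Matrix.IsLocalServerName instead. A stdlib-only sketch of what such a check involves, splitting the domain off a Matrix ID and testing it against a set of local names; this is one plausible shape, not the actual Dendrite config code:

package main

import (
    "fmt"
    "strings"
)

// localNames is a stand-in for the set of server names a homeserver answers to.
type localNames map[string]struct{}

// isLocalServerName reports whether the given domain is one of ours.
func (l localNames) isLocalServerName(domain string) bool {
    _, ok := l[domain]
    return ok
}

// domainOf extracts the server name from a Matrix ID such as "@user:example.org".
// The real code uses gomatrixserverlib.SplitID, which also validates the sigil.
func domainOf(id string) (string, error) {
    i := strings.IndexByte(id, ':')
    if len(id) < 2 || i < 0 {
        return "", fmt.Errorf("malformed ID %q", id)
    }
    return id[i+1:], nil
}

func main() {
    cfg := localNames{"example.org": {}}

    for _, user := range []string{"@alice:example.org", "@bob:elsewhere.net"} {
        domain, err := domainOf(user)
        if err != nil {
            fmt.Println(err)
            continue
        }
        fmt.Printf("%s local=%v\n", user, cfg.isLocalServerName(domain))
    }
}
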
|||
|
|
@ -92,7 +92,7 @@ func (r *Joiner) performJoin(
|
|||
Msg: fmt.Sprintf("Supplied user ID %q in incorrect format", req.UserID),
|
||||
}
|
||||
}
|
||||
if domain != r.Cfg.Matrix.ServerName {
|
||||
if !r.Cfg.Matrix.IsLocalServerName(domain) {
|
||||
return "", "", &rsAPI.PerformError{
|
||||
Code: rsAPI.PerformErrorBadRequest,
|
||||
Msg: fmt.Sprintf("User %q does not belong to this homeserver", req.UserID),
|
||||
|
|
@ -124,7 +124,7 @@ func (r *Joiner) performJoinRoomByAlias(
|
|||
// Check if this alias matches our own server configuration. If it
|
||||
// doesn't then we'll need to try a federated join.
|
||||
var roomID string
|
||||
if domain != r.Cfg.Matrix.ServerName {
|
||||
if !r.Cfg.Matrix.IsLocalServerName(domain) {
|
||||
// The alias isn't owned by us, so we will need to try joining using
|
||||
// a remote server.
|
||||
dirReq := fsAPI.PerformDirectoryLookupRequest{
|
||||
|
|
@ -172,7 +172,7 @@ func (r *Joiner) performJoinRoomByID(
|
|||
// The original client request ?server_name=... may include this HS so filter that out so we
|
||||
// don't attempt to make_join with ourselves
|
||||
for i := 0; i < len(req.ServerNames); i++ {
|
||||
if req.ServerNames[i] == r.Cfg.Matrix.ServerName {
|
||||
if r.Cfg.Matrix.IsLocalServerName(req.ServerNames[i]) {
|
||||
// delete this entry
|
||||
req.ServerNames = append(req.ServerNames[:i], req.ServerNames[i+1:]...)
|
||||
i--
|
||||
|
|
@ -191,12 +191,19 @@ func (r *Joiner) performJoinRoomByID(
|
|||
// If the server name in the room ID isn't ours then it's a
|
||||
// possible candidate for finding the room via federation. Add
|
||||
// it to the list of servers to try.
|
||||
if domain != r.Cfg.Matrix.ServerName {
|
||||
if !r.Cfg.Matrix.IsLocalServerName(domain) {
|
||||
req.ServerNames = append(req.ServerNames, domain)
|
||||
}
|
||||
|
||||
// Prepare the template for the join event.
|
||||
userID := req.UserID
|
||||
_, userDomain, err := gomatrixserverlib.SplitID('@', userID)
|
||||
if err != nil {
|
||||
return "", "", &rsAPI.PerformError{
|
||||
Code: rsAPI.PerformErrorBadRequest,
|
||||
Msg: fmt.Sprintf("User ID %q is invalid: %s", userID, err),
|
||||
}
|
||||
}
|
||||
eb := gomatrixserverlib.EventBuilder{
|
||||
Type: gomatrixserverlib.MRoomMember,
|
||||
Sender: userID,
|
||||
|
|
@ -247,7 +254,7 @@ func (r *Joiner) performJoinRoomByID(
|
|||
|
||||
// If we were invited by someone from another server then we can
|
||||
// assume they are in the room so we can join via them.
|
||||
if inviterDomain != r.Cfg.Matrix.ServerName {
|
||||
if !r.Cfg.Matrix.IsLocalServerName(inviterDomain) {
|
||||
req.ServerNames = append(req.ServerNames, inviterDomain)
|
||||
forceFederatedJoin = true
|
||||
memberEvent := gjson.Parse(string(inviteEvent.JSON()))
|
||||
|
|
@ -300,7 +307,7 @@ func (r *Joiner) performJoinRoomByID(
|
|||
{
|
||||
Kind: rsAPI.KindNew,
|
||||
Event: event.Headered(buildRes.RoomVersion),
|
||||
SendAsServer: string(r.Cfg.Matrix.ServerName),
|
||||
SendAsServer: string(userDomain),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
|
@ -323,7 +330,7 @@ func (r *Joiner) performJoinRoomByID(
|
|||
// The room doesn't exist locally. If the room ID looks like it should
|
||||
// be ours then this probably means that we've nuked our database at
|
||||
// some point.
|
||||
if domain == r.Cfg.Matrix.ServerName {
|
||||
if r.Cfg.Matrix.IsLocalServerName(domain) {
|
||||
// If there are no more server names to try then give up here.
|
||||
// Otherwise we'll try a federated join as normal, since it's quite
|
||||
// possible that the room still exists on other servers.
|
||||
|
|
@ -348,7 +355,7 @@ func (r *Joiner) performJoinRoomByID(
|
|||
// it will have been overwritten with a room ID by performJoinRoomByAlias.
|
||||
// We should now include this in the response so that the CS API can
|
||||
// return the right room ID.
|
||||
return req.RoomIDOrAlias, r.Cfg.Matrix.ServerName, nil
|
||||
return req.RoomIDOrAlias, userDomain, nil
|
||||
}
|
||||
|
||||
func (r *Joiner) performFederatedJoinRoomByID(
|
||||
|
|
|
|||
|
|
@ -52,7 +52,7 @@ func (r *Leaver) PerformLeave(
|
|||
if err != nil {
|
||||
return nil, fmt.Errorf("supplied user ID %q in incorrect format", req.UserID)
|
||||
}
|
||||
if domain != r.Cfg.Matrix.ServerName {
|
||||
if !r.Cfg.Matrix.IsLocalServerName(domain) {
|
||||
return nil, fmt.Errorf("user %q does not belong to this homeserver", req.UserID)
|
||||
}
|
||||
logger := logrus.WithContext(ctx).WithFields(logrus.Fields{
|
||||
|
|
@ -85,7 +85,7 @@ func (r *Leaver) performLeaveRoomByID(
|
|||
if serr != nil {
|
||||
return nil, fmt.Errorf("sender %q is invalid", senderUser)
|
||||
}
|
||||
if senderDomain != r.Cfg.Matrix.ServerName {
|
||||
if !r.Cfg.Matrix.IsLocalServerName(senderDomain) {
|
||||
return r.performFederatedRejectInvite(ctx, req, res, senderUser, eventID)
|
||||
}
|
||||
// check that this is not a "server notice room"
|
||||
|
|
@ -186,7 +186,7 @@ func (r *Leaver) performLeaveRoomByID(
|
|||
Kind: api.KindNew,
|
||||
Event: event.Headered(buildRes.RoomVersion),
|
||||
Origin: senderDomain,
|
||||
SendAsServer: string(r.Cfg.Matrix.ServerName),
|
||||
SendAsServer: string(senderDomain),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
|
|
|||
|
|
@ -72,7 +72,7 @@ func (r *Peeker) performPeek(
|
|||
Msg: fmt.Sprintf("Supplied user ID %q in incorrect format", req.UserID),
|
||||
}
|
||||
}
|
||||
if domain != r.Cfg.Matrix.ServerName {
|
||||
if !r.Cfg.Matrix.IsLocalServerName(domain) {
|
||||
return "", &api.PerformError{
|
||||
Code: api.PerformErrorBadRequest,
|
||||
Msg: fmt.Sprintf("User %q does not belong to this homeserver", req.UserID),
|
||||
|
|
@ -104,7 +104,7 @@ func (r *Peeker) performPeekRoomByAlias(
|
|||
// Check if this alias matches our own server configuration. If it
|
||||
// doesn't then we'll need to try a federated peek.
|
||||
var roomID string
|
||||
if domain != r.Cfg.Matrix.ServerName {
|
||||
if !r.Cfg.Matrix.IsLocalServerName(domain) {
|
||||
// The alias isn't owned by us, so we will need to try peeking using
|
||||
// a remote server.
|
||||
dirReq := fsAPI.PerformDirectoryLookupRequest{
|
||||
|
|
@ -154,7 +154,7 @@ func (r *Peeker) performPeekRoomByID(
|
|||
|
||||
// handle federated peeks
|
||||
// FIXME: don't create an outbound peek if we already have one going.
|
||||
if domain != r.Cfg.Matrix.ServerName {
|
||||
if !r.Cfg.Matrix.IsLocalServerName(domain) {
|
||||
// If the server name in the room ID isn't ours then it's a
|
||||
// possible candidate for finding the room via federation. Add
|
||||
// it to the list of servers to try.
|
||||
|
|
|
|||
|
|
@ -30,7 +30,7 @@ func (r *Publisher) PerformPublish(
|
|||
req *api.PerformPublishRequest,
|
||||
res *api.PerformPublishResponse,
|
||||
) error {
|
||||
err := r.DB.PublishRoom(ctx, req.RoomID, req.Visibility == "public")
|
||||
err := r.DB.PublishRoom(ctx, req.RoomID, req.AppserviceID, req.NetworkID, req.Visibility == "public")
|
||||
if err != nil {
|
||||
res.Error = &api.PerformError{
|
||||
Msg: err.Error(),
|
||||
|
|
|
|||
|
|
@ -67,7 +67,7 @@ func (r *Unpeeker) performUnpeek(
|
|||
Msg: fmt.Sprintf("Supplied user ID %q in incorrect format", req.UserID),
|
||||
}
|
||||
}
|
||||
if domain != r.Cfg.Matrix.ServerName {
|
||||
if !r.Cfg.Matrix.IsLocalServerName(domain) {
|
||||
return &api.PerformError{
|
||||
Code: api.PerformErrorBadRequest,
|
||||
Msg: fmt.Sprintf("User %q does not belong to this homeserver", req.UserID),
|
||||
|
|
|
|||
|
|
@ -60,6 +60,13 @@ func (r *Upgrader) performRoomUpgrade(
|
|||
) (string, *api.PerformError) {
|
||||
roomID := req.RoomID
|
||||
userID := req.UserID
|
||||
_, userDomain, err := gomatrixserverlib.SplitID('@', userID)
|
||||
if err != nil {
|
||||
return "", &api.PerformError{
|
||||
Code: api.PerformErrorNotAllowed,
|
||||
Msg: "Error validating the user ID",
|
||||
}
|
||||
}
|
||||
evTime := time.Now()
|
||||
|
||||
// Return an immediate error if the room does not exist
|
||||
|
|
@ -80,7 +87,7 @@ func (r *Upgrader) performRoomUpgrade(
|
|||
|
||||
// TODO (#267): Check room ID doesn't clash with an existing one, and we
|
||||
// probably shouldn't be using pseudo-random strings, maybe GUIDs?
|
||||
newRoomID := fmt.Sprintf("!%s:%s", util.RandomString(16), r.Cfg.Matrix.ServerName)
|
||||
newRoomID := fmt.Sprintf("!%s:%s", util.RandomString(16), userDomain)
|
||||
|
||||
// Get the existing room state for the old room.
|
||||
oldRoomReq := &api.QueryLatestEventsAndStateRequest{
|
||||
|
|
@ -107,12 +114,12 @@ func (r *Upgrader) performRoomUpgrade(
|
|||
}
|
||||
|
||||
// Send the setup events to the new room
|
||||
if pErr = r.sendInitialEvents(ctx, evTime, userID, newRoomID, string(req.RoomVersion), eventsToMake); pErr != nil {
|
||||
if pErr = r.sendInitialEvents(ctx, evTime, userID, userDomain, newRoomID, string(req.RoomVersion), eventsToMake); pErr != nil {
|
||||
return "", pErr
|
||||
}
|
||||
|
||||
// 5. Send the tombstone event to the old room
|
||||
if pErr = r.sendHeaderedEvent(ctx, tombstoneEvent, string(r.Cfg.Matrix.ServerName)); pErr != nil {
|
||||
if pErr = r.sendHeaderedEvent(ctx, userDomain, tombstoneEvent, string(userDomain)); pErr != nil {
|
||||
return "", pErr
|
||||
}
|
||||
|
||||
|
|
@ -122,7 +129,7 @@ func (r *Upgrader) performRoomUpgrade(
|
|||
}
|
||||
|
||||
// If the old room had a canonical alias event, it should be deleted in the old room
|
||||
if pErr = r.clearOldCanonicalAliasEvent(ctx, oldRoomRes, evTime, userID, roomID); pErr != nil {
|
||||
if pErr = r.clearOldCanonicalAliasEvent(ctx, oldRoomRes, evTime, userID, userDomain, roomID); pErr != nil {
|
||||
return "", pErr
|
||||
}
|
||||
|
||||
|
|
@ -132,7 +139,7 @@ func (r *Upgrader) performRoomUpgrade(
|
|||
}
|
||||
|
||||
// 6. Restrict power levels in the old room
|
||||
if pErr = r.restrictOldRoomPowerLevels(ctx, evTime, userID, roomID); pErr != nil {
|
||||
if pErr = r.restrictOldRoomPowerLevels(ctx, evTime, userID, userDomain, roomID); pErr != nil {
|
||||
return "", pErr
|
||||
}
|
||||
|
||||
|
|
@ -154,7 +161,7 @@ func (r *Upgrader) getRoomPowerLevels(ctx context.Context, roomID string) (*goma
|
|||
return powerLevelContent, nil
|
||||
}
|
||||
|
||||
func (r *Upgrader) restrictOldRoomPowerLevels(ctx context.Context, evTime time.Time, userID, roomID string) *api.PerformError {
|
||||
func (r *Upgrader) restrictOldRoomPowerLevels(ctx context.Context, evTime time.Time, userID string, userDomain gomatrixserverlib.ServerName, roomID string) *api.PerformError {
|
||||
restrictedPowerLevelContent, pErr := r.getRoomPowerLevels(ctx, roomID)
|
||||
if pErr != nil {
|
||||
return pErr
|
||||
|
|
@ -183,7 +190,7 @@ func (r *Upgrader) restrictOldRoomPowerLevels(ctx context.Context, evTime time.T
|
|||
return resErr
|
||||
}
|
||||
} else {
|
||||
if resErr = r.sendHeaderedEvent(ctx, restrictedPowerLevelsHeadered, api.DoNotSendToOtherServers); resErr != nil {
|
||||
if resErr = r.sendHeaderedEvent(ctx, userDomain, restrictedPowerLevelsHeadered, api.DoNotSendToOtherServers); resErr != nil {
|
||||
return resErr
|
||||
}
|
||||
}
|
||||
|
|
@ -223,7 +230,7 @@ func moveLocalAliases(ctx context.Context,
|
|||
return nil
|
||||
}
|
||||
|
||||
func (r *Upgrader) clearOldCanonicalAliasEvent(ctx context.Context, oldRoom *api.QueryLatestEventsAndStateResponse, evTime time.Time, userID, roomID string) *api.PerformError {
|
||||
func (r *Upgrader) clearOldCanonicalAliasEvent(ctx context.Context, oldRoom *api.QueryLatestEventsAndStateResponse, evTime time.Time, userID string, userDomain gomatrixserverlib.ServerName, roomID string) *api.PerformError {
|
||||
for _, event := range oldRoom.StateEvents {
|
||||
if event.Type() != gomatrixserverlib.MRoomCanonicalAlias || !event.StateKeyEquals("") {
|
||||
continue
|
||||
|
|
@ -254,7 +261,7 @@ func (r *Upgrader) clearOldCanonicalAliasEvent(ctx context.Context, oldRoom *api
|
|||
return resErr
|
||||
}
|
||||
} else {
|
||||
if resErr = r.sendHeaderedEvent(ctx, emptyCanonicalAliasEvent, api.DoNotSendToOtherServers); resErr != nil {
|
||||
if resErr = r.sendHeaderedEvent(ctx, userDomain, emptyCanonicalAliasEvent, api.DoNotSendToOtherServers); resErr != nil {
|
||||
return resErr
|
||||
}
|
||||
}
|
||||
|
|
@ -495,7 +502,7 @@ func (r *Upgrader) generateInitialEvents(ctx context.Context, oldRoom *api.Query
|
|||
return eventsToMake, nil
|
||||
}
|
||||
|
||||
func (r *Upgrader) sendInitialEvents(ctx context.Context, evTime time.Time, userID, newRoomID, newVersion string, eventsToMake []fledglingEvent) *api.PerformError {
|
||||
func (r *Upgrader) sendInitialEvents(ctx context.Context, evTime time.Time, userID string, userDomain gomatrixserverlib.ServerName, newRoomID, newVersion string, eventsToMake []fledglingEvent) *api.PerformError {
|
||||
var err error
|
||||
var builtEvents []*gomatrixserverlib.HeaderedEvent
|
||||
authEvents := gomatrixserverlib.NewAuthEvents(nil)
|
||||
|
|
@ -519,7 +526,7 @@ func (r *Upgrader) sendInitialEvents(ctx context.Context, evTime time.Time, user
|
|||
builder.PrevEvents = []gomatrixserverlib.EventReference{builtEvents[i-1].EventReference()}
|
||||
}
|
||||
var event *gomatrixserverlib.Event
|
||||
event, err = r.buildEvent(&builder, &authEvents, evTime, gomatrixserverlib.RoomVersion(newVersion))
|
||||
event, err = r.buildEvent(&builder, userDomain, &authEvents, evTime, gomatrixserverlib.RoomVersion(newVersion))
|
||||
if err != nil {
|
||||
return &api.PerformError{
|
||||
Msg: fmt.Sprintf("Failed to build new %q event: %s", builder.Type, err),
|
||||
|
|
@ -547,7 +554,7 @@ func (r *Upgrader) sendInitialEvents(ctx context.Context, evTime time.Time, user
|
|||
inputs = append(inputs, api.InputRoomEvent{
|
||||
Kind: api.KindNew,
|
||||
Event: event,
|
||||
Origin: r.Cfg.Matrix.ServerName,
|
||||
Origin: userDomain,
|
||||
SendAsServer: api.DoNotSendToOtherServers,
|
||||
})
|
||||
}
|
||||
|
|
@ -668,6 +675,7 @@ func createTemporaryPowerLevels(powerLevelContent *gomatrixserverlib.PowerLevelC
|
|||
|
||||
func (r *Upgrader) sendHeaderedEvent(
|
||||
ctx context.Context,
|
||||
serverName gomatrixserverlib.ServerName,
|
||||
headeredEvent *gomatrixserverlib.HeaderedEvent,
|
||||
sendAsServer string,
|
||||
) *api.PerformError {
|
||||
|
|
@ -675,7 +683,7 @@ func (r *Upgrader) sendHeaderedEvent(
|
|||
inputs = append(inputs, api.InputRoomEvent{
|
||||
Kind: api.KindNew,
|
||||
Event: headeredEvent,
|
||||
Origin: r.Cfg.Matrix.ServerName,
|
||||
Origin: serverName,
|
||||
SendAsServer: sendAsServer,
|
||||
})
|
||||
if err := api.SendInputRoomEvents(ctx, r.URSAPI, inputs, false); err != nil {
|
||||
|
|
@ -689,6 +697,7 @@ func (r *Upgrader) sendHeaderedEvent(
|
|||
|
||||
func (r *Upgrader) buildEvent(
|
||||
builder *gomatrixserverlib.EventBuilder,
|
||||
serverName gomatrixserverlib.ServerName,
|
||||
provider gomatrixserverlib.AuthEventProvider,
|
||||
evTime time.Time,
|
||||
roomVersion gomatrixserverlib.RoomVersion,
|
||||
|
|
@ -703,7 +712,7 @@ func (r *Upgrader) buildEvent(
|
|||
}
|
||||
builder.AuthEvents = refs
|
||||
event, err := builder.Build(
|
||||
evTime, r.Cfg.Matrix.ServerName, r.Cfg.Matrix.KeyID,
|
||||
evTime, serverName, r.Cfg.Matrix.KeyID,
|
||||
r.Cfg.Matrix.PrivateKey, roomVersion,
|
||||
)
|
||||
if err != nil {
|
||||
|
|
|
|||
|
|
@ -702,7 +702,7 @@ func (r *Queryer) QueryPublishedRooms(
|
|||
}
|
||||
return err
|
||||
}
|
||||
rooms, err := r.DB.GetPublishedRooms(ctx)
|
||||
rooms, err := r.DB.GetPublishedRooms(ctx, req.NetworkID, req.IncludeAllNetworks)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
|||
|
|
@ -27,18 +27,19 @@ const (
|
|||
RoomserverInputRoomEventsPath = "/roomserver/inputRoomEvents"
|
||||
|
||||
// Perform operations
|
||||
RoomserverPerformInvitePath = "/roomserver/performInvite"
|
||||
RoomserverPerformPeekPath = "/roomserver/performPeek"
|
||||
RoomserverPerformUnpeekPath = "/roomserver/performUnpeek"
|
||||
RoomserverPerformRoomUpgradePath = "/roomserver/performRoomUpgrade"
|
||||
RoomserverPerformJoinPath = "/roomserver/performJoin"
|
||||
RoomserverPerformLeavePath = "/roomserver/performLeave"
|
||||
RoomserverPerformBackfillPath = "/roomserver/performBackfill"
|
||||
RoomserverPerformPublishPath = "/roomserver/performPublish"
|
||||
RoomserverPerformInboundPeekPath = "/roomserver/performInboundPeek"
|
||||
RoomserverPerformForgetPath = "/roomserver/performForget"
|
||||
RoomserverPerformAdminEvacuateRoomPath = "/roomserver/performAdminEvacuateRoom"
|
||||
RoomserverPerformAdminEvacuateUserPath = "/roomserver/performAdminEvacuateUser"
|
||||
RoomserverPerformInvitePath = "/roomserver/performInvite"
|
||||
RoomserverPerformPeekPath = "/roomserver/performPeek"
|
||||
RoomserverPerformUnpeekPath = "/roomserver/performUnpeek"
|
||||
RoomserverPerformRoomUpgradePath = "/roomserver/performRoomUpgrade"
|
||||
RoomserverPerformJoinPath = "/roomserver/performJoin"
|
||||
RoomserverPerformLeavePath = "/roomserver/performLeave"
|
||||
RoomserverPerformBackfillPath = "/roomserver/performBackfill"
|
||||
RoomserverPerformPublishPath = "/roomserver/performPublish"
|
||||
RoomserverPerformInboundPeekPath = "/roomserver/performInboundPeek"
|
||||
RoomserverPerformForgetPath = "/roomserver/performForget"
|
||||
RoomserverPerformAdminEvacuateRoomPath = "/roomserver/performAdminEvacuateRoom"
|
||||
RoomserverPerformAdminEvacuateUserPath = "/roomserver/performAdminEvacuateUser"
|
||||
RoomserverPerformAdminDownloadStatePath = "/roomserver/performAdminDownloadState"
|
||||
|
||||
// Query operations
|
||||
RoomserverQueryLatestEventsAndStatePath = "/roomserver/queryLatestEventsAndState"
|
||||
|
|
@ -261,6 +262,17 @@ func (h *httpRoomserverInternalAPI) PerformAdminEvacuateRoom(
|
|||
)
|
||||
}
|
||||
|
||||
func (h *httpRoomserverInternalAPI) PerformAdminDownloadState(
|
||||
ctx context.Context,
|
||||
request *api.PerformAdminDownloadStateRequest,
|
||||
response *api.PerformAdminDownloadStateResponse,
|
||||
) error {
|
||||
return httputil.CallInternalRPCAPI(
|
||||
"PerformAdminDownloadState", h.roomserverURL+RoomserverPerformAdminDownloadStatePath,
|
||||
h.httpClient, ctx, request, response,
|
||||
)
|
||||
}
|
||||
|
||||
func (h *httpRoomserverInternalAPI) PerformAdminEvacuateUser(
|
||||
ctx context.Context,
|
||||
request *api.PerformAdminEvacuateUserRequest,
|
||||
|
|
|
|||
|
|
@ -65,6 +65,11 @@ func AddRoutes(r api.RoomserverInternalAPI, internalAPIMux *mux.Router) {
|
|||
httputil.MakeInternalRPCAPI("RoomserverPerformAdminEvacuateUser", r.PerformAdminEvacuateUser),
|
||||
)
|
||||
|
||||
internalAPIMux.Handle(
|
||||
RoomserverPerformAdminDownloadStatePath,
|
||||
httputil.MakeInternalRPCAPI("RoomserverPerformAdminDownloadState", r.PerformAdminDownloadState),
|
||||
)
|
||||
|
||||
internalAPIMux.Handle(
|
||||
RoomserverQueryPublishedRoomsPath,
|
||||
httputil.MakeInternalRPCAPI("RoomserverQueryPublishedRooms", r.QueryPublishedRooms),
|
||||
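
The new admin path is wired up like the existing ones: the server side wraps the method with httputil.MakeInternalRPCAPI and the client side posts to it with httputil.CallInternalRPCAPI. Below is a stripped-down sketch of that JSON request/response pattern using only the standard library and httptest; it is a generic illustration, not Dendrite's helper code:

package main

import (
    "bytes"
    "encoding/json"
    "fmt"
    "net/http"
    "net/http/httptest"
)

type downloadStateRequest struct {
    RoomID string `json:"room_id"`
}

type downloadStateResponse struct {
    Error string `json:"error,omitempty"`
}

// makeInternalRPC wraps a typed handler as an HTTP endpoint, in the spirit of
// httputil.MakeInternalRPCAPI.
func makeInternalRPC(handle func(*downloadStateRequest, *downloadStateResponse) error) http.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) {
        var req downloadStateRequest
        var res downloadStateResponse
        if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
            http.Error(w, err.Error(), http.StatusBadRequest)
            return
        }
        if err := handle(&req, &res); err != nil {
            res.Error = err.Error()
        }
        _ = json.NewEncoder(w).Encode(&res)
    }
}

// callInternalRPC posts a request struct and decodes the response struct, in the
// spirit of httputil.CallInternalRPCAPI.
func callInternalRPC(client *http.Client, url string, req, res interface{}) error {
    body, err := json.Marshal(req)
    if err != nil {
        return err
    }
    resp, err := client.Post(url, "application/json", bytes.NewReader(body))
    if err != nil {
        return err
    }
    defer resp.Body.Close()
    return json.NewDecoder(resp.Body).Decode(res)
}

func main() {
    srv := httptest.NewServer(makeInternalRPC(func(req *downloadStateRequest, res *downloadStateResponse) error {
        fmt.Println("handling state download for", req.RoomID)
        return nil
    }))
    defer srv.Close()

    var res downloadStateResponse
    if err := callInternalRPC(srv.Client(), srv.URL, &downloadStateRequest{RoomID: "!r:example.org"}, &res); err != nil {
        fmt.Println("call failed:", err)
    }
}
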

@@ -139,9 +139,9 @@ type Database interface {
    // Returns an error if the retrieval went wrong.
    EventsFromIDs(ctx context.Context, eventIDs []string) ([]types.Event, error)
    // Publish or unpublish a room from the room directory.
    PublishRoom(ctx context.Context, roomID string, publish bool) error
    PublishRoom(ctx context.Context, roomID, appserviceID, networkID string, publish bool) error
    // Returns a list of room IDs for rooms which are published.
    GetPublishedRooms(ctx context.Context) ([]string, error)
    GetPublishedRooms(ctx context.Context, networkID string, includeAllNetworks bool) ([]string, error)
    // Returns whether a given room is published or not.
    GetPublishedRoom(ctx context.Context, roomID string) (bool, error)

@@ -0,0 +1,45 @@
// Copyright 2022 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package deltas

import (
    "context"
    "database/sql"
    "fmt"
)

func UpPulishedAppservice(ctx context.Context, tx *sql.Tx) error {
    _, err := tx.ExecContext(ctx, `ALTER TABLE roomserver_published ADD COLUMN IF NOT EXISTS appservice_id TEXT NOT NULL DEFAULT '';`)
    if err != nil {
        return fmt.Errorf("failed to execute upgrade: %w", err)
    }
    _, err = tx.ExecContext(ctx, `ALTER TABLE roomserver_published ADD COLUMN IF NOT EXISTS network_id TEXT NOT NULL DEFAULT '';`)
    if err != nil {
        return fmt.Errorf("failed to execute upgrade: %w", err)
    }
    return nil
}

func DownPublishedAppservice(ctx context.Context, tx *sql.Tx) error {
    _, err := tx.ExecContext(ctx, `ALTER TABLE roomserver_published DROP COLUMN IF EXISTS appservice_id;`)
    if err != nil {
        return fmt.Errorf("failed to execute downgrade: %w", err)
    }
    _, err = tx.ExecContext(ctx, `ALTER TABLE roomserver_published DROP COLUMN IF EXISTS network_id;`)
    if err != nil {
        return fmt.Errorf("failed to execute downgrade: %w", err)
    }
    return nil
}
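
UpPulishedAppservice and DownPublishedAppservice are written against an open transaction, which is what the sqlutil migrator registered later in this diff hands them. Running such a step by hand would look roughly like the generic helper below, which assumes a *sql.DB has already been opened with whatever driver is in use:

package main

import (
    "context"
    "database/sql"
    "fmt"
)

// runInTx runs a migration step of the same shape as UpPulishedAppservice,
// committing on success and rolling back on failure.
func runInTx(ctx context.Context, db *sql.DB, step func(context.Context, *sql.Tx) error) error {
    tx, err := db.BeginTx(ctx, nil)
    if err != nil {
        return fmt.Errorf("begin: %w", err)
    }
    if err := step(ctx, tx); err != nil {
        _ = tx.Rollback()
        return fmt.Errorf("migration step: %w", err)
    }
    return tx.Commit()
}

func main() {
    // No driver is registered in this sketch, so nothing is executed here;
    // with a real *sql.DB you would call, for example:
    //   runInTx(ctx, db, deltas.UpPulishedAppservice)
    _ = runInTx
}
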
|
|
@ -20,6 +20,7 @@ import (
|
|||
|
||||
"github.com/matrix-org/dendrite/internal"
|
||||
"github.com/matrix-org/dendrite/internal/sqlutil"
|
||||
"github.com/matrix-org/dendrite/roomserver/storage/postgres/deltas"
|
||||
"github.com/matrix-org/dendrite/roomserver/storage/tables"
|
||||
)
|
||||
|
||||
|
|
@@ -27,31 +28,48 @@ const publishedSchema = `
 -- Stores which rooms are published in the room directory
 CREATE TABLE IF NOT EXISTS roomserver_published (
     -- The room ID of the room
-    room_id TEXT NOT NULL PRIMARY KEY,
+    room_id TEXT NOT NULL,
+    -- The appservice ID of the room
+    appservice_id TEXT NOT NULL,
+    -- The network_id of the room
+    network_id TEXT NOT NULL,
     -- Whether it is published or not
-    published BOOLEAN NOT NULL DEFAULT false
+    published BOOLEAN NOT NULL DEFAULT false,
+    PRIMARY KEY (room_id, appservice_id, network_id)
 );
 `
 
 const upsertPublishedSQL = "" +
-	"INSERT INTO roomserver_published (room_id, published) VALUES ($1, $2) " +
-	"ON CONFLICT (room_id) DO UPDATE SET published=$2"
+	"INSERT INTO roomserver_published (room_id, appservice_id, network_id, published) VALUES ($1, $2, $3, $4) " +
+	"ON CONFLICT (room_id, appservice_id, network_id) DO UPDATE SET published=$4"
 
 const selectAllPublishedSQL = "" +
-	"SELECT room_id FROM roomserver_published WHERE published = $1 ORDER BY room_id ASC"
+	"SELECT room_id FROM roomserver_published WHERE published = $1 AND CASE WHEN $2 THEN 1=1 ELSE network_id = '' END ORDER BY room_id ASC"
+
+const selectNetworkPublishedSQL = "" +
+	"SELECT room_id FROM roomserver_published WHERE published = $1 AND network_id = $2 ORDER BY room_id ASC"
 
 const selectPublishedSQL = "" +
 	"SELECT published FROM roomserver_published WHERE room_id = $1"
 
 type publishedStatements struct {
-	upsertPublishedStmt    *sql.Stmt
-	selectAllPublishedStmt *sql.Stmt
-	selectPublishedStmt    *sql.Stmt
+	upsertPublishedStmt        *sql.Stmt
+	selectAllPublishedStmt     *sql.Stmt
+	selectPublishedStmt        *sql.Stmt
+	selectNetworkPublishedStmt *sql.Stmt
 }
 
 func CreatePublishedTable(db *sql.DB) error {
 	_, err := db.Exec(publishedSchema)
-	return err
+	if err != nil {
+		return err
+	}
+	m := sqlutil.NewMigrator(db)
+	m.AddMigrations(sqlutil.Migration{
+		Version: "roomserver: published appservice",
+		Up:      deltas.UpPulishedAppservice,
+	})
+	return m.Up(context.Background())
 }
 
 func PreparePublishedTable(db *sql.DB) (tables.Published, error) {
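With the composite primary key, a room is now published per (appservice_id, network_id) pair rather than once overall, and the upsert's conflict target widens to match. The sketch below is illustrative only — hypothetical helper name, made-up room and network IDs, plain database/sql against a Postgres-backed *sql.DB — and shows that re-running the upsert for the same tuple only updates the published flag, while a different network_id yields a separate row for the same room.

// Illustrative sketch, not part of the commit: behaviour of the new
// composite conflict target in upsertPublishedSQL. Helper name and IDs
// are invented for the example; assumes a Postgres-backed *sql.DB.
package example

import (
	"context"
	"database/sql"
)

const upsertPublished = "INSERT INTO roomserver_published (room_id, appservice_id, network_id, published) VALUES ($1, $2, $3, $4) " +
	"ON CONFLICT (room_id, appservice_id, network_id) DO UPDATE SET published=$4"

func upsertExamples(ctx context.Context, db *sql.DB) error {
	// Creates the row for the room in the default (empty) network.
	if _, err := db.ExecContext(ctx, upsertPublished, "!abc:example.org", "", "", true); err != nil {
		return err
	}
	// Same (room_id, appservice_id, network_id) tuple: hits the conflict
	// target and only flips the published flag on the existing row.
	if _, err := db.ExecContext(ctx, upsertPublished, "!abc:example.org", "", "", false); err != nil {
		return err
	}
	// Different network_id: a separate row for the same room.
	_, err := db.ExecContext(ctx, upsertPublished, "!abc:example.org", "appservice1", "network1", true)
	return err
}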
@@ -61,14 +79,15 @@ func PreparePublishedTable(db *sql.DB) (tables.Published, error) {
 		{&s.upsertPublishedStmt, upsertPublishedSQL},
 		{&s.selectAllPublishedStmt, selectAllPublishedSQL},
 		{&s.selectPublishedStmt, selectPublishedSQL},
+		{&s.selectNetworkPublishedStmt, selectNetworkPublishedSQL},
 	}.Prepare(db)
 }
 
 func (s *publishedStatements) UpsertRoomPublished(
-	ctx context.Context, txn *sql.Tx, roomID string, published bool,
+	ctx context.Context, txn *sql.Tx, roomID, appserviceID, networkID string, published bool,
 ) (err error) {
 	stmt := sqlutil.TxStmt(txn, s.upsertPublishedStmt)
-	_, err = stmt.ExecContext(ctx, roomID, published)
+	_, err = stmt.ExecContext(ctx, roomID, appserviceID, networkID, published)
 	return
 }
 
@@ -84,10 +103,18 @@ func (s *publishedStatements) SelectPublishedFromRoomID(
 }
 
 func (s *publishedStatements) SelectAllPublishedRooms(
-	ctx context.Context, txn *sql.Tx, published bool,
+	ctx context.Context, txn *sql.Tx, networkID string, published, includeAllNetworks bool,
 ) ([]string, error) {
-	stmt := sqlutil.TxStmt(txn, s.selectAllPublishedStmt)
-	rows, err := stmt.QueryContext(ctx, published)
+	var rows *sql.Rows
+	var err error
+	if networkID != "" {
+		stmt := sqlutil.TxStmt(txn, s.selectNetworkPublishedStmt)
+		rows, err = stmt.QueryContext(ctx, published, networkID)
+	} else {
+		stmt := sqlutil.TxStmt(txn, s.selectAllPublishedStmt)
+		rows, err = stmt.QueryContext(ctx, published, includeAllNetworks)
+
+	}
 	if err != nil {
 		return nil, err
 	}
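The CASE WHEN in selectAllPublishedSQL turns the second parameter into an "include all networks" switch: when it is true the predicate is always satisfied, otherwise only rows with an empty network_id (rooms not tied to any appservice network) match. Below is a rough sketch of the three call shapes; it assumes it sits in the same package as publishedStatements, and the network ID is made up.

// Illustrative sketch, not part of the commit: the three ways the updated
// query can be driven. Assumes this lives alongside publishedStatements;
// "network1" is a made-up network ID.
func selectExamples(ctx context.Context, s *publishedStatements) error {
	// Every published room, regardless of network: the CASE WHEN collapses to 1=1.
	all, err := s.SelectAllPublishedRooms(ctx, nil, "", true, true)
	if err != nil {
		return err
	}
	// Only rooms published outside any appservice network (network_id = '').
	defaultOnly, err := s.SelectAllPublishedRooms(ctx, nil, "", true, false)
	if err != nil {
		return err
	}
	// Rooms published under one specific network, via selectNetworkPublishedStmt.
	oneNetwork, err := s.SelectAllPublishedRooms(ctx, nil, "network1", true, false)
	if err != nil {
		return err
	}
	_, _, _ = all, defaultOnly, oneNetwork
	return nil
}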
@@ -722,9 +722,9 @@ func (d *Database) storeEvent(
 	}, redactionEvent, redactedEventID, err
 }
 
-func (d *Database) PublishRoom(ctx context.Context, roomID string, publish bool) error {
+func (d *Database) PublishRoom(ctx context.Context, roomID, appserviceID, networkID string, publish bool) error {
 	return d.Writer.Do(d.DB, nil, func(txn *sql.Tx) error {
-		return d.PublishedTable.UpsertRoomPublished(ctx, txn, roomID, publish)
+		return d.PublishedTable.UpsertRoomPublished(ctx, txn, roomID, appserviceID, networkID, publish)
 	})
 }
 
@@ -732,8 +732,8 @@ func (d *Database) GetPublishedRoom(ctx context.Context, roomID string) (bool, error) {
 	return d.PublishedTable.SelectPublishedFromRoomID(ctx, nil, roomID)
 }
 
-func (d *Database) GetPublishedRooms(ctx context.Context) ([]string, error) {
-	return d.PublishedTable.SelectAllPublishedRooms(ctx, nil, true)
+func (d *Database) GetPublishedRooms(ctx context.Context, networkID string, includeAllNetworks bool) ([]string, error) {
+	return d.PublishedTable.SelectAllPublishedRooms(ctx, nil, networkID, true, includeAllNetworks)
 }
 
 func (d *Database) MissingAuthPrevEvents(
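At the shared storage layer the widened signatures are used as below. This is a hedged sketch, not code from the commit: the helper name and the room, appservice and network IDs are invented, and it assumes the shared.Database type lives at roomserver/storage/shared as elsewhere in the Dendrite tree.

// Illustrative sketch, not part of the commit: driving the widened
// Database methods from this hunk. All IDs are made up.
package example

import (
	"context"

	"github.com/matrix-org/dendrite/roomserver/storage/shared"
)

func publishExamples(ctx context.Context, d *shared.Database) error {
	// An ordinary room in the main directory: empty appservice and network IDs.
	if err := d.PublishRoom(ctx, "!abc:example.org", "", "", true); err != nil {
		return err
	}
	// A room published on behalf of an appservice-defined network.
	if err := d.PublishRoom(ctx, "!def:example.org", "appservice1", "network1", true); err != nil {
		return err
	}
	// All published rooms across every network...
	if _, err := d.GetPublishedRooms(ctx, "", true); err != nil {
		return err
	}
	// ...or only the rooms published under one network.
	_, err := d.GetPublishedRooms(ctx, "network1", false)
	return err
}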
@@ -0,0 +1,64 @@
// Copyright 2022 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package deltas

import (
	"context"
	"database/sql"
	"fmt"
)

func UpPulishedAppservice(ctx context.Context, tx *sql.Tx) error {
	_, err := tx.ExecContext(ctx, ` ALTER TABLE roomserver_published RENAME TO roomserver_published_tmp;
	CREATE TABLE IF NOT EXISTS roomserver_published (
		room_id TEXT NOT NULL,
		appservice_id TEXT NOT NULL,
		network_id TEXT NOT NULL,
		published BOOLEAN NOT NULL DEFAULT false,
		CONSTRAINT unique_published_idx PRIMARY KEY (room_id, appservice_id, network_id)
	);
	INSERT
	INTO roomserver_published (
		room_id, published
	) SELECT
		room_id, published
	FROM roomserver_published_tmp
	;
	DROP TABLE roomserver_published_tmp;`)
	if err != nil {
		return fmt.Errorf("failed to execute upgrade: %w", err)
	}
	return nil
}

func DownPublishedAppservice(ctx context.Context, tx *sql.Tx) error {
	_, err := tx.ExecContext(ctx, ` ALTER TABLE roomserver_published RENAME TO roomserver_published_tmp;
	CREATE TABLE IF NOT EXISTS roomserver_published (
		room_id TEXT NOT NULL PRIMARY KEY,
		published BOOLEAN NOT NULL DEFAULT false
	);
	INSERT
	INTO roomserver_published (
		room_id, published
	) SELECT
		room_id, published
	FROM roomserver_published_tmp
	;
	DROP TABLE roomserver_published_tmp;`)
	if err != nil {
		return fmt.Errorf("failed to execute downgrade: %w", err)
	}
	return nil
}
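This counterpart delta (apparently the SQLite variant, given the rename-and-copy approach and the inline CONSTRAINT) rebuilds the table instead of altering it, since SQLite's ALTER TABLE cannot change an existing table's primary key: it renames the old table, creates the new one with the composite key, copies room_id and published across, and drops the temporary table. Below is a minimal sketch of driving it with plain database/sql, assuming the sqlite3 deltas package path mirrors the postgres one shown earlier; the helper name is made up, and in Dendrite the migrator would normally run this instead.

// Illustrative sketch, not part of the commit: running the rebuild delta
// inside a plain database/sql transaction. The package path below assumes
// the sqlite3 deltas package follows the postgres layout shown above.
package example

import (
	"context"
	"database/sql"

	"github.com/matrix-org/dendrite/roomserver/storage/sqlite3/deltas"
)

func applyPublishedAppserviceDelta(ctx context.Context, db *sql.DB) error {
	txn, err := db.BeginTx(ctx, nil)
	if err != nil {
		return err
	}
	// The delta takes a *sql.Tx, so roll back on failure and commit on success.
	if err := deltas.UpPulishedAppservice(ctx, txn); err != nil {
		txn.Rollback() // nolint: errcheck
		return err
	}
	return txn.Commit()
}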