diff --git a/.github/codecov.yaml b/.github/codecov.yaml index 78122c990..3462e91ee 100644 --- a/.github/codecov.yaml +++ b/.github/codecov.yaml @@ -7,7 +7,7 @@ coverage: project: default: target: auto - threshold: 0% + threshold: 0.1% base: auto flags: - unittests diff --git a/.github/workflows/dendrite.yml b/.github/workflows/dendrite.yml index ac40f06b0..5edb1924e 100644 --- a/.github/workflows/dendrite.yml +++ b/.github/workflows/dendrite.yml @@ -28,10 +28,10 @@ jobs: runs-on: ubuntu-latest if: ${{ false }} # disable for now steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Go - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: go-version: "stable" cache: true @@ -41,7 +41,7 @@ jobs: with: node-version: 14 - - uses: actions/cache@v3 + - uses: actions/cache@v4 with: path: ~/.npm key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }} @@ -66,11 +66,11 @@ jobs: name: Linting runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install libolm run: sudo apt-get install libolm-dev libolm3 - name: Install Go - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: go-version: "stable" - name: golangci-lint @@ -102,14 +102,14 @@ jobs: --health-timeout 5s --health-retries 5 steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install libolm run: sudo apt-get install libolm-dev libolm3 - name: Setup go - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: go-version: "stable" - - uses: actions/cache@v3 + - uses: actions/cache@v4 # manually set up caches, as they otherwise clash with different steps using setup-go with cache=true with: path: | @@ -141,12 +141,12 @@ jobs: goos: ["linux"] goarch: ["amd64", "386"] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Setup go - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: go-version: "stable" - - uses: actions/cache@v3 + - uses: actions/cache@v4 with: path: | ~/.cache/go-build @@ -174,12 +174,12 @@ jobs: goos: ["windows"] goarch: ["amd64"] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Setup Go - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: go-version: "stable" - - uses: actions/cache@v3 + - uses: actions/cache@v4 with: path: | ~/.cache/go-build @@ -235,11 +235,11 @@ jobs: --health-timeout 5s --health-retries 5 steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install libolm run: sudo apt-get install libolm-dev libolm3 - name: Setup go - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: go-version: "stable" - name: Set up gotestfmt @@ -247,7 +247,7 @@ jobs: with: # Optional: pass GITHUB_TOKEN to avoid rate limiting. 
token: ${{ secrets.GITHUB_TOKEN }} - - uses: actions/cache@v3 + - uses: actions/cache@v4 with: path: | ~/.cache/go-build @@ -262,10 +262,11 @@ jobs: POSTGRES_PASSWORD: postgres POSTGRES_DB: dendrite - name: Upload coverage to Codecov - uses: codecov/codecov-action@v3 + uses: codecov/codecov-action@v4 with: flags: unittests fail_ci_if_error: true + token: ${{ secrets.CODECOV_TOKEN }} # run database upgrade tests upgrade_test: @@ -274,12 +275,20 @@ jobs: needs: initial-tests-done runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Setup go - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: go-version: "stable" cache: true + - uses: actions/cache@v4 + with: + path: | + ~/.cache/go-build + ~/go/pkg/mod + key: ${{ runner.os }}-go-upgrade-test-${{ hashFiles('**/go.sum') }} + restore-keys: | + ${{ runner.os }}-go-upgrade-test- - name: Docker version run: docker version - name: Build upgrade-tests @@ -296,12 +305,20 @@ jobs: needs: initial-tests-done runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Setup go - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: go-version: "stable" cache: true + - uses: actions/cache@v4 + with: + path: | + ~/.cache/go-build + ~/go/pkg/mod + key: ${{ runner.os }}-go-upgrade-direct-test-${{ hashFiles('**/go.sum') }} + restore-keys: | + ${{ runner.os }}-go-upgrade-direct-test- - name: Docker version run: docker version - name: Build upgrade-tests @@ -340,8 +357,8 @@ jobs: SYTEST_BRANCH: ${{ github.head_ref }} CGO_ENABLED: ${{ matrix.cgo && 1 }} steps: - - uses: actions/checkout@v3 - - uses: actions/cache@v3 + - uses: actions/checkout@v4 + - uses: actions/cache@v4 with: path: | ~/.cache/go-build @@ -364,7 +381,7 @@ jobs: run: /src/are-we-synapse-yet.py /logs/results.tap -v continue-on-error: true # not fatal - name: Upload Sytest logs - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v4 if: ${{ always() }} with: name: Sytest Logs - ${{ job.status }} - (Dendrite, ${{ join(matrix.*, ', ') }}) @@ -404,8 +421,8 @@ jobs: run: | sudo apt-get update && sudo apt-get install -y libolm3 libolm-dev go install github.com/gotesttools/gotestfmt/v2/cmd/gotestfmt@latest - - name: Run actions/checkout@v3 for dendrite - uses: actions/checkout@v3 + - name: Run actions/checkout@v4 for dendrite + uses: actions/checkout@v4 with: path: dendrite diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 8d3a8d674..c795cd366 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -27,22 +27,22 @@ jobs: security-events: write # To upload Trivy sarif files steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Get release tag & build flags if: github.event_name == 'release' # Only for GitHub releases run: | echo "RELEASE_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV - name: Set up QEMU - uses: docker/setup-qemu-action@v1 + uses: docker/setup-qemu-action@v3 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 - name: Login to Docker Hub - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: username: ${{ env.DOCKER_HUB_USER }} password: ${{ secrets.DOCKER_TOKEN }} - name: Login to GitHub Containers - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: registry: ghcr.io username: ${{ github.repository_owner }} @@ -98,22 +98,22 @@ jobs: packages: write steps: - name: Checkout - uses: actions/checkout@v3 + uses: 
actions/checkout@v4 - name: Get release tag & build flags if: github.event_name == 'release' # Only for GitHub releases run: | echo "RELEASE_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV - name: Set up QEMU - uses: docker/setup-qemu-action@v1 + uses: docker/setup-qemu-action@v3 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 - name: Login to Docker Hub - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: username: ${{ env.DOCKER_HUB_USER }} password: ${{ secrets.DOCKER_TOKEN }} - name: Login to GitHub Containers - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: registry: ghcr.io username: ${{ github.repository_owner }} @@ -159,22 +159,22 @@ jobs: packages: write steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Get release tag & build flags if: github.event_name == 'release' # Only for GitHub releases run: | echo "RELEASE_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV - name: Set up QEMU - uses: docker/setup-qemu-action@v1 + uses: docker/setup-qemu-action@v3 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 - name: Login to Docker Hub - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: username: ${{ env.DOCKER_HUB_USER }} password: ${{ secrets.DOCKER_TOKEN }} - name: Login to GitHub Containers - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: registry: ghcr.io username: ${{ github.repository_owner }} diff --git a/.github/workflows/gh-pages.yml b/.github/workflows/gh-pages.yml index 9df3cceae..30f55b7c8 100644 --- a/.github/workflows/gh-pages.yml +++ b/.github/workflows/gh-pages.yml @@ -28,7 +28,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup Pages uses: actions/configure-pages@v2 - name: Build with Jekyll diff --git a/.github/workflows/helm.yml b/.github/workflows/helm.yml index 9a5eb2b62..d4772e106 100644 --- a/.github/workflows/helm.yml +++ b/.github/workflows/helm.yml @@ -17,7 +17,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v4 with: fetch-depth: 0 diff --git a/.github/workflows/k8s.yml b/.github/workflows/k8s.yml index af2750356..a49042bf2 100644 --- a/.github/workflows/k8s.yml +++ b/.github/workflows/k8s.yml @@ -17,7 +17,7 @@ jobs: outputs: changed: ${{ steps.list-changed.outputs.changed }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: fetch-depth: 0 - uses: azure/setup-helm@v3 @@ -48,7 +48,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 ref: ${{ inputs.checkoutCommit }} @@ -66,7 +66,7 @@ jobs: - name: Create k3d cluster uses: nolar/setup-k3d-k3s@v1 with: - version: v1.21 + version: v1.28 - name: Remove node taints run: | kubectl taint --all=true nodes node.cloudprovider.kubernetes.io/uninitialized- || true diff --git a/.github/workflows/schedules.yaml b/.github/workflows/schedules.yaml index 509861860..e339c14d3 100644 --- a/.github/workflows/schedules.yaml +++ b/.github/workflows/schedules.yaml @@ -10,8 +10,26 @@ concurrency: cancel-in-progress: true jobs: + check_date: # https://stackoverflow.com/questions/63014786/how-to-schedule-a-github-actions-nightly-build-but-run-it-only-when-there-where + runs-on: ubuntu-latest + name: Check latest commit + outputs: + should_run: ${{ steps.should_run.outputs.should_run }} + 
steps: + - uses: actions/checkout@v4 + - name: print latest_commit + run: echo ${{ github.sha }} + + - id: should_run + continue-on-error: true + name: check latest commit is less than a day + if: ${{ github.event_name == 'schedule' }} + run: test -z $(git rev-list --after="24 hours" ${{ github.sha }}) && echo "::set-output name=should_run::false" + # run Sytest in different variations sytest: + needs: check_date + if: ${{ needs.check_date.outputs.should_run != 'false' }} timeout-minutes: 60 name: "Sytest (${{ matrix.label }})" runs-on: ubuntu-latest @@ -38,8 +56,8 @@ jobs: RACE_DETECTION: 1 COVER: 1 steps: - - uses: actions/checkout@v3 - - uses: actions/cache@v3 + - uses: actions/checkout@v4 + - uses: actions/cache@v4 with: path: | ~/.cache/go-build @@ -62,7 +80,7 @@ jobs: run: /src/are-we-synapse-yet.py /logs/results.tap -v continue-on-error: true # not fatal - name: Upload Sytest logs - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v4 if: ${{ always() }} with: name: Sytest Logs - ${{ job.status }} - (Dendrite ${{ join(matrix.*, ' ') }}) @@ -75,31 +93,34 @@ jobs: timeout-minutes: 5 name: "Sytest Coverage" runs-on: ubuntu-latest - needs: sytest # only run once Sytest is done - if: ${{ always() }} + needs: [ sytest, check_date ] # only run once Sytest is done and there was a commit + if: ${{ always() && needs.check_date.outputs.should_run != 'false' }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Go - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: go-version: 'stable' cache: true - name: Download all artifacts - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 - name: Collect coverage run: | go tool covdata textfmt -i="$(find Sytest* -name 'covmeta*' -type f -exec dirname {} \; | uniq | paste -s -d ',' -)" -o sytest.cov grep -Ev 'relayapi|setup/mscs|api_trace' sytest.cov > final.cov go tool covdata func -i="$(find Sytest* -name 'covmeta*' -type f -exec dirname {} \; | uniq | paste -s -d ',' -)" - name: Upload coverage to Codecov - uses: codecov/codecov-action@v3 + uses: codecov/codecov-action@v4 with: files: ./final.cov flags: sytest fail_ci_if_error: true + token: ${{ secrets.CODECOV_TOKEN }} # run Complement complement: + needs: check_date + if: ${{ needs.check_date.outputs.should_run != 'false' }} name: "Complement (${{ matrix.label }})" timeout-minutes: 60 runs-on: ubuntu-latest @@ -129,8 +150,8 @@ jobs: run: | sudo apt-get update && sudo apt-get install -y libolm3 libolm-dev go install github.com/gotesttools/gotestfmt/v2/cmd/gotestfmt@latest - - name: Run actions/checkout@v3 for dendrite - uses: actions/checkout@v3 + - name: Run actions/checkout@v4 for dendrite + uses: actions/checkout@v4 with: path: dendrite @@ -174,7 +195,7 @@ jobs: # Run Complement - run: | set -o pipefail && - go test -v -json -tags dendrite_blacklist ./tests/... 
2>&1 | gotestfmt + go test -v -json -tags dendrite_blacklist ./tests ./tests/csapi 2>&1 | gotestfmt -hide all shell: bash name: Run Complement Tests env: @@ -185,7 +206,7 @@ jobs: working-directory: complement - name: Upload Complement logs - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v4 if: ${{ always() }} with: name: Complement Logs - (Dendrite ${{ join(matrix.*, ' ') }}) @@ -196,30 +217,32 @@ jobs: timeout-minutes: 5 name: "Complement Coverage" runs-on: ubuntu-latest - needs: complement # only run once Complement is done - if: ${{ always() }} + needs: [ complement, check_date ] # only run once Complements is done and there was a commit + if: ${{ always() && needs.check_date.outputs.should_run != 'false' }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Go - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: go-version: 'stable' cache: true - name: Download all artifacts - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 - name: Collect coverage run: | go tool covdata textfmt -i="$(find Complement* -name 'covmeta*' -type f -exec dirname {} \; | uniq | paste -s -d ',' -)" -o complement.cov grep -Ev 'relayapi|setup/mscs|api_trace' complement.cov > final.cov go tool covdata func -i="$(find Complement* -name 'covmeta*' -type f -exec dirname {} \; | uniq | paste -s -d ',' -)" - name: Upload coverage to Codecov - uses: codecov/codecov-action@v3 + uses: codecov/codecov-action@v4 with: files: ./final.cov flags: complement fail_ci_if_error: true + token: ${{ secrets.CODECOV_TOKEN }} # required element-web: + if: ${{ false }} # disable for now, as Cypress has been replaced by Playwright timeout-minutes: 120 runs-on: ubuntu-latest steps: @@ -228,7 +251,7 @@ jobs: # Our test suite includes some screenshot tests with unusual diacritics, which are # supposed to be covered by STIXGeneral. tools: fonts-stix - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 with: repository: matrix-org/matrix-react-sdk - uses: actions/setup-node@v3 @@ -259,6 +282,7 @@ jobs: TMPDIR: ${{ runner.temp }} element-web-pinecone: + if: ${{ false }} # disable for now, as Cypress has been replaced by Playwright timeout-minutes: 120 runs-on: ubuntu-latest steps: @@ -267,7 +291,7 @@ jobs: # Our test suite includes some screenshot tests with unusual diacritics, which are # supposed to be covered by STIXGeneral. tools: fonts-stix - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 with: repository: matrix-org/matrix-react-sdk - uses: actions/setup-node@v3 diff --git a/CHANGES.md b/CHANGES.md index 97ec7bec4..c41025e91 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,5 +1,26 @@ # Changelog +## Dendrite 0.13.6 (2024-01-26) + +Upgrading to this version is **highly** recommended, as it contains several QoL improvements. 
+ +### Fixes + +- Use `AckExplicitPolicy` for JetStream consumers, so messages don't pile up in NATS +- A rare panic when assigning a state key NID has been fixed +- A rare panic when checking powerlevels has been fixed +- Notary keys requests for all keys now work correctly +- Spec compliance: + - Return `M_INVALID_PARAM` when querying room aliases + - Handle empty `from` parameter when requesting `/messages` + - Add CORP headers on media endpoints + - Remove `aliases` from `/publicRooms` responses + - Allow `+` in MXIDs (Contributed by [RosstheRoss](https://github.com/RosstheRoss)) +- Fixes membership transitions from `knock` to `join` in `knock_restricted` rooms +- Incremental syncs now batch querying events (Contributed by [recht](https://github.com/recht)) +- Move `/joined_members` back to the clientAPI/roomserver, which should make bridges happier again +- Backfilling from other servers now only uses at max 100 events instead of potentially thousands + ## Dendrite 0.13.5 (2023-12-12) Upgrading to this version is **highly** recommended, as it fixes several long-standing bugs in diff --git a/appservice/api/query.go b/appservice/api/query.go index 472266d9e..8e159152e 100644 --- a/appservice/api/query.go +++ b/appservice/api/query.go @@ -82,9 +82,17 @@ type UserIDExistsResponse struct { } const ( - ASProtocolPath = "/_matrix/app/unstable/thirdparty/protocol/" - ASUserPath = "/_matrix/app/unstable/thirdparty/user" - ASLocationPath = "/_matrix/app/unstable/thirdparty/location" + ASProtocolLegacyPath = "/_matrix/app/unstable/thirdparty/protocol/" + ASUserLegacyPath = "/_matrix/app/unstable/thirdparty/user" + ASLocationLegacyPath = "/_matrix/app/unstable/thirdparty/location" + ASRoomAliasExistsLegacyPath = "/rooms/" + ASUserExistsLegacyPath = "/users/" + + ASProtocolPath = "/_matrix/app/v1/thirdparty/protocol/" + ASUserPath = "/_matrix/app/v1/thirdparty/user" + ASLocationPath = "/_matrix/app/v1/thirdparty/location" + ASRoomAliasExistsPath = "/_matrix/app/v1/rooms/" + ASUserExistsPath = "/_matrix/app/v1/users/" ) type ProtocolRequest struct { diff --git a/appservice/consumers/roomserver.go b/appservice/consumers/roomserver.go index b7fc1f698..b07b24fcc 100644 --- a/appservice/consumers/roomserver.go +++ b/appservice/consumers/roomserver.go @@ -206,13 +206,21 @@ func (s *OutputRoomEventConsumer) sendEvents( } // Send the transaction to the appservice. 
- // https://matrix.org/docs/spec/application_service/r0.1.2#put-matrix-app-v1-transactions-txnid - address := fmt.Sprintf("%s/transactions/%s?access_token=%s", state.RequestUrl(), txnID, url.QueryEscape(state.HSToken)) + // https://spec.matrix.org/v1.9/application-service-api/#pushing-events + path := "_matrix/app/v1/transactions" + if s.cfg.LegacyPaths { + path = "transactions" + } + address := fmt.Sprintf("%s/%s/%s", state.RequestUrl(), path, txnID) + if s.cfg.LegacyAuth { + address += "?access_token=" + url.QueryEscape(state.HSToken) + } req, err := http.NewRequestWithContext(ctx, "PUT", address, bytes.NewBuffer(transaction)) if err != nil { return err } req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", state.HSToken)) resp, err := state.HTTPClient.Do(req) if err != nil { return state.backoffAndPause(err) diff --git a/appservice/query/query.go b/appservice/query/query.go index 5c736f379..7f33e17f8 100644 --- a/appservice/query/query.go +++ b/appservice/query/query.go @@ -19,10 +19,10 @@ package query import ( "context" "encoding/json" + "fmt" "io" "net/http" "net/url" - "strings" "sync" log "github.com/sirupsen/logrus" @@ -32,9 +32,6 @@ import ( "github.com/matrix-org/dendrite/setup/config" ) -const roomAliasExistsPath = "/rooms/" -const userIDExistsPath = "/users/" - // AppServiceQueryAPI is an implementation of api.AppServiceQueryAPI type AppServiceQueryAPI struct { Cfg *config.AppServiceAPI @@ -55,14 +52,23 @@ func (a *AppServiceQueryAPI) RoomAliasExists( // Determine which application service should handle this request for _, appservice := range a.Cfg.Derived.ApplicationServices { if appservice.URL != "" && appservice.IsInterestedInRoomAlias(request.Alias) { + path := api.ASRoomAliasExistsPath + if a.Cfg.LegacyPaths { + path = api.ASRoomAliasExistsLegacyPath + } // The full path to the rooms API, includes hs token - URL, err := url.Parse(appservice.RequestUrl() + roomAliasExistsPath) + URL, err := url.Parse(appservice.RequestUrl() + path) if err != nil { return err } URL.Path += request.Alias - apiURL := URL.String() + "?access_token=" + appservice.HSToken + if a.Cfg.LegacyAuth { + q := URL.Query() + q.Set("access_token", appservice.HSToken) + URL.RawQuery = q.Encode() + } + apiURL := URL.String() // Send a request to each application service. If one responds that it has // created the room, immediately return. @@ -70,6 +76,7 @@ func (a *AppServiceQueryAPI) RoomAliasExists( if err != nil { return err } + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", appservice.HSToken)) req = req.WithContext(ctx) resp, err := appservice.HTTPClient.Do(req) @@ -123,12 +130,21 @@ func (a *AppServiceQueryAPI) UserIDExists( for _, appservice := range a.Cfg.Derived.ApplicationServices { if appservice.URL != "" && appservice.IsInterestedInUserID(request.UserID) { // The full path to the rooms API, includes hs token - URL, err := url.Parse(appservice.RequestUrl() + userIDExistsPath) + path := api.ASUserExistsPath + if a.Cfg.LegacyPaths { + path = api.ASUserExistsLegacyPath + } + URL, err := url.Parse(appservice.RequestUrl() + path) if err != nil { return err } URL.Path += request.UserID - apiURL := URL.String() + "?access_token=" + appservice.HSToken + if a.Cfg.LegacyAuth { + q := URL.Query() + q.Set("access_token", appservice.HSToken) + URL.RawQuery = q.Encode() + } + apiURL := URL.String() // Send a request to each application service. If one responds that it has // created the user, immediately return. 
@@ -136,6 +152,7 @@ func (a *AppServiceQueryAPI) UserIDExists( if err != nil { return err } + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", appservice.HSToken)) resp, err := appservice.HTTPClient.Do(req.WithContext(ctx)) if resp != nil { defer func() { @@ -176,25 +193,22 @@ type thirdpartyResponses interface { api.ASProtocolResponse | []api.ASUserResponse | []api.ASLocationResponse } -func requestDo[T thirdpartyResponses](client *http.Client, url string, response *T) (err error) { - origURL := url - // try v1 and unstable appservice endpoints - for _, version := range []string{"v1", "unstable"} { - var resp *http.Response - var body []byte - asURL := strings.Replace(origURL, "unstable", version, 1) - resp, err = client.Get(asURL) - if err != nil { - continue - } - defer resp.Body.Close() // nolint: errcheck - body, err = io.ReadAll(resp.Body) - if err != nil { - continue - } - return json.Unmarshal(body, &response) +func requestDo[T thirdpartyResponses](as *config.ApplicationService, url string, response *T) error { + req, err := http.NewRequest(http.MethodGet, url, nil) + if err != nil { + return err } - return err + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", as.HSToken)) + resp, err := as.HTTPClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() // nolint: errcheck + body, err := io.ReadAll(resp.Body) + if err != nil { + return err + } + return json.Unmarshal(body, &response) } func (a *AppServiceQueryAPI) Locations( @@ -207,16 +221,22 @@ func (a *AppServiceQueryAPI) Locations( return err } + path := api.ASLocationPath + if a.Cfg.LegacyPaths { + path = api.ASLocationLegacyPath + } for _, as := range a.Cfg.Derived.ApplicationServices { var asLocations []api.ASLocationResponse - params.Set("access_token", as.HSToken) + if a.Cfg.LegacyAuth { + params.Set("access_token", as.HSToken) + } - url := as.RequestUrl() + api.ASLocationPath + url := as.RequestUrl() + path if req.Protocol != "" { url += "/" + req.Protocol } - if err := requestDo[[]api.ASLocationResponse](as.HTTPClient, url+"?"+params.Encode(), &asLocations); err != nil { + if err := requestDo[[]api.ASLocationResponse](&as, url+"?"+params.Encode(), &asLocations); err != nil { log.WithError(err).WithField("application_service", as.ID).Error("unable to get 'locations' from application service") continue } @@ -242,16 +262,22 @@ func (a *AppServiceQueryAPI) User( return err } + path := api.ASUserPath + if a.Cfg.LegacyPaths { + path = api.ASUserLegacyPath + } for _, as := range a.Cfg.Derived.ApplicationServices { var asUsers []api.ASUserResponse - params.Set("access_token", as.HSToken) + if a.Cfg.LegacyAuth { + params.Set("access_token", as.HSToken) + } - url := as.RequestUrl() + api.ASUserPath + url := as.RequestUrl() + path if req.Protocol != "" { url += "/" + req.Protocol } - if err := requestDo[[]api.ASUserResponse](as.HTTPClient, url+"?"+params.Encode(), &asUsers); err != nil { + if err := requestDo[[]api.ASUserResponse](&as, url+"?"+params.Encode(), &asUsers); err != nil { log.WithError(err).WithField("application_service", as.ID).Error("unable to get 'user' from application service") continue } @@ -272,6 +298,10 @@ func (a *AppServiceQueryAPI) Protocols( req *api.ProtocolRequest, resp *api.ProtocolResponse, ) error { + protocolPath := api.ASProtocolPath + if a.Cfg.LegacyPaths { + protocolPath = api.ASProtocolLegacyPath + } // get a single protocol response if req.Protocol != "" { @@ -289,7 +319,7 @@ func (a *AppServiceQueryAPI) Protocols( response := api.ASProtocolResponse{} for _, as := 
range a.Cfg.Derived.ApplicationServices { var proto api.ASProtocolResponse - if err := requestDo[api.ASProtocolResponse](as.HTTPClient, as.RequestUrl()+api.ASProtocolPath+req.Protocol, &proto); err != nil { + if err := requestDo[api.ASProtocolResponse](&as, as.RequestUrl()+protocolPath+req.Protocol, &proto); err != nil { log.WithError(err).WithField("application_service", as.ID).Error("unable to get 'protocol' from application service") continue } @@ -319,7 +349,7 @@ func (a *AppServiceQueryAPI) Protocols( for _, as := range a.Cfg.Derived.ApplicationServices { for _, p := range as.Protocols { var proto api.ASProtocolResponse - if err := requestDo[api.ASProtocolResponse](as.HTTPClient, as.RequestUrl()+api.ASProtocolPath+p, &proto); err != nil { + if err := requestDo[api.ASProtocolResponse](&as, as.RequestUrl()+protocolPath+p, &proto); err != nil { log.WithError(err).WithField("application_service", as.ID).Error("unable to get 'protocol' from application service") continue } diff --git a/clientapi/clientapi_test.go b/clientapi/clientapi_test.go index 2ff4b6503..fffe4b6b8 100644 --- a/clientapi/clientapi_test.go +++ b/clientapi/clientapi_test.go @@ -958,7 +958,8 @@ func TestCapabilities(t *testing.T) { cm := sqlutil.NewConnectionManager(processCtx, cfg.Global.DatabaseOptions) // Needed to create accounts - rsAPI := roomserver.NewInternalAPI(processCtx, cfg, cm, &natsInstance, nil, caching.DisableMetrics) + caches := caching.NewRistrettoCache(128*1024*1024, time.Hour, caching.DisableMetrics) + rsAPI := roomserver.NewInternalAPI(processCtx, cfg, cm, &natsInstance, caches, caching.DisableMetrics) rsAPI.SetFederationAPI(nil, nil) userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil, caching.DisableMetrics, testIsBlacklistedOrBackingOff) // We mostly need the rsAPI/userAPI for this test, so nil for other APIs etc. @@ -1005,7 +1006,8 @@ func TestTurnserver(t *testing.T) { cm := sqlutil.NewConnectionManager(processCtx, cfg.Global.DatabaseOptions) // Needed to create accounts - rsAPI := roomserver.NewInternalAPI(processCtx, cfg, cm, &natsInstance, nil, caching.DisableMetrics) + caches := caching.NewRistrettoCache(128*1024*1024, time.Hour, caching.DisableMetrics) + rsAPI := roomserver.NewInternalAPI(processCtx, cfg, cm, &natsInstance, caches, caching.DisableMetrics) rsAPI.SetFederationAPI(nil, nil) userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil, caching.DisableMetrics, testIsBlacklistedOrBackingOff) //rsAPI.SetUserAPI(userAPI) @@ -1103,7 +1105,8 @@ func Test3PID(t *testing.T) { cm := sqlutil.NewConnectionManager(processCtx, cfg.Global.DatabaseOptions) // Needed to create accounts - rsAPI := roomserver.NewInternalAPI(processCtx, cfg, cm, &natsInstance, nil, caching.DisableMetrics) + caches := caching.NewRistrettoCache(128*1024*1024, time.Hour, caching.DisableMetrics) + rsAPI := roomserver.NewInternalAPI(processCtx, cfg, cm, &natsInstance, caches, caching.DisableMetrics) rsAPI.SetFederationAPI(nil, nil) userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil, caching.DisableMetrics, testIsBlacklistedOrBackingOff) // We mostly need the rsAPI/userAPI for this test, so nil for other APIs etc. 
@@ -2151,3 +2154,195 @@ func TestKeyBackup(t *testing.T) { } }) } + +func TestGetMembership(t *testing.T) { + alice := test.NewUser(t) + bob := test.NewUser(t) + + testCases := []struct { + name string + roomID string + user *test.User + additionalEvents func(t *testing.T, room *test.Room) + request func(t *testing.T, room *test.Room, accessToken string) *http.Request + wantOK bool + wantMemberCount int + }{ + + { + name: "/joined_members - Bob never joined", + user: bob, + request: func(t *testing.T, room *test.Room, accessToken string) *http.Request { + return test.NewRequest(t, "GET", fmt.Sprintf("/_matrix/client/v3/rooms/%s/joined_members", room.ID), test.WithQueryParams(map[string]string{ + "access_token": accessToken, + })) + }, + wantOK: false, + }, + { + name: "/joined_members - Alice joined", + user: alice, + request: func(t *testing.T, room *test.Room, accessToken string) *http.Request { + return test.NewRequest(t, "GET", fmt.Sprintf("/_matrix/client/v3/rooms/%s/joined_members", room.ID), test.WithQueryParams(map[string]string{ + "access_token": accessToken, + })) + }, + wantOK: true, + wantMemberCount: 1, + }, + { + name: "/joined_members - Alice leaves, shouldn't be able to see members ", + user: alice, + request: func(t *testing.T, room *test.Room, accessToken string) *http.Request { + return test.NewRequest(t, "GET", fmt.Sprintf("/_matrix/client/v3/rooms/%s/joined_members", room.ID), test.WithQueryParams(map[string]string{ + "access_token": accessToken, + })) + }, + additionalEvents: func(t *testing.T, room *test.Room) { + room.CreateAndInsert(t, alice, spec.MRoomMember, map[string]interface{}{ + "membership": "leave", + }, test.WithStateKey(alice.ID)) + }, + wantOK: false, + }, + { + name: "/joined_members - Bob joins, Alice sees two members", + user: alice, + request: func(t *testing.T, room *test.Room, accessToken string) *http.Request { + return test.NewRequest(t, "GET", fmt.Sprintf("/_matrix/client/v3/rooms/%s/joined_members", room.ID), test.WithQueryParams(map[string]string{ + "access_token": accessToken, + })) + }, + additionalEvents: func(t *testing.T, room *test.Room) { + room.CreateAndInsert(t, bob, spec.MRoomMember, map[string]interface{}{ + "membership": "join", + }, test.WithStateKey(bob.ID)) + }, + wantOK: true, + wantMemberCount: 2, + }, + } + + test.WithAllDatabases(t, func(t *testing.T, dbType test.DBType) { + + cfg, processCtx, close := testrig.CreateConfig(t, dbType) + routers := httputil.NewRouters() + cm := sqlutil.NewConnectionManager(processCtx, cfg.Global.DatabaseOptions) + caches := caching.NewRistrettoCache(128*1024*1024, time.Hour, caching.DisableMetrics) + defer close() + natsInstance := jetstream.NATSInstance{} + jsctx, _ := natsInstance.Prepare(processCtx, &cfg.Global.JetStream) + defer jetstream.DeleteAllStreams(jsctx, &cfg.Global.JetStream) + + // Use an actual roomserver for this + rsAPI := roomserver.NewInternalAPI(processCtx, cfg, cm, &natsInstance, caches, caching.DisableMetrics) + rsAPI.SetFederationAPI(nil, nil) + userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil, caching.DisableMetrics, testIsBlacklistedOrBackingOff) + + // We mostly need the rsAPI for this test, so nil for other APIs/caches etc. 
+ AddPublicRoutes(processCtx, routers, cfg, &natsInstance, nil, rsAPI, nil, nil, nil, userAPI, nil, nil, caching.DisableMetrics) + + accessTokens := map[*test.User]userDevice{ + alice: {}, + bob: {}, + } + createAccessTokens(t, accessTokens, userAPI, processCtx.Context(), routers) + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + room := test.NewRoom(t, alice) + t.Cleanup(func() { + t.Logf("running cleanup for %s", tc.name) + }) + // inject additional events + if tc.additionalEvents != nil { + tc.additionalEvents(t, room) + } + if err := api.SendEvents(context.Background(), rsAPI, api.KindNew, room.Events(), "test", "test", "test", nil, false); err != nil { + t.Fatalf("failed to send events: %v", err) + } + + w := httptest.NewRecorder() + routers.Client.ServeHTTP(w, tc.request(t, room, accessTokens[tc.user].accessToken)) + if w.Code != 200 && tc.wantOK { + t.Logf("%s", w.Body.String()) + t.Fatalf("got HTTP %d want %d", w.Code, 200) + } + t.Logf("[%s] Resp: %s", tc.name, w.Body.String()) + + // check we got the expected events + if tc.wantOK { + memberCount := len(gjson.GetBytes(w.Body.Bytes(), "joined").Map()) + if memberCount != tc.wantMemberCount { + t.Fatalf("expected %d members, got %d", tc.wantMemberCount, memberCount) + } + } + }) + } + }) +} + +func TestCreateRoomInvite(t *testing.T) { + alice := test.NewUser(t) + bob := test.NewUser(t) + + test.WithAllDatabases(t, func(t *testing.T, dbType test.DBType) { + + cfg, processCtx, close := testrig.CreateConfig(t, dbType) + routers := httputil.NewRouters() + cm := sqlutil.NewConnectionManager(processCtx, cfg.Global.DatabaseOptions) + caches := caching.NewRistrettoCache(128*1024*1024, time.Hour, caching.DisableMetrics) + defer close() + natsInstance := jetstream.NATSInstance{} + jsctx, _ := natsInstance.Prepare(processCtx, &cfg.Global.JetStream) + defer jetstream.DeleteAllStreams(jsctx, &cfg.Global.JetStream) + + // Use an actual roomserver for this + rsAPI := roomserver.NewInternalAPI(processCtx, cfg, cm, &natsInstance, caches, caching.DisableMetrics) + rsAPI.SetFederationAPI(nil, nil) + userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil, caching.DisableMetrics, testIsBlacklistedOrBackingOff) + + // We mostly need the rsAPI for this test, so nil for other APIs/caches etc. 
+ AddPublicRoutes(processCtx, routers, cfg, &natsInstance, nil, rsAPI, nil, nil, nil, userAPI, nil, nil, caching.DisableMetrics) + + accessTokens := map[*test.User]userDevice{ + alice: {}, + } + createAccessTokens(t, accessTokens, userAPI, processCtx.Context(), routers) + + reqBody := map[string]any{ + "invite": []string{bob.ID}, + } + body, err := json.Marshal(reqBody) + if err != nil { + t.Fatal(err) + } + + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodPost, "/_matrix/client/v3/createRoom", strings.NewReader(string(body))) + req.Header.Set("Authorization", "Bearer "+accessTokens[alice].accessToken) + + routers.Client.ServeHTTP(w, req) + + if w.Code != http.StatusOK { + t.Fatalf("expected room creation to be successful, got HTTP %d instead: %s", w.Code, w.Body.String()) + } + + roomID := gjson.GetBytes(w.Body.Bytes(), "room_id").Str + validRoomID, _ := spec.NewRoomID(roomID) + // Now ask the roomserver about the membership event of Bob + ev, err := rsAPI.CurrentStateEvent(context.Background(), *validRoomID, spec.MRoomMember, bob.ID) + if err != nil { + t.Fatal(err) + } + + if ev == nil { + t.Fatal("Membership event for Bob does not exist") + } + + // Validate that there is NO displayname in content + if gjson.GetBytes(ev.Content(), "displayname").Exists() { + t.Fatal("Found displayname in invite") + } + }) +} diff --git a/clientapi/routing/directory.go b/clientapi/routing/directory.go index 907727662..9466f583f 100644 --- a/clientapi/routing/directory.go +++ b/clientapi/routing/directory.go @@ -55,7 +55,7 @@ func DirectoryRoom( if err != nil { return util.JSONResponse{ Code: http.StatusBadRequest, - JSON: spec.BadJSON("Room alias must be in the form '#localpart:domain'"), + JSON: spec.InvalidParam("Room alias must be in the form '#localpart:domain'"), } } @@ -134,7 +134,7 @@ func SetLocalAlias( if err != nil { return util.JSONResponse{ Code: http.StatusBadRequest, - JSON: spec.BadJSON("Room alias must be in the form '#localpart:domain'"), + JSON: spec.InvalidParam("Room alias must be in the form '#localpart:domain'"), } } diff --git a/clientapi/routing/keys.go b/clientapi/routing/keys.go index 72785cda8..871b8b08e 100644 --- a/clientapi/routing/keys.go +++ b/clientapi/routing/keys.go @@ -93,7 +93,6 @@ func UploadKeys(req *http.Request, keyAPI api.ClientKeyAPI, device *api.Device) type queryKeysRequest struct { Timeout int `json:"timeout"` - Token string `json:"token"` DeviceKeys map[string][]string `json:"device_keys"` } @@ -119,7 +118,6 @@ func QueryKeys(req *http.Request, keyAPI api.ClientKeyAPI, device *api.Device) u UserID: device.UserID, UserToDevices: r.DeviceKeys, Timeout: r.GetTimeout(), - // TODO: Token? }, &queryRes) return util.JSONResponse{ Code: 200, diff --git a/clientapi/routing/membership.go b/clientapi/routing/membership.go index 06683c47d..9e41a3794 100644 --- a/clientapi/routing/membership.go +++ b/clientapi/routing/membership.go @@ -324,19 +324,18 @@ func SendInvite( } // We already received the return value, so no need to check for an error here. - response, _ := sendInvite(req.Context(), profileAPI, device, roomID, body.UserID, body.Reason, cfg, rsAPI, asAPI, evTime) + response, _ := sendInvite(req.Context(), device, roomID, body.UserID, body.Reason, cfg, rsAPI, evTime) return response } // sendInvite sends an invitation to a user. 
Returns a JSONResponse and an error func sendInvite( ctx context.Context, - profileAPI userapi.ClientUserAPI, device *userapi.Device, roomID, userID, reason string, cfg *config.ClientAPI, rsAPI roomserverAPI.ClientRoomserverAPI, - asAPI appserviceAPI.AppServiceInternalAPI, evTime time.Time, + evTime time.Time, ) (util.JSONResponse, error) { validRoomID, err := spec.NewRoomID(roomID) if err != nil { @@ -359,13 +358,7 @@ func sendInvite( JSON: spec.InvalidParam("UserID is invalid"), }, err } - profile, err := loadProfile(ctx, userID, cfg, profileAPI, asAPI) - if err != nil { - return util.JSONResponse{ - Code: http.StatusInternalServerError, - JSON: spec.InternalServerError{}, - }, err - } + identity, err := cfg.Matrix.SigningIdentityFor(device.UserDomain()) if err != nil { return util.JSONResponse{ @@ -375,16 +368,14 @@ func sendInvite( } err = rsAPI.PerformInvite(ctx, &api.PerformInviteRequest{ InviteInput: roomserverAPI.InviteInput{ - RoomID: *validRoomID, - Inviter: *inviter, - Invitee: *invitee, - DisplayName: profile.DisplayName, - AvatarURL: profile.AvatarURL, - Reason: reason, - IsDirect: false, - KeyID: identity.KeyID, - PrivateKey: identity.PrivateKey, - EventTime: evTime, + RoomID: *validRoomID, + Inviter: *inviter, + Invitee: *invitee, + Reason: reason, + IsDirect: false, + KeyID: identity.KeyID, + PrivateKey: identity.PrivateKey, + EventTime: evTime, }, InviteRoomState: nil, // ask the roomserver to draw up invite room state for us SendAsServer: string(device.UserDomain()), diff --git a/clientapi/routing/memberships.go b/clientapi/routing/memberships.go new file mode 100644 index 000000000..84be498d6 --- /dev/null +++ b/clientapi/routing/memberships.go @@ -0,0 +1,139 @@ +// Copyright 2024 The Matrix.org Foundation C.I.C. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package routing + +import ( + "encoding/json" + "net/http" + + "github.com/matrix-org/dendrite/roomserver/api" + userapi "github.com/matrix-org/dendrite/userapi/api" + "github.com/matrix-org/gomatrixserverlib/spec" + "github.com/matrix-org/util" +) + +// https://matrix.org/docs/spec/client_server/r0.6.0#get-matrix-client-r0-rooms-roomid-joined-members +type getJoinedMembersResponse struct { + Joined map[string]joinedMember `json:"joined"` +} + +type joinedMember struct { + DisplayName string `json:"display_name"` + AvatarURL string `json:"avatar_url"` +} + +// The database stores 'displayname' without an underscore. 
+// Deserialize into this and then change to the actual API response +type databaseJoinedMember struct { + DisplayName string `json:"displayname"` + AvatarURL string `json:"avatar_url"` +} + +// GetJoinedMembers implements +// +// GET /rooms/{roomId}/joined_members +func GetJoinedMembers( + req *http.Request, device *userapi.Device, roomID string, + rsAPI api.ClientRoomserverAPI, +) util.JSONResponse { + // Validate the userID + userID, err := spec.NewUserID(device.UserID, true) + if err != nil { + return util.JSONResponse{ + Code: http.StatusBadRequest, + JSON: spec.InvalidParam("Device UserID is invalid"), + } + } + + // Validate the roomID + validRoomID, err := spec.NewRoomID(roomID) + if err != nil { + return util.JSONResponse{ + Code: http.StatusBadRequest, + JSON: spec.InvalidParam("RoomID is invalid"), + } + } + + // Get the current memberships for the requesting user to determine + // if they are allowed to query this endpoint. + queryReq := api.QueryMembershipForUserRequest{ + RoomID: validRoomID.String(), + UserID: *userID, + } + + var queryRes api.QueryMembershipForUserResponse + if queryErr := rsAPI.QueryMembershipForUser(req.Context(), &queryReq, &queryRes); queryErr != nil { + util.GetLogger(req.Context()).WithError(queryErr).Error("rsAPI.QueryMembershipsForRoom failed") + return util.JSONResponse{ + Code: http.StatusInternalServerError, + JSON: spec.InternalServerError{}, + } + } + + if !queryRes.HasBeenInRoom { + return util.JSONResponse{ + Code: http.StatusForbidden, + JSON: spec.Forbidden("You aren't a member of the room and weren't previously a member of the room."), + } + } + + if !queryRes.IsInRoom { + return util.JSONResponse{ + Code: http.StatusForbidden, + JSON: spec.Forbidden("You aren't a member of the room and weren't previously a member of the room."), + } + } + + // Get the current membership events + var membershipsForRoomResp api.QueryMembershipsForRoomResponse + if err = rsAPI.QueryMembershipsForRoom(req.Context(), &api.QueryMembershipsForRoomRequest{ + JoinedOnly: true, + RoomID: validRoomID.String(), + }, &membershipsForRoomResp); err != nil { + util.GetLogger(req.Context()).WithError(err).Error("rsAPI.QueryEventsByID failed") + return util.JSONResponse{ + Code: http.StatusInternalServerError, + JSON: spec.InternalServerError{}, + } + } + + var res getJoinedMembersResponse + res.Joined = make(map[string]joinedMember) + for _, ev := range membershipsForRoomResp.JoinEvents { + var content databaseJoinedMember + if err := json.Unmarshal(ev.Content, &content); err != nil { + util.GetLogger(req.Context()).WithError(err).Error("failed to unmarshal event content") + return util.JSONResponse{ + Code: http.StatusInternalServerError, + JSON: spec.InternalServerError{}, + } + } + + userID, err := rsAPI.QueryUserIDForSender(req.Context(), *validRoomID, spec.SenderID(ev.Sender)) + if err != nil || userID == nil { + util.GetLogger(req.Context()).WithError(err).Error("rsAPI.QueryUserIDForSender failed") + return util.JSONResponse{ + Code: http.StatusInternalServerError, + JSON: spec.InternalServerError{}, + } + } + + res.Joined[userID.String()] = joinedMember(content) + } + return util.JSONResponse{ + Code: http.StatusOK, + JSON: res, + } +} diff --git a/clientapi/routing/register.go b/clientapi/routing/register.go index 558418a6f..5235e9092 100644 --- a/clientapi/routing/register.go +++ b/clientapi/routing/register.go @@ -630,6 +630,7 @@ func handleGuestRegistration( AccessToken: token, IPAddr: req.RemoteAddr, UserAgent: req.UserAgent(), + FromRegistration: true, }, 
&devRes) if err != nil { return util.JSONResponse{ @@ -919,6 +920,7 @@ func completeRegistration( DeviceID: deviceID, IPAddr: ipAddr, UserAgent: userAgent, + FromRegistration: true, }, &devRes) if err != nil { return util.JSONResponse{ diff --git a/clientapi/routing/routing.go b/clientapi/routing/routing.go index d4aa1d08d..3e23ab405 100644 --- a/clientapi/routing/routing.go +++ b/clientapi/routing/routing.go @@ -1513,4 +1513,14 @@ func Setup( return GetPresence(req, device, natsClient, cfg.Matrix.JetStream.Prefixed(jetstream.RequestPresence), vars["userId"]) }), ).Methods(http.MethodGet, http.MethodOptions) + + v3mux.Handle("/rooms/{roomID}/joined_members", + httputil.MakeAuthAPI("rooms_members", userAPI, func(req *http.Request, device *userapi.Device) util.JSONResponse { + vars, err := httputil.URLDecodeMapValues(mux.Vars(req)) + if err != nil { + return util.ErrorResponse(err) + } + return GetJoinedMembers(req, device, vars["roomID"], rsAPI) + }), + ).Methods(http.MethodGet, http.MethodOptions) } diff --git a/clientapi/routing/server_notices.go b/clientapi/routing/server_notices.go index 5deb559df..d4644b3e5 100644 --- a/clientapi/routing/server_notices.go +++ b/clientapi/routing/server_notices.go @@ -215,7 +215,7 @@ func SendServerNotice( } if !membershipRes.IsInRoom { // re-invite the user - res, err := sendInvite(ctx, userAPI, senderDevice, roomID, r.UserID, "Server notice room", cfgClient, rsAPI, asAPI, time.Now()) + res, err := sendInvite(ctx, senderDevice, roomID, r.UserID, "Server notice room", cfgClient, rsAPI, time.Now()) if err != nil { return res } diff --git a/dendrite-sample.yaml b/dendrite-sample.yaml index e143a7398..8616e120e 100644 --- a/dendrite-sample.yaml +++ b/dendrite-sample.yaml @@ -154,6 +154,13 @@ app_service_api: # to be sent to an insecure endpoint. disable_tls_validation: false + # Send the access_token query parameter with appservice requests in addition + # to the Authorization header. This can cause hs_tokens to be saved to logs, + # so it should not be enabled unless absolutely necessary. + legacy_auth: false + # Use the legacy unprefixed paths for appservice requests. + legacy_paths: false + # Appservice configuration files to load into this homeserver. 
config_files: # - /path/to/appservice_registration.yaml diff --git a/docs/FAQ.md b/docs/FAQ.md index 82b1581ea..2ef9e6c2b 100644 --- a/docs/FAQ.md +++ b/docs/FAQ.md @@ -117,6 +117,7 @@ The list of files that need to be stored is: - matrix-key.pem - dendrite.yaml - the postgres or sqlite DB +- the jetstream directory - the media store - the search index (although this can be regenerated) diff --git a/docs/Gemfile.lock b/docs/Gemfile.lock index 195f60c6f..791d0f857 100644 --- a/docs/Gemfile.lock +++ b/docs/Gemfile.lock @@ -231,9 +231,9 @@ GEM jekyll-seo-tag (~> 2.1) minitest (5.17.0) multipart-post (2.1.1) - nokogiri (1.14.3-arm64-darwin) + nokogiri (1.16.2-arm64-darwin) racc (~> 1.4) - nokogiri (1.14.3-x86_64-linux) + nokogiri (1.16.2-x86_64-linux) racc (~> 1.4) octokit (4.22.0) faraday (>= 0.9) @@ -241,7 +241,7 @@ GEM pathutil (0.16.2) forwardable-extended (~> 2.6) public_suffix (4.0.7) - racc (1.6.2) + racc (1.7.3) rb-fsevent (0.11.1) rb-inotify (0.10.1) ffi (~> 1.0) diff --git a/federationapi/federationapi.go b/federationapi/federationapi.go index efbfa3315..d3730c7ca 100644 --- a/federationapi/federationapi.go +++ b/federationapi/federationapi.go @@ -113,10 +113,7 @@ func NewInternalAPI( _ = federationDB.RemoveAllServersFromBlacklist() } - stats := statistics.NewStatistics( - federationDB, - cfg.FederationMaxRetries+1, - cfg.P2PFederationRetriesUntilAssumedOffline+1) + stats := statistics.NewStatistics(federationDB, cfg.FederationMaxRetries+1, cfg.P2PFederationRetriesUntilAssumedOffline+1, cfg.EnableRelays) js, nats := natsInstance.Prepare(processContext, &cfg.Matrix.JetStream) diff --git a/federationapi/federationapi_test.go b/federationapi/federationapi_test.go index 1ea8c40ea..79f4b3f21 100644 --- a/federationapi/federationapi_test.go +++ b/federationapi/federationapi_test.go @@ -5,11 +5,14 @@ import ( "crypto/ed25519" "encoding/json" "fmt" + "net/http" + "net/http/httptest" "strings" "sync" "testing" "time" + "github.com/matrix-org/dendrite/federationapi/routing" "github.com/matrix-org/dendrite/internal/caching" "github.com/matrix-org/dendrite/internal/httputil" "github.com/matrix-org/dendrite/internal/sqlutil" @@ -17,7 +20,10 @@ import ( "github.com/matrix-org/gomatrixserverlib" "github.com/matrix-org/gomatrixserverlib/fclient" "github.com/matrix-org/gomatrixserverlib/spec" + "github.com/matrix-org/util" "github.com/nats-io/nats.go" + "github.com/stretchr/testify/assert" + "github.com/tidwall/gjson" "github.com/matrix-org/dendrite/federationapi" "github.com/matrix-org/dendrite/federationapi/api" @@ -362,3 +368,126 @@ func TestRoomsV3URLEscapeDoNot404(t *testing.T) { } } } + +func TestNotaryServer(t *testing.T) { + testCases := []struct { + name string + httpBody string + pubKeyRequest *gomatrixserverlib.PublicKeyNotaryLookupRequest + validateFunc func(t *testing.T, response util.JSONResponse) + }{ + { + name: "empty httpBody", + validateFunc: func(t *testing.T, resp util.JSONResponse) { + assert.Equal(t, http.StatusBadRequest, resp.Code) + nk, ok := resp.JSON.(spec.MatrixError) + assert.True(t, ok) + assert.Equal(t, spec.ErrorBadJSON, nk.ErrCode) + }, + }, + { + name: "valid but empty httpBody", + httpBody: "{}", + validateFunc: func(t *testing.T, resp util.JSONResponse) { + want := util.JSONResponse{ + Code: http.StatusOK, + JSON: routing.NotaryKeysResponse{ServerKeys: []json.RawMessage{}}, + } + assert.Equal(t, want, resp) + }, + }, + { + name: "request all keys using an empty criteria", + httpBody: `{"server_keys":{"servera":{}}}`, + validateFunc: func(t *testing.T, resp 
util.JSONResponse) { + assert.Equal(t, http.StatusOK, resp.Code) + nk, ok := resp.JSON.(routing.NotaryKeysResponse) + assert.True(t, ok) + assert.Equal(t, "servera", gjson.GetBytes(nk.ServerKeys[0], "server_name").Str) + assert.True(t, gjson.GetBytes(nk.ServerKeys[0], "verify_keys.ed25519:someID").Exists()) + }, + }, + { + name: "request all keys using null as the criteria", + httpBody: `{"server_keys":{"servera":null}}`, + validateFunc: func(t *testing.T, resp util.JSONResponse) { + assert.Equal(t, http.StatusOK, resp.Code) + nk, ok := resp.JSON.(routing.NotaryKeysResponse) + assert.True(t, ok) + assert.Equal(t, "servera", gjson.GetBytes(nk.ServerKeys[0], "server_name").Str) + assert.True(t, gjson.GetBytes(nk.ServerKeys[0], "verify_keys.ed25519:someID").Exists()) + }, + }, + { + name: "request specific key", + httpBody: `{"server_keys":{"servera":{"ed25519:someID":{}}}}`, + validateFunc: func(t *testing.T, resp util.JSONResponse) { + assert.Equal(t, http.StatusOK, resp.Code) + nk, ok := resp.JSON.(routing.NotaryKeysResponse) + assert.True(t, ok) + assert.Equal(t, "servera", gjson.GetBytes(nk.ServerKeys[0], "server_name").Str) + assert.True(t, gjson.GetBytes(nk.ServerKeys[0], "verify_keys.ed25519:someID").Exists()) + }, + }, + { + name: "request multiple servers", + httpBody: `{"server_keys":{"servera":{"ed25519:someID":{}},"serverb":{"ed25519:someID":{}}}}`, + validateFunc: func(t *testing.T, resp util.JSONResponse) { + assert.Equal(t, http.StatusOK, resp.Code) + nk, ok := resp.JSON.(routing.NotaryKeysResponse) + assert.True(t, ok) + wantServers := map[string]struct{}{ + "servera": {}, + "serverb": {}, + } + for _, js := range nk.ServerKeys { + serverName := gjson.GetBytes(js, "server_name").Str + _, ok = wantServers[serverName] + assert.True(t, ok, "unexpected servername: %s", serverName) + delete(wantServers, serverName) + assert.True(t, gjson.GetBytes(js, "verify_keys.ed25519:someID").Exists()) + } + if len(wantServers) > 0 { + t.Fatalf("expected response to also contain: %#v", wantServers) + } + }, + }, + } + + test.WithAllDatabases(t, func(t *testing.T, dbType test.DBType) { + cfg, processCtx, close := testrig.CreateConfig(t, dbType) + defer close() + cm := sqlutil.NewConnectionManager(processCtx, cfg.Global.DatabaseOptions) + caches := caching.NewRistrettoCache(128*1024*1024, time.Hour, caching.DisableMetrics) + natsInstance := jetstream.NATSInstance{} + fc := &fedClient{ + keys: map[spec.ServerName]struct { + key ed25519.PrivateKey + keyID gomatrixserverlib.KeyID + }{ + "servera": { + key: test.PrivateKeyA, + keyID: "ed25519:someID", + }, + "serverb": { + key: test.PrivateKeyB, + keyID: "ed25519:someID", + }, + }, + } + + fedAPI := federationapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, fc, nil, caches, nil, true) + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + req := httptest.NewRequest(http.MethodPost, "/", strings.NewReader(tc.httpBody)) + req.Host = string(cfg.Global.ServerName) + + resp := routing.NotaryKeys(req, &cfg.FederationAPI, fedAPI, tc.pubKeyRequest) + // assert that we received the expected response + tc.validateFunc(t, resp) + }) + } + + }) +} diff --git a/federationapi/internal/federationclient_test.go b/federationapi/internal/federationclient_test.go index fe8d84ffb..47efb11da 100644 --- a/federationapi/internal/federationclient_test.go +++ b/federationapi/internal/federationclient_test.go @@ -61,7 +61,7 @@ func TestFederationClientQueryKeys(t *testing.T) { }, } fedClient := &testFedClient{} - stats := 
statistics.NewStatistics(testDB, FailuresUntilBlacklist, FailuresUntilAssumedOffline) + stats := statistics.NewStatistics(testDB, FailuresUntilBlacklist, FailuresUntilAssumedOffline, false) queues := queue.NewOutgoingQueues( testDB, process.NewProcessContext(), false, @@ -92,7 +92,7 @@ func TestFederationClientQueryKeysBlacklisted(t *testing.T) { }, } fedClient := &testFedClient{} - stats := statistics.NewStatistics(testDB, FailuresUntilBlacklist, FailuresUntilAssumedOffline) + stats := statistics.NewStatistics(testDB, FailuresUntilBlacklist, FailuresUntilAssumedOffline, false) queues := queue.NewOutgoingQueues( testDB, process.NewProcessContext(), false, @@ -122,7 +122,7 @@ func TestFederationClientQueryKeysFailure(t *testing.T) { }, } fedClient := &testFedClient{shouldFail: true} - stats := statistics.NewStatistics(testDB, FailuresUntilBlacklist, FailuresUntilAssumedOffline) + stats := statistics.NewStatistics(testDB, FailuresUntilBlacklist, FailuresUntilAssumedOffline, false) queues := queue.NewOutgoingQueues( testDB, process.NewProcessContext(), false, @@ -152,7 +152,7 @@ func TestFederationClientClaimKeys(t *testing.T) { }, } fedClient := &testFedClient{} - stats := statistics.NewStatistics(testDB, FailuresUntilBlacklist, FailuresUntilAssumedOffline) + stats := statistics.NewStatistics(testDB, FailuresUntilBlacklist, FailuresUntilAssumedOffline, false) queues := queue.NewOutgoingQueues( testDB, process.NewProcessContext(), false, @@ -183,7 +183,7 @@ func TestFederationClientClaimKeysBlacklisted(t *testing.T) { }, } fedClient := &testFedClient{} - stats := statistics.NewStatistics(testDB, FailuresUntilBlacklist, FailuresUntilAssumedOffline) + stats := statistics.NewStatistics(testDB, FailuresUntilBlacklist, FailuresUntilAssumedOffline, false) queues := queue.NewOutgoingQueues( testDB, process.NewProcessContext(), false, diff --git a/federationapi/internal/perform_test.go b/federationapi/internal/perform_test.go index 2795a018a..82f9b9db1 100644 --- a/federationapi/internal/perform_test.go +++ b/federationapi/internal/perform_test.go @@ -66,7 +66,7 @@ func TestPerformWakeupServers(t *testing.T) { }, } fedClient := &testFedClient{} - stats := statistics.NewStatistics(testDB, FailuresUntilBlacklist, FailuresUntilAssumedOffline) + stats := statistics.NewStatistics(testDB, FailuresUntilBlacklist, FailuresUntilAssumedOffline, true) queues := queue.NewOutgoingQueues( testDB, process.NewProcessContext(), false, @@ -112,7 +112,7 @@ func TestQueryRelayServers(t *testing.T) { }, } fedClient := &testFedClient{} - stats := statistics.NewStatistics(testDB, FailuresUntilBlacklist, FailuresUntilAssumedOffline) + stats := statistics.NewStatistics(testDB, FailuresUntilBlacklist, FailuresUntilAssumedOffline, false) queues := queue.NewOutgoingQueues( testDB, process.NewProcessContext(), false, @@ -153,7 +153,7 @@ func TestRemoveRelayServers(t *testing.T) { }, } fedClient := &testFedClient{} - stats := statistics.NewStatistics(testDB, FailuresUntilBlacklist, FailuresUntilAssumedOffline) + stats := statistics.NewStatistics(testDB, FailuresUntilBlacklist, FailuresUntilAssumedOffline, false) queues := queue.NewOutgoingQueues( testDB, process.NewProcessContext(), false, @@ -193,7 +193,7 @@ func TestPerformDirectoryLookup(t *testing.T) { }, } fedClient := &testFedClient{} - stats := statistics.NewStatistics(testDB, FailuresUntilBlacklist, FailuresUntilAssumedOffline) + stats := statistics.NewStatistics(testDB, FailuresUntilBlacklist, FailuresUntilAssumedOffline, false) queues := queue.NewOutgoingQueues( testDB, 
process.NewProcessContext(), false, @@ -232,7 +232,7 @@ func TestPerformDirectoryLookupRelaying(t *testing.T) { }, } fedClient := &testFedClient{} - stats := statistics.NewStatistics(testDB, FailuresUntilBlacklist, FailuresUntilAssumedOffline) + stats := statistics.NewStatistics(testDB, FailuresUntilBlacklist, FailuresUntilAssumedOffline, true) queues := queue.NewOutgoingQueues( testDB, process.NewProcessContext(), false, diff --git a/federationapi/internal/query.go b/federationapi/internal/query.go index e53f19ff8..21e77c48d 100644 --- a/federationapi/internal/query.go +++ b/federationapi/internal/query.go @@ -43,6 +43,15 @@ func (a *FederationInternalAPI) fetchServerKeysFromCache( ctx context.Context, req *api.QueryServerKeysRequest, ) ([]gomatrixserverlib.ServerKeys, error) { var results []gomatrixserverlib.ServerKeys + + // We got a request for _all_ server keys, return them. + if len(req.KeyIDToCriteria) == 0 { + serverKeysResponses, _ := a.db.GetNotaryKeys(ctx, req.ServerName, []gomatrixserverlib.KeyID{}) + if len(serverKeysResponses) == 0 { + return nil, fmt.Errorf("failed to find server key response for server %s", req.ServerName) + } + return serverKeysResponses, nil + } for keyID, criteria := range req.KeyIDToCriteria { serverKeysResponses, _ := a.db.GetNotaryKeys(ctx, req.ServerName, []gomatrixserverlib.KeyID{keyID}) if len(serverKeysResponses) == 0 { diff --git a/federationapi/queue/queue_test.go b/federationapi/queue/queue_test.go index 73d3b0598..6da863427 100644 --- a/federationapi/queue/queue_test.go +++ b/federationapi/queue/queue_test.go @@ -117,7 +117,7 @@ func testSetup(failuresUntilBlacklist uint32, failuresUntilAssumedOffline uint32 txRelayCount: *atomic.NewUint32(0), } - stats := statistics.NewStatistics(db, failuresUntilBlacklist, failuresUntilAssumedOffline) + stats := statistics.NewStatistics(db, failuresUntilBlacklist, failuresUntilAssumedOffline, false) signingInfo := []*fclient.SigningIdentity{ { KeyID: "ed21019:auto", diff --git a/federationapi/routing/backfill.go b/federationapi/routing/backfill.go index 75a007265..bc4138839 100644 --- a/federationapi/routing/backfill.go +++ b/federationapi/routing/backfill.go @@ -95,6 +95,12 @@ func Backfill( } } + // Enforce a limit of 100 events, as not to hit the DB to hard. + // Synapse has a hard limit of 100 events as well. + if req.Limit > 100 { + req.Limit = 100 + } + // Query the Roomserver. 
if err = rsAPI.PerformBackfill(httpReq.Context(), &req, &res); err != nil { util.GetLogger(httpReq.Context()).WithError(err).Error("query.PerformBackfill failed") diff --git a/federationapi/routing/keys.go b/federationapi/routing/keys.go index 3d8ff2dea..38a88e4b1 100644 --- a/federationapi/routing/keys.go +++ b/federationapi/routing/keys.go @@ -197,6 +197,10 @@ func localKeys(cfg *config.FederationAPI, serverName spec.ServerName) (*gomatrix return &keys, err } +type NotaryKeysResponse struct { + ServerKeys []json.RawMessage `json:"server_keys"` +} + func NotaryKeys( httpReq *http.Request, cfg *config.FederationAPI, fsAPI federationAPI.FederationInternalAPI, @@ -217,10 +221,9 @@ func NotaryKeys( } } - var response struct { - ServerKeys []json.RawMessage `json:"server_keys"` + response := NotaryKeysResponse{ + ServerKeys: []json.RawMessage{}, } - response.ServerKeys = []json.RawMessage{} for serverName, kidToCriteria := range req.ServerKeys { var keyList []gomatrixserverlib.ServerKeys diff --git a/federationapi/routing/routing.go b/federationapi/routing/routing.go index dc7a363e7..6328d165e 100644 --- a/federationapi/routing/routing.go +++ b/federationapi/routing/routing.go @@ -647,6 +647,8 @@ func MakeFedAPI( // add the user to Sentry, if enabled hub := sentry.GetHubFromContext(req.Context()) if hub != nil { + // clone the hub, so we don't send garbage events with e.g. mismatching rooms/event_ids + hub = hub.Clone() hub.Scope().SetTag("origin", string(fedReq.Origin())) hub.Scope().SetTag("uri", fedReq.RequestURI()) } diff --git a/federationapi/statistics/statistics.go b/federationapi/statistics/statistics.go index e5fc4b940..e133fc9c9 100644 --- a/federationapi/statistics/statistics.go +++ b/federationapi/statistics/statistics.go @@ -34,12 +34,15 @@ type Statistics struct { // mark the destination as offline. At this point we should attempt // to send messages to the user's async relay servers if we know them. FailuresUntilAssumedOffline uint32 + + enableRelays bool } func NewStatistics( db storage.Database, failuresUntilBlacklist uint32, failuresUntilAssumedOffline uint32, + enableRelays bool, ) Statistics { return Statistics{ DB: db, @@ -47,6 +50,7 @@ func NewStatistics( FailuresUntilAssumedOffline: failuresUntilAssumedOffline, backoffTimers: make(map[spec.ServerName]*time.Timer), servers: make(map[spec.ServerName]*ServerStatistics), + enableRelays: enableRelays, } } @@ -73,6 +77,13 @@ func (s *Statistics) ForServer(serverName spec.ServerName) *ServerStatistics { } else { server.blacklisted.Store(blacklisted) } + + // Don't bother hitting the database 2 additional times + // if we don't want to use relays. 
+ if !s.enableRelays { + return server + } + assumedOffline, err := s.DB.IsServerAssumedOffline(context.Background(), serverName) if err != nil { logrus.WithError(err).Errorf("Failed to get assumed offline entry %q", serverName) diff --git a/federationapi/statistics/statistics_test.go b/federationapi/statistics/statistics_test.go index a930bc3b0..4376a9050 100644 --- a/federationapi/statistics/statistics_test.go +++ b/federationapi/statistics/statistics_test.go @@ -16,7 +16,7 @@ const ( ) func TestBackoff(t *testing.T) { - stats := NewStatistics(nil, FailuresUntilBlacklist, FailuresUntilAssumedOffline) + stats := NewStatistics(nil, FailuresUntilBlacklist, FailuresUntilAssumedOffline, false) server := ServerStatistics{ statistics: &stats, serverName: "test.com", @@ -106,7 +106,7 @@ func TestBackoff(t *testing.T) { } func TestRelayServersListing(t *testing.T) { - stats := NewStatistics(test.NewInMemoryFederationDatabase(), FailuresUntilBlacklist, FailuresUntilAssumedOffline) + stats := NewStatistics(test.NewInMemoryFederationDatabase(), FailuresUntilBlacklist, FailuresUntilAssumedOffline, false) server := ServerStatistics{statistics: &stats} server.AddRelayServers([]spec.ServerName{"relay1", "relay1", "relay2"}) relayServers := server.KnownRelayServers() diff --git a/go.mod b/go.mod index d9efae7fb..234381a4f 100644 --- a/go.mod +++ b/go.mod @@ -22,7 +22,7 @@ require ( github.com/matrix-org/dugong v0.0.0-20210921133753-66e6b1c67e2e github.com/matrix-org/go-sqlite3-js v0.0.0-20220419092513-28aa791a1c91 github.com/matrix-org/gomatrix v0.0.0-20220926102614-ceba4d9f7530 - github.com/matrix-org/gomatrixserverlib v0.0.0-20231212115925-41497b7563eb + github.com/matrix-org/gomatrixserverlib v0.0.0-20240109180417-3495e573f2b7 github.com/matrix-org/pinecone v0.11.1-0.20230810010612-ea4c33717fd7 github.com/matrix-org/util v0.0.0-20221111132719-399730281e66 github.com/mattn/go-sqlite3 v1.14.17 @@ -115,7 +115,7 @@ require ( github.com/prometheus/common v0.42.0 // indirect github.com/prometheus/procfs v0.10.1 // indirect github.com/quic-go/qtls-go1-20 v0.3.2 // indirect - github.com/quic-go/quic-go v0.37.4 // indirect + github.com/quic-go/quic-go v0.37.7 // indirect github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect github.com/rogpeppe/go-internal v1.9.0 // indirect github.com/rs/zerolog v1.29.1 // indirect diff --git a/go.sum b/go.sum index 0e3367fcb..3129f40e7 100644 --- a/go.sum +++ b/go.sum @@ -208,8 +208,8 @@ github.com/matrix-org/go-sqlite3-js v0.0.0-20220419092513-28aa791a1c91 h1:s7fexw github.com/matrix-org/go-sqlite3-js v0.0.0-20220419092513-28aa791a1c91/go.mod h1:e+cg2q7C7yE5QnAXgzo512tgFh1RbQLC0+jozuegKgo= github.com/matrix-org/gomatrix v0.0.0-20220926102614-ceba4d9f7530 h1:kHKxCOLcHH8r4Fzarl4+Y3K5hjothkVW5z7T1dUM11U= github.com/matrix-org/gomatrix v0.0.0-20220926102614-ceba4d9f7530/go.mod h1:/gBX06Kw0exX1HrwmoBibFA98yBk/jxKpGVeyQbff+s= -github.com/matrix-org/gomatrixserverlib v0.0.0-20231212115925-41497b7563eb h1:Nn+Fr96oi7bIfdOwX5A2L6A2MZCM+lqwLe4/+3+nYj8= -github.com/matrix-org/gomatrixserverlib v0.0.0-20231212115925-41497b7563eb/go.mod h1:M8m7seOroO5ePlgxA7AFZymnG90Cnh94rYQyngSrZkk= +github.com/matrix-org/gomatrixserverlib v0.0.0-20240109180417-3495e573f2b7 h1:EaUvK2ay6cxMxeshC1p6QswS9+rQFbUc2YerkRFyVXQ= +github.com/matrix-org/gomatrixserverlib v0.0.0-20240109180417-3495e573f2b7/go.mod h1:HZGsVJ3bUE+DkZtufkH9H0mlsvbhEGK5CpX0Zlavylg= github.com/matrix-org/pinecone v0.11.1-0.20230810010612-ea4c33717fd7 h1:6t8kJr8i1/1I5nNttw6nn1ryQJgzVlBmSGgPiiaTdw4= 
github.com/matrix-org/pinecone v0.11.1-0.20230810010612-ea4c33717fd7/go.mod h1:ReWMS/LoVnOiRAdq9sNUC2NZnd1mZkMNB52QhpTRWjg= github.com/matrix-org/util v0.0.0-20221111132719-399730281e66 h1:6z4KxomXSIGWqhHcfzExgkH3Z3UkIXry4ibJS4Aqz2Y= @@ -286,8 +286,8 @@ github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+Pymzi github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= github.com/quic-go/qtls-go1-20 v0.3.2 h1:rRgN3WfnKbyik4dBV8A6girlJVxGand/d+jVKbQq5GI= github.com/quic-go/qtls-go1-20 v0.3.2/go.mod h1:X9Nh97ZL80Z+bX/gUXMbipO6OxdiDi58b/fMC9mAL+k= -github.com/quic-go/quic-go v0.37.4 h1:ke8B73yMCWGq9MfrCCAw0Uzdm7GaViC3i39dsIdDlH4= -github.com/quic-go/quic-go v0.37.4/go.mod h1:YsbH1r4mSHPJcLF4k4zruUkLBqctEMBDR6VPvcYjIsU= +github.com/quic-go/quic-go v0.37.7 h1:AgKsQLZ1+YCwZd2GYhBUsJDYZwEkA5gENtAjb+MxONU= +github.com/quic-go/quic-go v0.37.7/go.mod h1:YsbH1r4mSHPJcLF4k4zruUkLBqctEMBDR6VPvcYjIsU= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= diff --git a/helm/dendrite/Chart.yaml b/helm/dendrite/Chart.yaml index 769e0ca53..2032c233e 100644 --- a/helm/dendrite/Chart.yaml +++ b/helm/dendrite/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 name: dendrite version: "0.14.0" -appVersion: "0.13.5" +appVersion: "0.13.6" description: Dendrite Matrix Homeserver type: application icon: https://avatars.githubusercontent.com/u/8418310?s=48&v=4 diff --git a/helm/dendrite/README.md b/helm/dendrite/README.md index f5f824927..c830bb283 100644 --- a/helm/dendrite/README.md +++ b/helm/dendrite/README.md @@ -1,7 +1,7 @@ # dendrite -![Version: 0.13.6](https://img.shields.io/badge/Version-0.13.6-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 0.13.5](https://img.shields.io/badge/AppVersion-0.13.5-informational?style=flat-square) +![Version: 0.13.8](https://img.shields.io/badge/Version-0.13.8-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 0.13.6](https://img.shields.io/badge/AppVersion-0.13.6-informational?style=flat-square) Dendrite Matrix Homeserver Status: **NOT PRODUCTION READY** @@ -48,13 +48,16 @@ Create a folder `appservices` and place your configurations in there. The confi | signing_key.create | bool | `true` | Create a new signing key, if not exists | | signing_key.existingSecret | string | `""` | Use an existing secret | | resources | object | sets some sane default values | Default resource requests/limits. | -| persistence.jetstream | object | `{"capacity":"1Gi","existingClaim":""}` | The storage class to use for volume claims. Used unless specified at the specific component. Defaults to the cluster default storage class. # If defined, storageClassName: # If set to "-", storageClassName: "", which disables dynamic provisioning # If undefined (the default) or set to null, no storageClassName spec is # set, choosing the default provisioner. (gp2 on AWS, standard on # GKE, AWS & OpenStack) # storageClass: "" | +| persistence.storageClass | string | `nil` | The storage class to use for volume claims. Used unless specified at the specific component. 
Defaults to the cluster default storage class. If defined, storageClassName: If set to "-", storageClassName: "", which disables dynamic provisioning If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. (gp2 on AWS, standard on GKE, AWS & OpenStack) | | persistence.jetstream.existingClaim | string | `""` | Use an existing volume claim for jetstream | | persistence.jetstream.capacity | string | `"1Gi"` | PVC Storage Request for the jetstream volume | +| persistence.jetstream.storageClass | string | `nil` | The storage class to use for volume claims. Defaults to persistence.storageClass If defined, storageClassName: If set to "-", storageClassName: "", which disables dynamic provisioning If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. (gp2 on AWS, standard on GKE, AWS & OpenStack) | | persistence.media.existingClaim | string | `""` | Use an existing volume claim for media files | | persistence.media.capacity | string | `"1Gi"` | PVC Storage Request for the media volume | +| persistence.media.storageClass | string | `nil` | The storage class to use for volume claims. Defaults to persistence.storageClass If defined, storageClassName: If set to "-", storageClassName: "", which disables dynamic provisioning If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. (gp2 on AWS, standard on GKE, AWS & OpenStack) | | persistence.search.existingClaim | string | `""` | Use an existing volume claim for the fulltext search index | | persistence.search.capacity | string | `"1Gi"` | PVC Storage Request for the search volume | +| persistence.search.storageClass | string | `nil` | The storage class to use for volume claims. Defaults to persistence.storageClass If defined, storageClassName: If set to "-", storageClassName: "", which disables dynamic provisioning If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. (gp2 on AWS, standard on GKE, AWS & OpenStack) | | extraVolumes | list | `[]` | Add additional volumes to the Dendrite Pod | | extraVolumeMounts | list | `[]` | Configure additional mount points volumes in the Dendrite Pod | | strategy.type | string | `"RollingUpdate"` | Strategy to use for rolling updates (e.g. 
Recreate, RollingUpdate) If you are using ReadWriteOnce volumes, you should probably use Recreate | diff --git a/helm/dendrite/templates/ingress.yaml b/helm/dendrite/templates/ingress.yaml index 4bcaee12d..eee762511 100644 --- a/helm/dendrite/templates/ingress.yaml +++ b/helm/dendrite/templates/ingress.yaml @@ -4,6 +4,7 @@ {{- $wellKnownServerHost := default $serverNameHost (regexFind "^(\\[.+\\])?[^:]*" .Values.dendrite_config.global.well_known_server_name) -}} {{- $wellKnownClientHost := default $serverNameHost (regexFind "//(\\[.+\\])?[^:/]*" .Values.dendrite_config.global.well_known_client_name | trimAll "/") -}} {{- $allHosts := list $serverNameHost $wellKnownServerHost $wellKnownClientHost | uniq -}} + {{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} apiVersion: networking.k8s.io/v1 {{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} @@ -56,7 +57,7 @@ spec: service: name: {{ $fullName }} port: - name: http + number: {{ $.Values.service.port }} {{- else }} serviceName: {{ $fullName }} servicePort: http @@ -72,7 +73,7 @@ spec: service: name: {{ $fullName }} port: - name: http + number: {{ $.Values.service.port }} {{- else }} serviceName: {{ $fullName }} servicePort: http @@ -88,7 +89,7 @@ spec: service: name: {{ $fullName }} port: - name: http + number: {{ $.Values.service.port }} {{- else }} serviceName: {{ $fullName }} servicePort: http @@ -105,7 +106,7 @@ spec: service: name: {{ $fullName }} port: - name: http + number: {{ $.Values.service.port }} {{- else }} serviceName: {{ $fullName }} servicePort: http diff --git a/helm/dendrite/templates/service.yaml b/helm/dendrite/templates/service.yaml index 3b571df1f..1b709c79c 100644 --- a/helm/dendrite/templates/service.yaml +++ b/helm/dendrite/templates/service.yaml @@ -14,4 +14,4 @@ spec: - name: http protocol: TCP port: {{ .Values.service.port }} - targetPort: 8008 \ No newline at end of file + targetPort: http \ No newline at end of file diff --git a/helm/dendrite/values.yaml b/helm/dendrite/values.yaml index afce1d930..bd8c50d53 100644 --- a/helm/dendrite/values.yaml +++ b/helm/dendrite/values.yaml @@ -26,13 +26,13 @@ persistence: # -- The storage class to use for volume claims. # Used unless specified at the specific component. # Defaults to the cluster default storage class. - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. (gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - # storageClass: "" + # If defined, storageClassName: + # If set to "-", storageClassName: "", which disables dynamic provisioning + # If undefined (the default) or set to null, no storageClassName spec is + # set, choosing the default provisioner. (gp2 on AWS, standard on + # GKE, AWS & OpenStack) + # + storageClass: jetstream: # -- Use an existing volume claim for jetstream existingClaim: "" @@ -40,13 +40,12 @@ persistence: capacity: "1Gi" # -- The storage class to use for volume claims. # Defaults to persistence.storageClass - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. 
(gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - # storageClass: "" + # If defined, storageClassName: + # If set to "-", storageClassName: "", which disables dynamic provisioning + # If undefined (the default) or set to null, no storageClassName spec is + # set, choosing the default provisioner. (gp2 on AWS, standard on + # GKE, AWS & OpenStack) + storageClass: media: # -- Use an existing volume claim for media files existingClaim: "" @@ -54,13 +53,12 @@ persistence: capacity: "1Gi" # -- The storage class to use for volume claims. # Defaults to persistence.storageClass - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. (gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - # storageClass: "" + # If defined, storageClassName: + # If set to "-", storageClassName: "", which disables dynamic provisioning + # If undefined (the default) or set to null, no storageClassName spec is + # set, choosing the default provisioner. (gp2 on AWS, standard on + # GKE, AWS & OpenStack) + storageClass: search: # -- Use an existing volume claim for the fulltext search index existingClaim: "" @@ -68,13 +66,12 @@ persistence: capacity: "1Gi" # -- The storage class to use for volume claims. # Defaults to persistence.storageClass - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. (gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - # storageClass: "" + # If defined, storageClassName: + # If set to "-", storageClassName: "", which disables dynamic provisioning + # If undefined (the default) or set to null, no storageClassName spec is + # set, choosing the default provisioner. (gp2 on AWS, standard on + # GKE, AWS & OpenStack) + storageClass: # -- Add additional volumes to the Dendrite Pod extraVolumes: [] @@ -92,7 +89,7 @@ extraVolumeMounts: [] strategy: # -- Strategy to use for rolling updates (e.g. Recreate, RollingUpdate) # If you are using ReadWriteOnce volumes, you should probably use Recreate - type: RollingUpdate + type: Recreate rollingUpdate: # -- Maximum number of pods that can be unavailable during the update process maxUnavailable: 25% diff --git a/internal/httputil/httpapi.go b/internal/httputil/httpapi.go index 1966e7546..c78aadf89 100644 --- a/internal/httputil/httpapi.go +++ b/internal/httputil/httpapi.go @@ -76,6 +76,8 @@ func MakeAuthAPI( // add the user to Sentry, if enabled hub := sentry.GetHubFromContext(req.Context()) if hub != nil { + // clone the hub, so we don't send garbage events with e.g. 
mismatching rooms/event_ids + hub = hub.Clone() hub.Scope().SetUser(sentry.User{ Username: device.UserID, }) diff --git a/internal/validate.go b/internal/validate.go index da8b35cd3..c831565f5 100644 --- a/internal/validate.go +++ b/internal/validate.go @@ -38,9 +38,9 @@ var ( ErrPasswordTooLong = fmt.Errorf("password too long: max %d characters", maxPasswordLength) ErrPasswordWeak = fmt.Errorf("password too weak: min %d characters", minPasswordLength) ErrUsernameTooLong = fmt.Errorf("username exceeds the maximum length of %d characters", maxUsernameLength) - ErrUsernameInvalid = errors.New("username can only contain characters a-z, 0-9, or '_-./='") + ErrUsernameInvalid = errors.New("username can only contain characters a-z, 0-9, or '_+-./='") ErrUsernameUnderscore = errors.New("username cannot start with a '_'") - validUsernameRegex = regexp.MustCompile(`^[0-9a-z_\-=./]+$`) + validUsernameRegex = regexp.MustCompile(`^[0-9a-z_\-+=./]+$`) ) // ValidatePassword returns an error if the password is invalid diff --git a/internal/validate_test.go b/internal/validate_test.go index cd2626133..1019102df 100644 --- a/internal/validate_test.go +++ b/internal/validate_test.go @@ -129,6 +129,11 @@ func Test_validateUsername(t *testing.T) { localpart: "i_am_allowed=1", domain: "localhost", }, + { + name: "special characters are allowed 3", + localpart: "+55555555555", + domain: "localhost", + }, { name: "not all special characters are allowed", localpart: "notallowed#", // contains # @@ -139,6 +144,16 @@ func Test_validateUsername(t *testing.T) { JSON: spec.InvalidUsername(ErrUsernameInvalid.Error()), }, }, + { + name: "not all special characters are allowed 2", + localpart: " ACL - aclsMutex sync.RWMutex // protects the above + acls map[string]*serverACL // room ID -> ACL + aclsMutex sync.RWMutex // protects the above + aclRegexCache map[string]**regexp.Regexp // Cache from "serverName" -> pointer to a regex + aclRegexCacheMutex sync.RWMutex // protects the above } func NewServerACLs(db ServerACLDatabase) *ServerACLs { ctx := context.TODO() acls := &ServerACLs{ acls: make(map[string]*serverACL), + // Be generous when creating the cache, as in reality + // there are hundreds of servers in an ACL. + aclRegexCache: make(map[string]**regexp.Regexp, 100), } + // Look up all of the rooms that the current state server knows about. rooms, err := db.GetKnownRooms(ctx) if err != nil { @@ -58,16 +64,16 @@ func NewServerACLs(db ServerACLDatabase) *ServerACLs { // For each room, let's see if we have a server ACL state event. If we // do then we'll process it into memory so that we have the regexes to // hand. 
- for _, room := range rooms { - state, err := db.GetStateEvent(ctx, room, MRoomServerACL, "") - if err != nil { - logrus.WithError(err).Errorf("Failed to get server ACLs for room %q", room) - continue - } - if state != nil { - acls.OnServerACLUpdate(state.PDU) - } + + events, err := db.GetBulkStateContent(ctx, rooms, []gomatrixserverlib.StateKeyTuple{{EventType: MRoomServerACL, StateKey: ""}}, false) + if err != nil { + logrus.WithError(err).Errorf("Failed to get server ACLs for all rooms: %q", err) } + + for _, event := range events { + acls.OnServerACLUpdate(event) + } + return acls } @@ -79,8 +85,8 @@ type ServerACL struct { type serverACL struct { ServerACL - allowedRegexes []*regexp.Regexp - deniedRegexes []*regexp.Regexp + allowedRegexes []**regexp.Regexp + deniedRegexes []**regexp.Regexp } func compileACLRegex(orig string) (*regexp.Regexp, error) { @@ -90,9 +96,28 @@ func compileACLRegex(orig string) (*regexp.Regexp, error) { return regexp.Compile(escaped) } -func (s *ServerACLs) OnServerACLUpdate(state gomatrixserverlib.PDU) { +// cachedCompileACLRegex is a wrapper around compileACLRegex with added caching +func (s *ServerACLs) cachedCompileACLRegex(orig string) (**regexp.Regexp, error) { + s.aclRegexCacheMutex.RLock() + re, ok := s.aclRegexCache[orig] + if ok { + s.aclRegexCacheMutex.RUnlock() + return re, nil + } + s.aclRegexCacheMutex.RUnlock() + compiled, err := compileACLRegex(orig) + if err != nil { + return nil, err + } + s.aclRegexCacheMutex.Lock() + defer s.aclRegexCacheMutex.Unlock() + s.aclRegexCache[orig] = &compiled + return &compiled, nil +} + +func (s *ServerACLs) OnServerACLUpdate(strippedEvent tables.StrippedEvent) { acls := &serverACL{} - if err := json.Unmarshal(state.Content(), &acls.ServerACL); err != nil { + if err := json.Unmarshal([]byte(strippedEvent.ContentValue), &acls.ServerACL); err != nil { logrus.WithError(err).Errorf("Failed to unmarshal state content for server ACLs") return } @@ -101,14 +126,14 @@ func (s *ServerACLs) OnServerACLUpdate(state gomatrixserverlib.PDU) { // special characters and then replace * and ? with their regex counterparts. // https://matrix.org/docs/spec/client_server/r0.6.1#m-room-server-acl for _, orig := range acls.Allowed { - if expr, err := compileACLRegex(orig); err != nil { + if expr, err := s.cachedCompileACLRegex(orig); err != nil { logrus.WithError(err).Errorf("Failed to compile allowed regex") } else { acls.allowedRegexes = append(acls.allowedRegexes, expr) } } for _, orig := range acls.Denied { - if expr, err := compileACLRegex(orig); err != nil { + if expr, err := s.cachedCompileACLRegex(orig); err != nil { logrus.WithError(err).Errorf("Failed to compile denied regex") } else { acls.deniedRegexes = append(acls.deniedRegexes, expr) @@ -118,10 +143,15 @@ func (s *ServerACLs) OnServerACLUpdate(state gomatrixserverlib.PDU) { "allow_ip_literals": acls.AllowIPLiterals, "num_allowed": len(acls.allowedRegexes), "num_denied": len(acls.deniedRegexes), - }).Debugf("Updating server ACLs for %q", state.RoomID()) + }).Debugf("Updating server ACLs for %q", strippedEvent.RoomID) + + // Clear out Denied and Allowed, now that we have the compiled regexes. + // They are not needed anymore from this point on. 
+ acls.Denied = nil + acls.Allowed = nil s.aclsMutex.Lock() defer s.aclsMutex.Unlock() - s.acls[state.RoomID().String()] = acls + s.acls[strippedEvent.RoomID] = acls } func (s *ServerACLs) IsServerBannedFromRoom(serverName spec.ServerName, roomID string) bool { @@ -151,14 +181,14 @@ func (s *ServerACLs) IsServerBannedFromRoom(serverName spec.ServerName, roomID s // Check if the hostname matches one of the denied regexes. If it does then // the server is banned from the room. for _, expr := range acls.deniedRegexes { - if expr.MatchString(string(serverName)) { + if (*expr).MatchString(string(serverName)) { return true } } // Check if the hostname matches one of the allowed regexes. If it does then // the server is NOT banned from the room. for _, expr := range acls.allowedRegexes { - if expr.MatchString(string(serverName)) { + if (*expr).MatchString(string(serverName)) { return false } } diff --git a/roomserver/acls/acls_test.go b/roomserver/acls/acls_test.go index 9fb6a5581..efe1d2093 100644 --- a/roomserver/acls/acls_test.go +++ b/roomserver/acls/acls_test.go @@ -15,8 +15,14 @@ package acls import ( + "context" "regexp" "testing" + + "github.com/matrix-org/dendrite/roomserver/storage/tables" + "github.com/matrix-org/gomatrixserverlib" + "github.com/matrix-org/gomatrixserverlib/spec" + "github.com/stretchr/testify/assert" ) func TestOpenACLsWithBlacklist(t *testing.T) { @@ -38,8 +44,8 @@ func TestOpenACLsWithBlacklist(t *testing.T) { ServerACL: ServerACL{ AllowIPLiterals: true, }, - allowedRegexes: []*regexp.Regexp{allowRegex}, - deniedRegexes: []*regexp.Regexp{denyRegex}, + allowedRegexes: []**regexp.Regexp{&allowRegex}, + deniedRegexes: []**regexp.Regexp{&denyRegex}, } if acls.IsServerBannedFromRoom("1.2.3.4", roomID) { @@ -77,8 +83,8 @@ func TestDefaultACLsWithWhitelist(t *testing.T) { ServerACL: ServerACL{ AllowIPLiterals: false, }, - allowedRegexes: []*regexp.Regexp{allowRegex}, - deniedRegexes: []*regexp.Regexp{}, + allowedRegexes: []**regexp.Regexp{&allowRegex}, + deniedRegexes: []**regexp.Regexp{}, } if !acls.IsServerBannedFromRoom("1.2.3.4", roomID) { @@ -103,3 +109,45 @@ func TestDefaultACLsWithWhitelist(t *testing.T) { t.Fatal("Expected qux.com:4567 to be allowed but wasn't") } } + +var ( + content1 = `{"allow":["*"],"allow_ip_literals":false,"deny":["hello.world", "*.hello.world"]}` +) + +type dummyACLDB struct{} + +func (d dummyACLDB) GetKnownRooms(ctx context.Context) ([]string, error) { + return []string{"1", "2"}, nil +} + +func (d dummyACLDB) GetBulkStateContent(ctx context.Context, roomIDs []string, tuples []gomatrixserverlib.StateKeyTuple, allowWildcards bool) ([]tables.StrippedEvent, error) { + return []tables.StrippedEvent{ + { + RoomID: "1", + ContentValue: content1, + }, + { + RoomID: "2", + ContentValue: content1, + }, + }, nil +} + +func TestCachedRegex(t *testing.T) { + db := dummyACLDB{} + wantBannedServer := spec.ServerName("hello.world") + + acls := NewServerACLs(db) + + // Check that hello.world is banned in room 1 + banned := acls.IsServerBannedFromRoom(wantBannedServer, "1") + assert.True(t, banned) + + // Check that hello.world is banned in room 2 + banned = acls.IsServerBannedFromRoom(wantBannedServer, "2") + assert.True(t, banned) + + // Check that matrix.hello.world is banned in room 2 + banned = acls.IsServerBannedFromRoom("matrix."+wantBannedServer, "2") + assert.True(t, banned) +} diff --git a/roomserver/api/perform.go b/roomserver/api/perform.go index 2818efaa3..d6caec08c 100644 --- a/roomserver/api/perform.go +++ b/roomserver/api/perform.go @@ -8,7 
+8,6 @@ import ( "github.com/matrix-org/dendrite/roomserver/types" "github.com/matrix-org/gomatrixserverlib" "github.com/matrix-org/gomatrixserverlib/spec" - "github.com/matrix-org/util" ) type PerformCreateRoomRequest struct { @@ -51,16 +50,14 @@ type PerformLeaveResponse struct { } type InviteInput struct { - RoomID spec.RoomID - Inviter spec.UserID - Invitee spec.UserID - DisplayName string - AvatarURL string - Reason string - IsDirect bool - KeyID gomatrixserverlib.KeyID - PrivateKey ed25519.PrivateKey - EventTime time.Time + RoomID spec.RoomID + Inviter spec.UserID + Invitee spec.UserID + Reason string + IsDirect bool + KeyID gomatrixserverlib.KeyID + PrivateKey ed25519.PrivateKey + EventTime time.Time } type PerformInviteRequest struct { @@ -91,14 +88,44 @@ type PerformBackfillRequest struct { VirtualHost spec.ServerName `json:"virtual_host"` } -// PrevEventIDs returns the prev_event IDs of all backwards extremities, de-duplicated in a lexicographically sorted order. +// limitPrevEventIDs is the maximum number of event IDs we +// return when calling PrevEventIDs. +const limitPrevEventIDs = 100 + +// PrevEventIDs returns the prev_event IDs of the backwards extremities, deduplicated and +// limited to at most 100 IDs, since Synapse/Dendrite stop after reaching +// this limit. (which sounds sane) func (r *PerformBackfillRequest) PrevEventIDs() []string { - var prevEventIDs []string - for _, pes := range r.BackwardsExtremities { - prevEventIDs = append(prevEventIDs, pes...) + var uniqueIDs map[string]struct{} + + // Pre-size the unique eventID map to either 100 or len(r.BackwardsExtremities), + // whichever is smaller, since Synapse/Dendrite stop after reaching 100 events. + if len(r.BackwardsExtremities) > limitPrevEventIDs { + uniqueIDs = make(map[string]struct{}, limitPrevEventIDs) + } else { + uniqueIDs = make(map[string]struct{}, len(r.BackwardsExtremities)) } - prevEventIDs = util.UniqueStrings(prevEventIDs) - return prevEventIDs + +outerLoop: + for _, pes := range r.BackwardsExtremities { + for _, evID := range pes { + uniqueIDs[evID] = struct{}{} + // We found enough unique eventIDs. + if len(uniqueIDs) >= limitPrevEventIDs { + break outerLoop + } + } + } + + // map -> []string + result := make([]string, len(uniqueIDs)) + i := 0 + for evID := range uniqueIDs { + result[i] = evID + i++ + } + + return result } // PerformBackfillResponse is a response to PerformBackfill.
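For readers skimming the diff, the PrevEventIDs change above amounts to collecting prev_event IDs into a set and stopping once 100 unique IDs have been gathered. The following is a minimal standalone sketch of that pattern, not part of the patch; the helper name dedupeWithLimit and the example IDs are illustrative only.

    package main

    import "fmt"

    // dedupeWithLimit collects unique IDs from the given groups, stopping once
    // limit unique IDs have been collected, and returns them as a slice.
    func dedupeWithLimit(groups [][]string, limit int) []string {
    	seen := make(map[string]struct{}, limit)
    outer:
    	for _, group := range groups {
    		for _, id := range group {
    			seen[id] = struct{}{}
    			if len(seen) >= limit {
    				break outer
    			}
    		}
    	}
    	out := make([]string, 0, len(seen))
    	for id := range seen {
    		out = append(out, id)
    	}
    	return out
    }

    func main() {
    	groups := [][]string{{"$a", "$b"}, {"$b", "$c"}}
    	fmt.Println(len(dedupeWithLimit(groups, 100))) // 3 unique IDs
    }

The patch applies the same idea directly inside PerformBackfillRequest.PrevEventIDs, pre-sizing the map and capping the result at limitPrevEventIDs (100).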
diff --git a/roomserver/api/perform_test.go b/roomserver/api/perform_test.go new file mode 100644 index 000000000..f26438d32 --- /dev/null +++ b/roomserver/api/perform_test.go @@ -0,0 +1,81 @@ +package api + +import ( + "fmt" + "math/rand" + "testing" + + "github.com/stretchr/testify/assert" +) + +func BenchmarkPrevEventIDs(b *testing.B) { + for _, x := range []int64{1, 10, 100, 500, 1000, 2000} { + benchPrevEventIDs(b, int(x)) + } +} + +func benchPrevEventIDs(b *testing.B, count int) { + bwExtrems := generateBackwardsExtremities(b, count) + backfiller := PerformBackfillRequest{ + BackwardsExtremities: bwExtrems, + } + + b.Run(fmt.Sprintf("Original%d", count), func(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + prevIDs := backfiller.PrevEventIDs() + _ = prevIDs + } + }) +} + +type testLike interface { + Helper() +} + +const randomIDCharsCount = 10 + +func generateBackwardsExtremities(t testLike, count int) map[string][]string { + t.Helper() + result := make(map[string][]string, count) + for i := 0; i < count; i++ { + eventID := randomEventId(int64(i)) + result[eventID] = []string{ + randomEventId(int64(i + 1)), + randomEventId(int64(i + 2)), + randomEventId(int64(i + 3)), + } + } + return result +} + +const alphanumerics = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" + +// randomEventId generates a pseudo-random string of length n. +func randomEventId(src int64) string { + randSrc := rand.NewSource(src) + b := make([]byte, randomIDCharsCount) + for i := range b { + b[i] = alphanumerics[randSrc.Int63()%int64(len(alphanumerics))] + } + return string(b) +} + +func TestPrevEventIDs(t *testing.T) { + // generate 10 backwards extremities + bwExtrems := generateBackwardsExtremities(t, 10) + backfiller := PerformBackfillRequest{ + BackwardsExtremities: bwExtrems, + } + + prevIDs := backfiller.PrevEventIDs() + // Given how "generateBackwardsExtremities" works, this + // generates 12 unique event IDs + assert.Equal(t, 12, len(prevIDs)) + + // generate 200 backwards extremities + backfiller.BackwardsExtremities = generateBackwardsExtremities(t, 200) + prevIDs = backfiller.PrevEventIDs() + // PrevEventIDs returns at max 100 event IDs + assert.Equal(t, 100, len(prevIDs)) +} diff --git a/roomserver/internal/input/input.go b/roomserver/internal/input/input.go index 20d2cfc7a..104ce94e5 100644 --- a/roomserver/internal/input/input.go +++ b/roomserver/internal/input/input.go @@ -108,12 +108,14 @@ type worker struct { r *Inputer roomID string subscription *nats.Subscription + sentryHub *sentry.Hub } func (r *Inputer) startWorkerForRoom(roomID string) { v, loaded := r.workers.LoadOrStore(roomID, &worker{ - r: r, - roomID: roomID, + r: r, + roomID: roomID, + sentryHub: sentry.CurrentHub().Clone(), }) w := v.(*worker) w.Lock() @@ -265,9 +267,9 @@ func (w *worker) _next() { // Look up what the next event is that's waiting to be processed. ctx, cancel := context.WithTimeout(w.r.ProcessContext.Context(), time.Minute) defer cancel() - if scope := sentry.CurrentHub().Scope(); scope != nil { + w.sentryHub.ConfigureScope(func(scope *sentry.Scope) { scope.SetTag("room_id", w.roomID) - } + }) msgs, err := w.subscription.Fetch(1, nats.Context(ctx)) switch err { case nil: @@ -323,9 +325,9 @@ func (w *worker) _next() { return } - if scope := sentry.CurrentHub().Scope(); scope != nil { + w.sentryHub.ConfigureScope(func(scope *sentry.Scope) { scope.SetTag("event_id", inputRoomEvent.Event.EventID()) - } + }) // Process the room event. 
If something goes wrong then we'll tell // NATS to terminate the message. We'll store the error result as @@ -347,7 +349,7 @@ func (w *worker) _next() { }).Warn("Roomserver rejected event") default: if !errors.Is(err, context.DeadlineExceeded) && !errors.Is(err, context.Canceled) { - sentry.CaptureException(err) + w.sentryHub.CaptureException(err) } logrus.WithError(err).WithFields(logrus.Fields{ "room_id": w.roomID, diff --git a/roomserver/internal/input/input_events.go b/roomserver/internal/input/input_events.go index 1d9208434..657ca8719 100644 --- a/roomserver/internal/input/input_events.go +++ b/roomserver/internal/input/input_events.go @@ -24,6 +24,7 @@ import ( "fmt" "time" + "github.com/matrix-org/dendrite/roomserver/storage/tables" "github.com/tidwall/gjson" "github.com/matrix-org/gomatrixserverlib" @@ -509,7 +510,13 @@ func (r *Inputer) processRoomEvent( logrus.WithError(err).Error("failed to get server ACLs") } if aclEvent != nil { - r.ACLs.OnServerACLUpdate(aclEvent) + strippedEvent := tables.StrippedEvent{ + RoomID: aclEvent.RoomID().String(), + EventType: aclEvent.Type(), + StateKey: *aclEvent.StateKey(), + ContentValue: string(aclEvent.Content()), + } + r.ACLs.OnServerACLUpdate(strippedEvent) } } } diff --git a/roomserver/internal/input/input_latest_events.go b/roomserver/internal/input/input_latest_events.go index ec03d6f13..e9856cc5d 100644 --- a/roomserver/internal/input/input_latest_events.go +++ b/roomserver/internal/input/input_latest_events.go @@ -298,6 +298,7 @@ func (u *latestEventsUpdater) latestState() error { }).Warnf("State reset detected (removing %d events)", removed) sentry.WithScope(func(scope *sentry.Scope) { scope.SetLevel("warning") + scope.SetTag("room_id", u.event.RoomID().String()) scope.SetContext("State reset", map[string]interface{}{ "Event ID": u.event.EventID(), "Old state NID": fmt.Sprintf("%d", u.oldStateNID), diff --git a/roomserver/internal/perform/perform_create_room.go b/roomserver/internal/perform/perform_create_room.go index eb8de7811..093082f90 100644 --- a/roomserver/internal/perform/perform_create_room.go +++ b/roomserver/internal/perform/perform_create_room.go @@ -503,16 +503,14 @@ func (c *Creator) PerformCreateRoom(ctx context.Context, userID spec.UserID, roo err = c.RSAPI.PerformInvite(ctx, &api.PerformInviteRequest{ InviteInput: api.InviteInput{ - RoomID: roomID, - Inviter: userID, - Invitee: *inviteeUserID, - DisplayName: createRequest.UserDisplayName, - AvatarURL: createRequest.UserAvatarURL, - Reason: "", - IsDirect: createRequest.IsDirect, - KeyID: createRequest.KeyID, - PrivateKey: createRequest.PrivateKey, - EventTime: createRequest.EventTime, + RoomID: roomID, + Inviter: userID, + Invitee: *inviteeUserID, + Reason: "", + IsDirect: createRequest.IsDirect, + KeyID: createRequest.KeyID, + PrivateKey: createRequest.PrivateKey, + EventTime: createRequest.EventTime, }, InviteRoomState: globalStrippedState, SendAsServer: string(userID.Domain()), diff --git a/roomserver/internal/perform/perform_invite.go b/roomserver/internal/perform/perform_invite.go index 3abb69cb9..86563e8c3 100644 --- a/roomserver/internal/perform/perform_invite.go +++ b/roomserver/internal/perform/perform_invite.go @@ -144,11 +144,9 @@ func (r *Inviter) PerformInvite( } content := gomatrixserverlib.MemberContent{ - Membership: spec.Invite, - DisplayName: req.InviteInput.DisplayName, - AvatarURL: req.InviteInput.AvatarURL, - Reason: req.InviteInput.Reason, - IsDirect: req.InviteInput.IsDirect, + Membership: spec.Invite, + Reason: req.InviteInput.Reason, + 
IsDirect: req.InviteInput.IsDirect, } if err = proto.SetContent(content); err != nil { diff --git a/roomserver/producers/roomevent.go b/roomserver/producers/roomevent.go index af7e10580..894e6d81b 100644 --- a/roomserver/producers/roomevent.go +++ b/roomserver/producers/roomevent.go @@ -17,6 +17,7 @@ package producers import ( "encoding/json" + "github.com/matrix-org/dendrite/roomserver/storage/tables" "github.com/nats-io/nats.go" log "github.com/sirupsen/logrus" "github.com/tidwall/gjson" @@ -75,7 +76,13 @@ func (r *RoomEventProducer) ProduceRoomEvents(roomID string, updates []api.Outpu if eventType == acls.MRoomServerACL && update.NewRoomEvent.Event.StateKeyEquals("") { ev := update.NewRoomEvent.Event.PDU - defer r.ACLs.OnServerACLUpdate(ev) + strippedEvent := tables.StrippedEvent{ + RoomID: ev.RoomID().String(), + EventType: ev.Type(), + StateKey: *ev.StateKey(), + ContentValue: string(ev.Content()), + } + defer r.ACLs.OnServerACLUpdate(strippedEvent) } } logger.Tracef("Producing to topic '%s'", r.Topic) diff --git a/roomserver/storage/shared/storage.go b/roomserver/storage/shared/storage.go index 3331c6029..682cead6c 100644 --- a/roomserver/storage/shared/storage.go +++ b/roomserver/storage/shared/storage.go @@ -889,10 +889,10 @@ func (d *Database) assignRoomNID( } // Check if we already have a numeric ID in the database. roomNID, err := d.RoomsTable.SelectRoomNID(ctx, txn, roomID) - if err == sql.ErrNoRows { + if errors.Is(err, sql.ErrNoRows) { // We don't have a numeric ID so insert one into the database. roomNID, err = d.RoomsTable.InsertRoomNID(ctx, txn, roomID, roomVersion) - if err == sql.ErrNoRows { + if errors.Is(err, sql.ErrNoRows) { // We raced with another insert so run the select again. roomNID, err = d.RoomsTable.SelectRoomNID(ctx, txn, roomID) } @@ -914,10 +914,10 @@ func (d *Database) assignEventTypeNID( } // Check if we already have a numeric ID in the database. eventTypeNID, err := d.EventTypesTable.SelectEventTypeNID(ctx, txn, eventType) - if err == sql.ErrNoRows { + if errors.Is(err, sql.ErrNoRows) { // We don't have a numeric ID so insert one into the database. eventTypeNID, err = d.EventTypesTable.InsertEventTypeNID(ctx, txn, eventType) - if err == sql.ErrNoRows { + if errors.Is(err, sql.ErrNoRows) { // We raced with another insert so run the select again. eventTypeNID, err = d.EventTypesTable.SelectEventTypeNID(ctx, txn, eventType) } @@ -938,16 +938,19 @@ func (d *EventDatabase) assignStateKeyNID( } // Check if we already have a numeric ID in the database. eventStateKeyNID, err := d.EventStateKeysTable.SelectEventStateKeyNID(ctx, txn, eventStateKey) - if err == sql.ErrNoRows { + if errors.Is(err, sql.ErrNoRows) { // We don't have a numeric ID so insert one into the database. eventStateKeyNID, err = d.EventStateKeysTable.InsertEventStateKeyNID(ctx, txn, eventStateKey) - if err == sql.ErrNoRows { + if errors.Is(err, sql.ErrNoRows) { // We raced with another insert so run the select again. 
eventStateKeyNID, err = d.EventStateKeysTable.SelectEventStateKeyNID(ctx, txn, eventStateKey) } } + if err != nil { + return 0, err + } d.Cache.StoreEventStateKey(eventStateKeyNID, eventStateKey) - return eventStateKeyNID, err + return eventStateKeyNID, nil } func extractRoomVersionFromCreateEvent(event gomatrixserverlib.PDU) ( diff --git a/roomserver/storage/tables/interface.go b/roomserver/storage/tables/interface.go index 0ae064e6b..b3cb31880 100644 --- a/roomserver/storage/tables/interface.go +++ b/roomserver/storage/tables/interface.go @@ -235,6 +235,10 @@ func ExtractContentValue(ev *types.HeaderedEvent) string { key = "topic" case "m.room.guest_access": key = "guest_access" + case "m.room.server_acl": + // We need the entire content and not only one key, so we can use it + // on startup to generate the ACLs. This is merely a workaround. + return string(content) } result := gjson.GetBytes(content, key) if !result.Exists() { diff --git a/setup/base/base.go b/setup/base/base.go index ea342054c..455337e59 100644 --- a/setup/base/base.go +++ b/setup/base/base.go @@ -50,6 +50,9 @@ import ( //go:embed static/*.gotmpl var staticContent embed.FS +//go:embed static/client/login +var loginFallback embed.FS + const HTTPServerTimeout = time.Minute * 5 // CreateClient creates a new client (normally used for media fetch requests). @@ -158,6 +161,14 @@ func SetupAndServeHTTP( _, _ = w.Write(landingPage.Bytes()) }) + // We only need the files beneath the static/client/login folder. + sub, err := fs.Sub(loginFallback, "static/client/login") + if err != nil { + logrus.Panicf("unable to read embedded files, this should never happen: %s", err) + } + // Serve a static page for login fallback + routers.Static.PathPrefix("/client/login/").Handler(http.StripPrefix("/_matrix/static/client/login/", http.FileServer(http.FS(sub)))) + var clientHandler http.Handler clientHandler = routers.Client if cfg.Global.Sentry.Enabled { diff --git a/setup/base/static/client/login/index.html b/setup/base/static/client/login/index.html new file mode 100644 index 000000000..7d3b109a1 --- /dev/null +++ b/setup/base/static/client/login/index.html @@ -0,0 +1,47 @@ + + + + + Login + + + + + + + +
[index.html: new 47-line login fallback page titled "Login"; the remaining HTML markup is not recoverable from this extraction]
+ + diff --git a/setup/base/static/client/login/js/jquery-3.4.1.min.js b/setup/base/static/client/login/js/jquery-3.4.1.min.js new file mode 100644 index 000000000..a1c07fd80 --- /dev/null +++ b/setup/base/static/client/login/js/jquery-3.4.1.min.js @@ -0,0 +1,2 @@ +/*! jQuery v3.4.1 | (c) JS Foundation and other contributors | jquery.org/license */ +!function(e,t){"use strict";"object"==typeof module&&"object"==typeof module.exports?module.exports=e.document?t(e,!0):function(e){if(!e.document)throw new Error("jQuery requires a window with a document");return t(e)}:t(e)}("undefined"!=typeof window?window:this,function(C,e){"use strict";var t=[],E=C.document,r=Object.getPrototypeOf,s=t.slice,g=t.concat,u=t.push,i=t.indexOf,n={},o=n.toString,v=n.hasOwnProperty,a=v.toString,l=a.call(Object),y={},m=function(e){return"function"==typeof e&&"number"!=typeof e.nodeType},x=function(e){return null!=e&&e===e.window},c={type:!0,src:!0,nonce:!0,noModule:!0};function b(e,t,n){var r,i,o=(n=n||E).createElement("script");if(o.text=e,t)for(r in c)(i=t[r]||t.getAttribute&&t.getAttribute(r))&&o.setAttribute(r,i);n.head.appendChild(o).parentNode.removeChild(o)}function w(e){return null==e?e+"":"object"==typeof e||"function"==typeof e?n[o.call(e)]||"object":typeof e}var f="3.4.1",k=function(e,t){return new k.fn.init(e,t)},p=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g;function d(e){var t=!!e&&"length"in e&&e.length,n=w(e);return!m(e)&&!x(e)&&("array"===n||0===t||"number"==typeof t&&0+~]|"+M+")"+M+"*"),U=new RegExp(M+"|>"),X=new RegExp($),V=new RegExp("^"+I+"$"),G={ID:new RegExp("^#("+I+")"),CLASS:new RegExp("^\\.("+I+")"),TAG:new RegExp("^("+I+"|[*])"),ATTR:new RegExp("^"+W),PSEUDO:new RegExp("^"+$),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+M+"*(even|odd|(([+-]|)(\\d*)n|)"+M+"*(?:([+-]|)"+M+"*(\\d+)|))"+M+"*\\)|)","i"),bool:new RegExp("^(?:"+R+")$","i"),needsContext:new RegExp("^"+M+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+M+"*((?:-\\d)?\\d*)"+M+"*\\)|)(?=[^-]|$)","i")},Y=/HTML$/i,Q=/^(?:input|select|textarea|button)$/i,J=/^h\d$/i,K=/^[^{]+\{\s*\[native \w/,Z=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,ee=/[+~]/,te=new RegExp("\\\\([\\da-f]{1,6}"+M+"?|("+M+")|.)","ig"),ne=function(e,t,n){var r="0x"+t-65536;return r!=r||n?t:r<0?String.fromCharCode(r+65536):String.fromCharCode(r>>10|55296,1023&r|56320)},re=/([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g,ie=function(e,t){return t?"\0"===e?"\ufffd":e.slice(0,-1)+"\\"+e.charCodeAt(e.length-1).toString(16)+" ":"\\"+e},oe=function(){T()},ae=be(function(e){return!0===e.disabled&&"fieldset"===e.nodeName.toLowerCase()},{dir:"parentNode",next:"legend"});try{H.apply(t=O.call(m.childNodes),m.childNodes),t[m.childNodes.length].nodeType}catch(e){H={apply:t.length?function(e,t){L.apply(e,O.call(t))}:function(e,t){var n=e.length,r=0;while(e[n++]=t[r++]);e.length=n-1}}}function se(t,e,n,r){var i,o,a,s,u,l,c,f=e&&e.ownerDocument,p=e?e.nodeType:9;if(n=n||[],"string"!=typeof t||!t||1!==p&&9!==p&&11!==p)return n;if(!r&&((e?e.ownerDocument||e:m)!==C&&T(e),e=e||C,E)){if(11!==p&&(u=Z.exec(t)))if(i=u[1]){if(9===p){if(!(a=e.getElementById(i)))return n;if(a.id===i)return n.push(a),n}else if(f&&(a=f.getElementById(i))&&y(e,a)&&a.id===i)return n.push(a),n}else{if(u[2])return H.apply(n,e.getElementsByTagName(t)),n;if((i=u[3])&&d.getElementsByClassName&&e.getElementsByClassName)return H.apply(n,e.getElementsByClassName(i)),n}if(d.qsa&&!A[t+" 
"]&&(!v||!v.test(t))&&(1!==p||"object"!==e.nodeName.toLowerCase())){if(c=t,f=e,1===p&&U.test(t)){(s=e.getAttribute("id"))?s=s.replace(re,ie):e.setAttribute("id",s=k),o=(l=h(t)).length;while(o--)l[o]="#"+s+" "+xe(l[o]);c=l.join(","),f=ee.test(t)&&ye(e.parentNode)||e}try{return H.apply(n,f.querySelectorAll(c)),n}catch(e){A(t,!0)}finally{s===k&&e.removeAttribute("id")}}}return g(t.replace(B,"$1"),e,n,r)}function ue(){var r=[];return function e(t,n){return r.push(t+" ")>b.cacheLength&&delete e[r.shift()],e[t+" "]=n}}function le(e){return e[k]=!0,e}function ce(e){var t=C.createElement("fieldset");try{return!!e(t)}catch(e){return!1}finally{t.parentNode&&t.parentNode.removeChild(t),t=null}}function fe(e,t){var n=e.split("|"),r=n.length;while(r--)b.attrHandle[n[r]]=t}function pe(e,t){var n=t&&e,r=n&&1===e.nodeType&&1===t.nodeType&&e.sourceIndex-t.sourceIndex;if(r)return r;if(n)while(n=n.nextSibling)if(n===t)return-1;return e?1:-1}function de(t){return function(e){return"input"===e.nodeName.toLowerCase()&&e.type===t}}function he(n){return function(e){var t=e.nodeName.toLowerCase();return("input"===t||"button"===t)&&e.type===n}}function ge(t){return function(e){return"form"in e?e.parentNode&&!1===e.disabled?"label"in e?"label"in e.parentNode?e.parentNode.disabled===t:e.disabled===t:e.isDisabled===t||e.isDisabled!==!t&&ae(e)===t:e.disabled===t:"label"in e&&e.disabled===t}}function ve(a){return le(function(o){return o=+o,le(function(e,t){var n,r=a([],e.length,o),i=r.length;while(i--)e[n=r[i]]&&(e[n]=!(t[n]=e[n]))})})}function ye(e){return e&&"undefined"!=typeof e.getElementsByTagName&&e}for(e in d=se.support={},i=se.isXML=function(e){var t=e.namespaceURI,n=(e.ownerDocument||e).documentElement;return!Y.test(t||n&&n.nodeName||"HTML")},T=se.setDocument=function(e){var t,n,r=e?e.ownerDocument||e:m;return r!==C&&9===r.nodeType&&r.documentElement&&(a=(C=r).documentElement,E=!i(C),m!==C&&(n=C.defaultView)&&n.top!==n&&(n.addEventListener?n.addEventListener("unload",oe,!1):n.attachEvent&&n.attachEvent("onunload",oe)),d.attributes=ce(function(e){return e.className="i",!e.getAttribute("className")}),d.getElementsByTagName=ce(function(e){return e.appendChild(C.createComment("")),!e.getElementsByTagName("*").length}),d.getElementsByClassName=K.test(C.getElementsByClassName),d.getById=ce(function(e){return a.appendChild(e).id=k,!C.getElementsByName||!C.getElementsByName(k).length}),d.getById?(b.filter.ID=function(e){var t=e.replace(te,ne);return function(e){return e.getAttribute("id")===t}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var n=t.getElementById(e);return n?[n]:[]}}):(b.filter.ID=function(e){var n=e.replace(te,ne);return function(e){var t="undefined"!=typeof e.getAttributeNode&&e.getAttributeNode("id");return t&&t.value===n}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var n,r,i,o=t.getElementById(e);if(o){if((n=o.getAttributeNode("id"))&&n.value===e)return[o];i=t.getElementsByName(e),r=0;while(o=i[r++])if((n=o.getAttributeNode("id"))&&n.value===e)return[o]}return[]}}),b.find.TAG=d.getElementsByTagName?function(e,t){return"undefined"!=typeof t.getElementsByTagName?t.getElementsByTagName(e):d.qsa?t.querySelectorAll(e):void 0}:function(e,t){var n,r=[],i=0,o=t.getElementsByTagName(e);if("*"===e){while(n=o[i++])1===n.nodeType&&r.push(n);return r}return o},b.find.CLASS=d.getElementsByClassName&&function(e,t){if("undefined"!=typeof t.getElementsByClassName&&E)return 
t.getElementsByClassName(e)},s=[],v=[],(d.qsa=K.test(C.querySelectorAll))&&(ce(function(e){a.appendChild(e).innerHTML="",e.querySelectorAll("[msallowcapture^='']").length&&v.push("[*^$]="+M+"*(?:''|\"\")"),e.querySelectorAll("[selected]").length||v.push("\\["+M+"*(?:value|"+R+")"),e.querySelectorAll("[id~="+k+"-]").length||v.push("~="),e.querySelectorAll(":checked").length||v.push(":checked"),e.querySelectorAll("a#"+k+"+*").length||v.push(".#.+[+~]")}),ce(function(e){e.innerHTML="";var t=C.createElement("input");t.setAttribute("type","hidden"),e.appendChild(t).setAttribute("name","D"),e.querySelectorAll("[name=d]").length&&v.push("name"+M+"*[*^$|!~]?="),2!==e.querySelectorAll(":enabled").length&&v.push(":enabled",":disabled"),a.appendChild(e).disabled=!0,2!==e.querySelectorAll(":disabled").length&&v.push(":enabled",":disabled"),e.querySelectorAll("*,:x"),v.push(",.*:")})),(d.matchesSelector=K.test(c=a.matches||a.webkitMatchesSelector||a.mozMatchesSelector||a.oMatchesSelector||a.msMatchesSelector))&&ce(function(e){d.disconnectedMatch=c.call(e,"*"),c.call(e,"[s!='']:x"),s.push("!=",$)}),v=v.length&&new RegExp(v.join("|")),s=s.length&&new RegExp(s.join("|")),t=K.test(a.compareDocumentPosition),y=t||K.test(a.contains)?function(e,t){var n=9===e.nodeType?e.documentElement:e,r=t&&t.parentNode;return e===r||!(!r||1!==r.nodeType||!(n.contains?n.contains(r):e.compareDocumentPosition&&16&e.compareDocumentPosition(r)))}:function(e,t){if(t)while(t=t.parentNode)if(t===e)return!0;return!1},D=t?function(e,t){if(e===t)return l=!0,0;var n=!e.compareDocumentPosition-!t.compareDocumentPosition;return n||(1&(n=(e.ownerDocument||e)===(t.ownerDocument||t)?e.compareDocumentPosition(t):1)||!d.sortDetached&&t.compareDocumentPosition(e)===n?e===C||e.ownerDocument===m&&y(m,e)?-1:t===C||t.ownerDocument===m&&y(m,t)?1:u?P(u,e)-P(u,t):0:4&n?-1:1)}:function(e,t){if(e===t)return l=!0,0;var n,r=0,i=e.parentNode,o=t.parentNode,a=[e],s=[t];if(!i||!o)return e===C?-1:t===C?1:i?-1:o?1:u?P(u,e)-P(u,t):0;if(i===o)return pe(e,t);n=e;while(n=n.parentNode)a.unshift(n);n=t;while(n=n.parentNode)s.unshift(n);while(a[r]===s[r])r++;return r?pe(a[r],s[r]):a[r]===m?-1:s[r]===m?1:0}),C},se.matches=function(e,t){return se(e,null,null,t)},se.matchesSelector=function(e,t){if((e.ownerDocument||e)!==C&&T(e),d.matchesSelector&&E&&!A[t+" "]&&(!s||!s.test(t))&&(!v||!v.test(t)))try{var n=c.call(e,t);if(n||d.disconnectedMatch||e.document&&11!==e.document.nodeType)return n}catch(e){A(t,!0)}return 0":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(e){return e[1]=e[1].replace(te,ne),e[3]=(e[3]||e[4]||e[5]||"").replace(te,ne),"~="===e[2]&&(e[3]=" "+e[3]+" "),e.slice(0,4)},CHILD:function(e){return e[1]=e[1].toLowerCase(),"nth"===e[1].slice(0,3)?(e[3]||se.error(e[0]),e[4]=+(e[4]?e[5]+(e[6]||1):2*("even"===e[3]||"odd"===e[3])),e[5]=+(e[7]+e[8]||"odd"===e[3])):e[3]&&se.error(e[0]),e},PSEUDO:function(e){var t,n=!e[6]&&e[2];return G.CHILD.test(e[0])?null:(e[3]?e[2]=e[4]||e[5]||"":n&&X.test(n)&&(t=h(n,!0))&&(t=n.indexOf(")",n.length-t)-n.length)&&(e[0]=e[0].slice(0,t),e[2]=n.slice(0,t)),e.slice(0,3))}},filter:{TAG:function(e){var t=e.replace(te,ne).toLowerCase();return"*"===e?function(){return!0}:function(e){return e.nodeName&&e.nodeName.toLowerCase()===t}},CLASS:function(e){var t=p[e+" "];return t||(t=new RegExp("(^|"+M+")"+e+"("+M+"|$)"))&&p(e,function(e){return t.test("string"==typeof e.className&&e.className||"undefined"!=typeof 
",2===Vt.childNodes.length),k.parseHTML=function(e,t,n){return"string"!=typeof e?[]:("boolean"==typeof t&&(n=t,t=!1),t||(y.createHTMLDocument?((r=(t=E.implementation.createHTMLDocument("")).createElement("base")).href=E.location.href,t.head.appendChild(r)):t=E),o=!n&&[],(i=D.exec(e))?[t.createElement(i[1])]:(i=we([e],t,o),o&&o.length&&k(o).remove(),k.merge([],i.childNodes)));var r,i,o},k.fn.load=function(e,t,n){var r,i,o,a=this,s=e.indexOf(" ");return-1").append(k.parseHTML(e)).find(r):e)}).always(n&&function(e,t){a.each(function(){n.apply(this,o||[e.responseText,t,e])})}),this},k.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(e,t){k.fn[t]=function(e){return this.on(t,e)}}),k.expr.pseudos.animated=function(t){return k.grep(k.timers,function(e){return t===e.elem}).length},k.offset={setOffset:function(e,t,n){var r,i,o,a,s,u,l=k.css(e,"position"),c=k(e),f={};"static"===l&&(e.style.position="relative"),s=c.offset(),o=k.css(e,"top"),u=k.css(e,"left"),("absolute"===l||"fixed"===l)&&-1<(o+u).indexOf("auto")?(a=(r=c.position()).top,i=r.left):(a=parseFloat(o)||0,i=parseFloat(u)||0),m(t)&&(t=t.call(e,n,k.extend({},s))),null!=t.top&&(f.top=t.top-s.top+a),null!=t.left&&(f.left=t.left-s.left+i),"using"in t?t.using.call(e,f):c.css(f)}},k.fn.extend({offset:function(t){if(arguments.length)return void 0===t?this:this.each(function(e){k.offset.setOffset(this,t,e)});var e,n,r=this[0];return r?r.getClientRects().length?(e=r.getBoundingClientRect(),n=r.ownerDocument.defaultView,{top:e.top+n.pageYOffset,left:e.left+n.pageXOffset}):{top:0,left:0}:void 0},position:function(){if(this[0]){var e,t,n,r=this[0],i={top:0,left:0};if("fixed"===k.css(r,"position"))t=r.getBoundingClientRect();else{t=this.offset(),n=r.ownerDocument,e=r.offsetParent||n.documentElement;while(e&&(e===n.body||e===n.documentElement)&&"static"===k.css(e,"position"))e=e.parentNode;e&&e!==r&&1===e.nodeType&&((i=k(e).offset()).top+=k.css(e,"borderTopWidth",!0),i.left+=k.css(e,"borderLeftWidth",!0))}return{top:t.top-i.top-k.css(r,"marginTop",!0),left:t.left-i.left-k.css(r,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var e=this.offsetParent;while(e&&"static"===k.css(e,"position"))e=e.offsetParent;return e||ie})}}),k.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(t,i){var o="pageYOffset"===i;k.fn[t]=function(e){return _(this,function(e,t,n){var r;if(x(e)?r=e:9===e.nodeType&&(r=e.defaultView),void 0===n)return r?r[i]:e[t];r?r.scrollTo(o?r.pageXOffset:n,o?n:r.pageYOffset):e[t]=n},t,e,arguments.length)}}),k.each(["top","left"],function(e,n){k.cssHooks[n]=ze(y.pixelPosition,function(e,t){if(t)return t=_e(e,n),$e.test(t)?k(e).position()[n]+"px":t})}),k.each({Height:"height",Width:"width"},function(a,s){k.each({padding:"inner"+a,content:s,"":"outer"+a},function(r,o){k.fn[o]=function(e,t){var n=arguments.length&&(r||"boolean"!=typeof e),i=r||(!0===e||!0===t?"margin":"border");return _(this,function(e,t,n){var r;return x(e)?0===o.indexOf("outer")?e["inner"+a]:e.document.documentElement["client"+a]:9===e.nodeType?(r=e.documentElement,Math.max(e.body["scroll"+a],r["scroll"+a],e.body["offset"+a],r["offset"+a],r["client"+a])):void 0===n?k.css(e,t,i):k.style(e,t,n,i)},s,n?e:void 0,n)}})}),k.each("blur focus focusin focusout resize scroll click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup contextmenu".split(" "),function(e,n){k.fn[n]=function(e,t){return 0 * { + display: block; + margin-left: auto; + 
margin-right: auto; + text-align: center; +} + +/* + * A wrapper around each login flow. + */ +.login_flow { + width: 300px; + text-align: left; + padding: 10px; + margin-bottom: 40px; + + border-radius: 10px; + box-shadow: 0px 0px 20px 0px rgba(0,0,0,0.15); + + background-color: #f8f8f8; + border: 1px #ccc solid; +} + +/* + * Used to show error content. + */ +#feedback { + /* Red text. */ + color: #ff0000; + /* A little space to not overlap the box-shadow. */ + margin-bottom: 20px; +} diff --git a/setup/config/config_appservice.go b/setup/config/config_appservice.go index ef10649d2..a95cec046 100644 --- a/setup/config/config_appservice.go +++ b/setup/config/config_appservice.go @@ -40,6 +40,9 @@ type AppServiceAPI struct { // on appservice endpoints. This is not recommended in production! DisableTLSValidation bool `yaml:"disable_tls_validation"` + LegacyAuth bool `yaml:"legacy_auth"` + LegacyPaths bool `yaml:"legacy_paths"` + ConfigFiles []string `yaml:"config_files"` } diff --git a/setup/config/config_federationapi.go b/setup/config/config_federationapi.go index a72eee369..073c46e03 100644 --- a/setup/config/config_federationapi.go +++ b/setup/config/config_federationapi.go @@ -18,6 +18,13 @@ type FederationAPI struct { // The default value is 16 if not specified, which is circa 18 hours. FederationMaxRetries uint32 `yaml:"send_max_retries"` + // P2P Feature: Whether relaying to specific nodes should be enabled. + // Defaults to false. + // Note: Enabling relays introduces a huge startup delay, if you are not using + // relays and have many servers to re-hydrate on start. Only enable this + // if you are using relays! + EnableRelays bool `yaml:"enable_relays"` + // P2P Feature: How many consecutive failures that we should tolerate when // sending federation requests to a specific server until we should assume they // are offline. If we assume they are offline then we will attempt to send diff --git a/syncapi/routing/context.go b/syncapi/routing/context.go index b0c91c40b..b136c69a0 100644 --- a/syncapi/routing/context.go +++ b/syncapi/routing/context.go @@ -110,6 +110,7 @@ func Context( } stateFilter := synctypes.StateFilter{ + Limit: filter.Limit, NotSenders: filter.NotSenders, NotTypes: filter.NotTypes, Senders: filter.Senders, @@ -157,6 +158,11 @@ func Context( } } + // Limit is split up for before/after events + if filter.Limit > 1 { + filter.Limit = filter.Limit / 2 + } + eventsBefore, err := snapshot.SelectContextBeforeEvent(ctx, id, roomID, filter) if err != nil && err != sql.ErrNoRows { logrus.WithError(err).Error("unable to fetch before events") diff --git a/syncapi/routing/memberships.go b/syncapi/routing/memberships.go index e849adf6d..9cc937d88 100644 --- a/syncapi/routing/memberships.go +++ b/syncapi/routing/memberships.go @@ -15,7 +15,6 @@ package routing import ( - "encoding/json" "math" "net/http" @@ -33,31 +32,13 @@ type getMembershipResponse struct { Chunk []synctypes.ClientEvent `json:"chunk"` } -// https://matrix.org/docs/spec/client_server/r0.6.0#get-matrix-client-r0-rooms-roomid-joined-members -type getJoinedMembersResponse struct { - Joined map[string]joinedMember `json:"joined"` -} - -type joinedMember struct { - DisplayName string `json:"display_name"` - AvatarURL string `json:"avatar_url"` -} - -// The database stores 'displayname' without an underscore. 
-// Deserialize into this and then change to the actual API response
-type databaseJoinedMember struct {
-	DisplayName string `json:"displayname"`
-	AvatarURL   string `json:"avatar_url"`
-}
-
 // GetMemberships implements
 //
 //	GET /rooms/{roomId}/members
-//	GET /rooms/{roomId}/joined_members
 func GetMemberships(
 	req *http.Request, device *userapi.Device, roomID string,
 	syncDB storage.Database, rsAPI api.SyncRoomserverAPI,
-	joinedOnly bool, membership, notMembership *string, at string,
+	membership, notMembership *string, at string,
 ) util.JSONResponse {
 	userID, err := spec.NewUserID(device.UserID, true)
 	if err != nil {
@@ -87,13 +68,6 @@ func GetMemberships(
 		}
 	}
 
-	if joinedOnly && !queryRes.IsInRoom {
-		return util.JSONResponse{
-			Code: http.StatusForbidden,
-			JSON: spec.Forbidden("You aren't a member of the room and weren't previously a member of the room."),
-		}
-	}
-
 	db, err := syncDB.NewDatabaseSnapshot(req.Context())
 	if err != nil {
 		return util.JSONResponse{
@@ -139,40 +113,6 @@ func GetMemberships(
 
 	result := qryRes.Events
 
-	if joinedOnly {
-		var res getJoinedMembersResponse
-		res.Joined = make(map[string]joinedMember)
-		for _, ev := range result {
-			var content databaseJoinedMember
-			if err := json.Unmarshal(ev.Content(), &content); err != nil {
-				util.GetLogger(req.Context()).WithError(err).Error("failed to unmarshal event content")
-				return util.JSONResponse{
-					Code: http.StatusInternalServerError,
-					JSON: spec.InternalServerError{},
-				}
-			}
-
-			userID, err := rsAPI.QueryUserIDForSender(req.Context(), ev.RoomID(), ev.SenderID())
-			if err != nil || userID == nil {
-				util.GetLogger(req.Context()).WithError(err).Error("rsAPI.QueryUserIDForSender failed")
-				return util.JSONResponse{
-					Code: http.StatusInternalServerError,
-					JSON: spec.InternalServerError{},
-				}
-			}
-			if err != nil {
-				return util.JSONResponse{
-					Code: http.StatusForbidden,
-					JSON: spec.Forbidden("You don't have permission to kick this user, unknown senderID"),
-				}
-			}
-			res.Joined[userID.String()] = joinedMember(content)
-		}
-		return util.JSONResponse{
-			Code: http.StatusOK,
-			JSON: res,
-		}
-	}
 	return util.JSONResponse{
 		Code: http.StatusOK,
 		JSON: getMembershipResponse{synctypes.ToClientEvents(gomatrixserverlib.ToPDUs(result), synctypes.FormatAll, func(roomID spec.RoomID, senderID spec.SenderID) (*spec.UserID, error) {
diff --git a/syncapi/routing/messages.go b/syncapi/routing/messages.go
index 3333cb54d..7ea01c7dc 100644
--- a/syncapi/routing/messages.go
+++ b/syncapi/routing/messages.go
@@ -135,13 +135,6 @@ func OnIncomingMessagesRequest(
 	var fromStream *types.StreamingToken
 	fromQuery := req.URL.Query().Get("from")
 	toQuery := req.URL.Query().Get("to")
-	emptyFromSupplied := fromQuery == ""
-	if emptyFromSupplied {
-		// NOTSPEC: We will pretend they used the latest sync token if no ?from= was provided.
-		// We do this to allow clients to get messages without having to call `/sync` e.g Cerulean
-		currPos := srp.Notifier.CurrentPosition()
-		fromQuery = currPos.String()
-	}
 
 	// Direction to return events from.
 	dir := req.URL.Query().Get("dir")
@@ -155,6 +148,23 @@ func OnIncomingMessagesRequest(
 	// to have one of the two accepted values (so dir == "f" <=> !backwardOrdering).
 	backwardOrdering := (dir == "b")
 
+	emptyFromSupplied := fromQuery == ""
+	if emptyFromSupplied {
+		// If "from" isn't provided, it defaults to either the earliest stream
+		// position (if we're going forward) or to the latest one (if we're
+		// going backward).
+
+		var from types.TopologyToken
+		if backwardOrdering {
+			from = types.TopologyToken{Depth: math.MaxInt64, PDUPosition: math.MaxInt64}
+		} else {
+			// go 1 earlier than the first event so we correctly fetch the earliest event
+			// this is because Database.GetEventsInTopologicalRange is exclusive of the lower-bound.
+			from = types.TopologyToken{}
+		}
+		fromQuery = from.String()
+	}
+
 	from, err := types.NewTopologyTokenFromString(fromQuery)
 	if err != nil {
 		var streamToken types.StreamingToken
diff --git a/syncapi/routing/routing.go b/syncapi/routing/routing.go
index a837e1696..78188d1b6 100644
--- a/syncapi/routing/routing.go
+++ b/syncapi/routing/routing.go
@@ -197,19 +197,7 @@ func Setup(
 			}
 			at := req.URL.Query().Get("at")
-			return GetMemberships(req, device, vars["roomID"], syncDB, rsAPI, false, membership, notMembership, at)
+			return GetMemberships(req, device, vars["roomID"], syncDB, rsAPI, membership, notMembership, at)
 		}, httputil.WithAllowGuests()),
 	).Methods(http.MethodGet, http.MethodOptions)
-
-	v3mux.Handle("/rooms/{roomID}/joined_members",
-		httputil.MakeAuthAPI("rooms_members", userAPI, func(req *http.Request, device *userapi.Device) util.JSONResponse {
-			vars, err := httputil.URLDecodeMapValues(mux.Vars(req))
-			if err != nil {
-				return util.ErrorResponse(err)
-			}
-			at := req.URL.Query().Get("at")
-			membership := spec.Join
-			return GetMemberships(req, device, vars["roomID"], syncDB, rsAPI, true, &membership, nil, at)
-		}),
-	).Methods(http.MethodGet, http.MethodOptions)
 }
diff --git a/syncapi/streams/stream_pdu.go b/syncapi/streams/stream_pdu.go
index 3abb0b3c6..790f5bd1b 100644
--- a/syncapi/streams/stream_pdu.go
+++ b/syncapi/streams/stream_pdu.go
@@ -203,6 +203,12 @@ func (p *PDUStreamProvider) IncrementalSync(
 		req.Log.WithError(err).Error("unable to update event filter with ignored users")
 	}
 
+	dbEvents, err := p.getRecentEvents(ctx, stateDeltas, r, eventFilter, snapshot)
+	if err != nil {
+		req.Log.WithError(err).Error("unable to get recent events")
+		return r.From
+	}
+
 	newPos = from
 	for _, delta := range stateDeltas {
 		newRange := r
@@ -218,7 +224,7 @@ func (p *PDUStreamProvider) IncrementalSync(
 			}
 		}
 		var pos types.StreamPosition
-		if pos, err = p.addRoomDeltaToResponse(ctx, snapshot, req.Device, newRange, delta, &eventFilter, &stateFilter, req); err != nil {
+		if pos, err = p.addRoomDeltaToResponse(ctx, snapshot, req.Device, newRange, delta, &eventFilter, &stateFilter, req, dbEvents); err != nil {
 			req.Log.WithError(err).Error("d.addRoomDeltaToResponse failed")
 			if err == context.DeadlineExceeded || err == context.Canceled || err == sql.ErrTxDone {
 				return newPos
@@ -240,6 +246,66 @@ func (p *PDUStreamProvider) IncrementalSync(
 	return newPos
 }
 
+func (p *PDUStreamProvider) getRecentEvents(ctx context.Context, stateDeltas []types.StateDelta, r types.Range, eventFilter synctypes.RoomEventFilter, snapshot storage.DatabaseTransaction) (map[string]types.RecentEvents, error) {
+	var roomIDs []string
+	var newlyJoinedRoomIDs []string
+	for _, delta := range stateDeltas {
+		if delta.NewlyJoined {
+			newlyJoinedRoomIDs = append(newlyJoinedRoomIDs, delta.RoomID)
+		} else {
+			roomIDs = append(roomIDs, delta.RoomID)
+		}
+	}
+	dbEvents := make(map[string]types.RecentEvents)
+	if len(roomIDs) > 0 {
+		events, err := snapshot.RecentEvents(
+			ctx, roomIDs, r,
+			&eventFilter, true, true,
+		)
+		if err != nil {
+			if err != sql.ErrNoRows {
+				return nil, err
+			}
+		}
+		for k, v := range events {
+			dbEvents[k] = v
+		}
+	}
+	if len(newlyJoinedRoomIDs) > 0 {
+		// For rooms that were joined in this sync, try to fetch
+		// as much timeline events as allowed by the filter.
+
+		filter := eventFilter
+		// If we're going backwards, grep at least X events, this is mostly to satisfy Sytest
+		if eventFilter.Limit < recentEventBackwardsLimit {
+			filter.Limit = recentEventBackwardsLimit // TODO: Figure out a better way
+			diff := r.From - r.To
+			if diff > 0 && diff < recentEventBackwardsLimit {
+				filter.Limit = int(diff)
+			}
+		}
+
+		events, err := snapshot.RecentEvents(
+			ctx, newlyJoinedRoomIDs, types.Range{
+				From:      r.To,
+				To:        0,
+				Backwards: true,
+			},
+			&filter, true, true,
+		)
+		if err != nil {
+			if err != sql.ErrNoRows {
+				return nil, err
+			}
+		}
+		for k, v := range events {
+			dbEvents[k] = v
+		}
+	}
+
+	return dbEvents, nil
+}
+
 // Limit the recent events to X when going backwards
 const recentEventBackwardsLimit = 100
 
@@ -253,29 +319,9 @@ func (p *PDUStreamProvider) addRoomDeltaToResponse(
 	eventFilter *synctypes.RoomEventFilter,
 	stateFilter *synctypes.StateFilter,
 	req *types.SyncRequest,
+	dbEvents map[string]types.RecentEvents,
 ) (types.StreamPosition, error) {
 	var err error
-	originalLimit := eventFilter.Limit
-	// If we're going backwards, grep at least X events, this is mostly to satisfy Sytest
-	if r.Backwards && originalLimit < recentEventBackwardsLimit {
-		eventFilter.Limit = recentEventBackwardsLimit // TODO: Figure out a better way
-		diff := r.From - r.To
-		if diff > 0 && diff < recentEventBackwardsLimit {
-			eventFilter.Limit = int(diff)
-		}
-	}
-
-	dbEvents, err := snapshot.RecentEvents(
-		ctx, []string{delta.RoomID}, r,
-		eventFilter, true, true,
-	)
-	if err != nil {
-		if err == sql.ErrNoRows {
-			return r.To, nil
-		}
-		return r.From, fmt.Errorf("p.DB.RecentEvents: %w", err)
-	}
-
 	recentStreamEvents := dbEvents[delta.RoomID].Events
 	limited := dbEvents[delta.RoomID].Limited
 
@@ -337,9 +383,9 @@ func (p *PDUStreamProvider) addRoomDeltaToResponse(
 		logrus.WithError(err).Error("unable to apply history visibility filter")
 	}
 
-	if r.Backwards && len(events) > originalLimit {
+	if r.Backwards && len(events) > eventFilter.Limit {
 		// We're going backwards and the events are ordered chronologically, so take the last `limit` events
-		events = events[len(events)-originalLimit:]
+		events = events[len(events)-eventFilter.Limit:]
 		limited = true
 	}
diff --git a/syncapi/syncapi_test.go b/syncapi/syncapi_test.go
index ac5268511..0392f209a 100644
--- a/syncapi/syncapi_test.go
+++ b/syncapi/syncapi_test.go
@@ -753,24 +753,6 @@ func TestGetMembership(t *testing.T) {
 			},
 			wantOK: false,
 		},
-		{
-			name: "/joined_members - Bob never joined",
-			request: func(t *testing.T, room *test.Room) *http.Request {
-				return test.NewRequest(t, "GET", fmt.Sprintf("/_matrix/client/v3/rooms/%s/joined_members", room.ID), test.WithQueryParams(map[string]string{
-					"access_token": bobDev.AccessToken,
-				}))
-			},
-			wantOK: false,
-		},
-		{
-			name: "/joined_members - Alice joined",
-			request: func(t *testing.T, room *test.Room) *http.Request {
-				return test.NewRequest(t, "GET", fmt.Sprintf("/_matrix/client/v3/rooms/%s/joined_members", room.ID), test.WithQueryParams(map[string]string{
-					"access_token": aliceDev.AccessToken,
-				}))
-			},
-			wantOK: true,
-		},
 		{
 			name: "Alice leaves before Bob joins, should not be able to see Bob",
 			request: func(t *testing.T, room *test.Room) *http.Request {
@@ -809,21 +791,6 @@ func TestGetMembership(t *testing.T) {
 			wantOK:          true,
 			wantMemberCount: 2,
 		},
-		{
-			name: "/joined_members - Alice leaves, shouldn't be able to see members ",
-			request: func(t *testing.T, room *test.Room) *http.Request {
-				return test.NewRequest(t, "GET", fmt.Sprintf("/_matrix/client/v3/rooms/%s/joined_members", room.ID), test.WithQueryParams(map[string]string{
-					"access_token": aliceDev.AccessToken,
-				}))
-			},
-			additionalEvents: func(t *testing.T, room *test.Room) {
-				room.CreateAndInsert(t, alice, spec.MRoomMember, map[string]interface{}{
-					"membership": "leave",
-				}, test.WithStateKey(alice.ID))
-			},
-			useSleep: true,
-			wantOK:   false,
-		},
 		{
 			name: "'at' specified, returns memberships before Bob joins",
 			request: func(t *testing.T, room *test.Room) *http.Request {
@@ -1169,7 +1136,7 @@ func testContext(t *testing.T, dbType test.DBType) {
 		},
 		{
 			name:             "events are not limited",
-			wantBeforeLength: 7,
+			wantBeforeLength: 5,
 		},
 		{
 			name: "all events are limited",
diff --git a/syncapi/types/types.go b/syncapi/types/types.go
index bca11855c..26faf7c05 100644
--- a/syncapi/types/types.go
+++ b/syncapi/types/types.go
@@ -286,8 +286,8 @@ func NewTopologyTokenFromString(tok string) (token TopologyToken, err error) {
 		if i > len(positions) {
 			break
 		}
-		var pos int
-		pos, err = strconv.Atoi(p)
+		var pos int64
+		pos, err = strconv.ParseInt(p, 10, 64)
 		if err != nil {
 			return
 		}
@@ -318,8 +318,8 @@ func NewStreamTokenFromString(tok string) (token StreamingToken, err error) {
 		if i >= len(positions) {
 			break
 		}
-		var pos int
-		pos, err = strconv.Atoi(p)
+		var pos int64
+		pos, err = strconv.ParseInt(p, 10, 64)
 		if err != nil {
 			err = ErrMalformedSyncToken
 			return
diff --git a/syncapi/types/types_test.go b/syncapi/types/types_test.go
index 35e1882cb..6c616ab0d 100644
--- a/syncapi/types/types_test.go
+++ b/syncapi/types/types_test.go
@@ -3,6 +3,7 @@ package types
 import (
 	"context"
 	"encoding/json"
+	"math"
 	"reflect"
 	"testing"
 
@@ -33,12 +34,28 @@ func TestSyncTokens(t *testing.T) {
 		"s3_1_0_0_0_0_2_0_5": StreamingToken{3, 1, 0, 0, 0, 0, 2, 0, 5}.String(),
 		"s3_1_2_3_5_0_0_0_6": StreamingToken{3, 1, 2, 3, 5, 0, 0, 0, 6}.String(),
 		"t3_1":               TopologyToken{3, 1}.String(),
+		"t9223372036854775807_9223372036854775807": TopologyToken{Depth: math.MaxInt64, PDUPosition: math.MaxInt64}.String(),
+		"s9223372036854775807_1_2_3_5_0_0_0_6":     StreamingToken{math.MaxInt64, 1, 2, 3, 5, 0, 0, 0, 6}.String(),
 	}
 
 	for a, b := range shouldPass {
 		if a != b {
 			t.Errorf("expected %q, got %q", a, b)
 		}
+
+		// parse as topology token
+		if a[0] == 't' {
+			if _, err := NewTopologyTokenFromString(a); err != nil {
+				t.Errorf("expected %q to pass, but got %q", a, err)
+			}
+		}
+
+		// parse as sync token
+		if a[0] == 's' {
+			if _, err := NewStreamTokenFromString(a); err != nil {
+				t.Errorf("expected %q to pass, but got %q", a, err)
+			}
+		}
 	}
 
 	shouldFail := []string{
diff --git a/userapi/api/api.go b/userapi/api/api.go
index a0dce9758..d4daec820 100644
--- a/userapi/api/api.go
+++ b/userapi/api/api.go
@@ -379,6 +379,10 @@ type PerformDeviceCreationRequest struct {
 	// update for this account. Generally the only reason to do this is if the account
 	// is an appservice account.
 	NoDeviceListUpdate bool
+
+	// FromRegistration determines if this request comes from registering a new account
+	// and is in most cases false.
+	FromRegistration bool
 }
 
 // PerformDeviceCreationResponse is the response for PerformDeviceCreation
@@ -803,6 +807,10 @@ type PerformUploadKeysRequest struct {
 	// itself doesn't change but it's easier to pretend upload new keys and reuse the same code paths.
 	// Without this flag, requests to modify device display names would delete device keys.
 	OnlyDisplayNameUpdates bool
+
+	// FromRegistration is set if this key upload comes right after creating an account
+	// and determines if we need to inform downstream components.
+	FromRegistration bool
 }
 
 // PerformUploadKeysResponse is the response to PerformUploadKeys
diff --git a/userapi/internal/key_api.go b/userapi/internal/key_api.go
index 786a2dcd8..422898c70 100644
--- a/userapi/internal/key_api.go
+++ b/userapi/internal/key_api.go
@@ -711,9 +711,15 @@ func (a *UserInternalAPI) uploadLocalDeviceKeys(ctx context.Context, req *api.Pe
 		}
 		return
 	}
-	err = emitDeviceKeyChanges(a.KeyChangeProducer, existingKeys, keysToStore, req.OnlyDisplayNameUpdates)
-	if err != nil {
-		util.GetLogger(ctx).Errorf("Failed to emitDeviceKeyChanges: %s", err)
+
+	// If the request does _not_ come right after registering an account
+	// inform downstream components. However, we're fine with just creating the
+	// database entries above in other cases.
+	if !req.FromRegistration {
+		err = emitDeviceKeyChanges(a.KeyChangeProducer, existingKeys, keysToStore, req.OnlyDisplayNameUpdates)
+		if err != nil {
+			util.GetLogger(ctx).Errorf("Failed to emitDeviceKeyChanges: %s", err)
+		}
 	}
 }
diff --git a/userapi/internal/user_api.go b/userapi/internal/user_api.go
index 4e3c2671a..a126dc871 100644
--- a/userapi/internal/user_api.go
+++ b/userapi/internal/user_api.go
@@ -316,7 +316,7 @@ func (a *UserInternalAPI) PerformDeviceCreation(ctx context.Context, req *api.Pe
 		return nil
 	}
 	// create empty device keys and upload them to trigger device list changes
-	return a.deviceListUpdate(dev.UserID, []string{dev.ID})
+	return a.deviceListUpdate(dev.UserID, []string{dev.ID}, req.FromRegistration)
 }
 
 func (a *UserInternalAPI) PerformDeviceDeletion(ctx context.Context, req *api.PerformDeviceDeletionRequest, res *api.PerformDeviceDeletionResponse) error {
@@ -356,10 +356,10 @@ func (a *UserInternalAPI) PerformDeviceDeletion(ctx context.Context, req *api.Pe
 		return fmt.Errorf("a.KeyAPI.PerformDeleteKeys: %w", err)
 	}
 	// create empty device keys and upload them to delete what was once there and trigger device list changes
-	return a.deviceListUpdate(req.UserID, deletedDeviceIDs)
+	return a.deviceListUpdate(req.UserID, deletedDeviceIDs, false)
 }
 
-func (a *UserInternalAPI) deviceListUpdate(userID string, deviceIDs []string) error {
+func (a *UserInternalAPI) deviceListUpdate(userID string, deviceIDs []string, fromRegistration bool) error {
 	deviceKeys := make([]api.DeviceKeys, len(deviceIDs))
 	for i, did := range deviceIDs {
 		deviceKeys[i] = api.DeviceKeys{
@@ -371,8 +371,9 @@ func (a *UserInternalAPI) deviceListUpdate(userID string, deviceIDs []string) er
 
 	var uploadRes api.PerformUploadKeysResponse
 	if err := a.PerformUploadKeys(context.Background(), &api.PerformUploadKeysRequest{
-		UserID:     userID,
-		DeviceKeys: deviceKeys,
+		UserID:           userID,
+		DeviceKeys:       deviceKeys,
+		FromRegistration: fromRegistration,
 	}, &uploadRes); err != nil {
 		return err
 	}
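The new `FromRegistration` flag is only plumbed through the userapi here; the registration-time call site is not part of this excerpt. A minimal sketch, assuming a caller that has just created an account, of how the flag would be used so that no device-list update is fanned out for the very first device. Only the userapi request/response types and the `PerformDeviceCreation` signature come from the hunks above; the package, helper, and elided fields are illustrative.

```go
// Hypothetical registration-time caller. With FromRegistration set, the device
// and its empty device keys are still stored, but because the flag is carried
// into PerformUploadKeysRequest, uploadLocalDeviceKeys skips
// emitDeviceKeyChanges and downstream components are not notified.
package registration // illustrative placement, not part of the patch

import (
	"context"

	"github.com/matrix-org/dendrite/userapi/api"
)

// deviceCreator matches the PerformDeviceCreation signature visible in the
// user_api.go hunk above; any userapi implementation satisfies it.
type deviceCreator interface {
	PerformDeviceCreation(ctx context.Context, req *api.PerformDeviceCreationRequest, res *api.PerformDeviceCreationResponse) error
}

func createDeviceAtRegistration(ctx context.Context, userAPI deviceCreator) error {
	req := &api.PerformDeviceCreationRequest{
		// ... localpart, access token, device ID, etc. elided ...
		NoDeviceListUpdate: false,
		FromRegistration:   true, // new in this patch: suppress the device-list fan-out
	}
	var res api.PerformDeviceCreationResponse
	return userAPI.PerformDeviceCreation(ctx, req, &res)
}
```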