Merge branch 'main' of github.com:matrix-org/dendrite into i2p-demo

Till Faelligen 2024-02-29 18:52:56 +01:00
commit 4f23dbaae4
No known key found for this signature in database
GPG key ID: 3DF82D8AB9211D4E
78 changed files with 1714 additions and 444 deletions


@@ -7,7 +7,7 @@ coverage:
   project:
     default:
       target: auto
-      threshold: 0%
+      threshold: 0.1%
       base: auto
 flags:
   - unittests


@@ -28,10 +28,10 @@ jobs:
     runs-on: ubuntu-latest
     if: ${{ false }} # disable for now
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Install Go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v4
         with:
           go-version: "stable"
           cache: true
@@ -41,7 +41,7 @@ jobs:
         with:
           node-version: 14
-      - uses: actions/cache@v3
+      - uses: actions/cache@v4
         with:
           path: ~/.npm
           key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
@@ -66,11 +66,11 @@ jobs:
     name: Linting
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
      - name: Install libolm
        run: sudo apt-get install libolm-dev libolm3
       - name: Install Go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v4
         with:
           go-version: "stable"
       - name: golangci-lint
@@ -102,14 +102,14 @@ jobs:
           --health-timeout 5s
           --health-retries 5
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Install libolm
         run: sudo apt-get install libolm-dev libolm3
       - name: Setup go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v4
         with:
           go-version: "stable"
-      - uses: actions/cache@v3
+      - uses: actions/cache@v4
        # manually set up caches, as they otherwise clash with different steps using setup-go with cache=true
         with:
           path: |
@@ -141,12 +141,12 @@ jobs:
         goos: ["linux"]
         goarch: ["amd64", "386"]
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Setup go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v4
         with:
           go-version: "stable"
-      - uses: actions/cache@v3
+      - uses: actions/cache@v4
         with:
           path: |
             ~/.cache/go-build
@@ -174,12 +174,12 @@ jobs:
         goos: ["windows"]
         goarch: ["amd64"]
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Setup Go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v4
         with:
           go-version: "stable"
-      - uses: actions/cache@v3
+      - uses: actions/cache@v4
         with:
           path: |
             ~/.cache/go-build
@@ -235,11 +235,11 @@ jobs:
           --health-timeout 5s
           --health-retries 5
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Install libolm
         run: sudo apt-get install libolm-dev libolm3
       - name: Setup go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v4
         with:
           go-version: "stable"
       - name: Set up gotestfmt
@@ -247,7 +247,7 @@ jobs:
         with:
           # Optional: pass GITHUB_TOKEN to avoid rate limiting.
           token: ${{ secrets.GITHUB_TOKEN }}
-      - uses: actions/cache@v3
+      - uses: actions/cache@v4
         with:
           path: |
             ~/.cache/go-build
@@ -262,10 +262,11 @@ jobs:
           POSTGRES_PASSWORD: postgres
           POSTGRES_DB: dendrite
       - name: Upload coverage to Codecov
-        uses: codecov/codecov-action@v3
+        uses: codecov/codecov-action@v4
         with:
           flags: unittests
           fail_ci_if_error: true
+          token: ${{ secrets.CODECOV_TOKEN }}

   # run database upgrade tests
   upgrade_test:
@@ -274,12 +275,20 @@ jobs:
     needs: initial-tests-done
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Setup go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v4
         with:
           go-version: "stable"
           cache: true
+      - uses: actions/cache@v4
+        with:
+          path: |
+            ~/.cache/go-build
+            ~/go/pkg/mod
+          key: ${{ runner.os }}-go-upgrade-test-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-go-upgrade-test-
       - name: Docker version
         run: docker version
       - name: Build upgrade-tests
@@ -296,12 +305,20 @@ jobs:
     needs: initial-tests-done
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Setup go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v4
         with:
           go-version: "stable"
           cache: true
+      - uses: actions/cache@v4
+        with:
+          path: |
+            ~/.cache/go-build
+            ~/go/pkg/mod
+          key: ${{ runner.os }}-go-upgrade-direct-test-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-go-upgrade-direct-test-
       - name: Docker version
         run: docker version
       - name: Build upgrade-tests
@@ -340,8 +357,8 @@ jobs:
       SYTEST_BRANCH: ${{ github.head_ref }}
       CGO_ENABLED: ${{ matrix.cgo && 1 }}
     steps:
-      - uses: actions/checkout@v3
-      - uses: actions/cache@v3
+      - uses: actions/checkout@v4
+      - uses: actions/cache@v4
        with:
          path: |
            ~/.cache/go-build
@@ -364,7 +381,7 @@ jobs:
         run: /src/are-we-synapse-yet.py /logs/results.tap -v
         continue-on-error: true # not fatal
       - name: Upload Sytest logs
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v4
         if: ${{ always() }}
         with:
           name: Sytest Logs - ${{ job.status }} - (Dendrite, ${{ join(matrix.*, ', ') }})
@@ -404,8 +421,8 @@ jobs:
         run: |
           sudo apt-get update && sudo apt-get install -y libolm3 libolm-dev
           go install github.com/gotesttools/gotestfmt/v2/cmd/gotestfmt@latest
-      - name: Run actions/checkout@v3 for dendrite
-        uses: actions/checkout@v3
+      - name: Run actions/checkout@v4 for dendrite
+        uses: actions/checkout@v4
         with:
           path: dendrite

@@ -27,22 +27,22 @@ jobs:
       security-events: write # To upload Trivy sarif files
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       - name: Get release tag & build flags
         if: github.event_name == 'release' # Only for GitHub releases
         run: |
           echo "RELEASE_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV
       - name: Set up QEMU
-        uses: docker/setup-qemu-action@v1
+        uses: docker/setup-qemu-action@v3
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@v3
       - name: Login to Docker Hub
-        uses: docker/login-action@v2
+        uses: docker/login-action@v3
         with:
           username: ${{ env.DOCKER_HUB_USER }}
           password: ${{ secrets.DOCKER_TOKEN }}
       - name: Login to GitHub Containers
-        uses: docker/login-action@v2
+        uses: docker/login-action@v3
         with:
           registry: ghcr.io
           username: ${{ github.repository_owner }}
@@ -98,22 +98,22 @@ jobs:
       packages: write
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       - name: Get release tag & build flags
         if: github.event_name == 'release' # Only for GitHub releases
         run: |
           echo "RELEASE_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV
       - name: Set up QEMU
-        uses: docker/setup-qemu-action@v1
+        uses: docker/setup-qemu-action@v3
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@v3
       - name: Login to Docker Hub
-        uses: docker/login-action@v2
+        uses: docker/login-action@v3
         with:
           username: ${{ env.DOCKER_HUB_USER }}
           password: ${{ secrets.DOCKER_TOKEN }}
       - name: Login to GitHub Containers
-        uses: docker/login-action@v2
+        uses: docker/login-action@v3
         with:
           registry: ghcr.io
           username: ${{ github.repository_owner }}
@@ -159,22 +159,22 @@ jobs:
       packages: write
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       - name: Get release tag & build flags
         if: github.event_name == 'release' # Only for GitHub releases
         run: |
           echo "RELEASE_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV
       - name: Set up QEMU
-        uses: docker/setup-qemu-action@v1
+        uses: docker/setup-qemu-action@v3
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@v3
       - name: Login to Docker Hub
-        uses: docker/login-action@v2
+        uses: docker/login-action@v3
         with:
           username: ${{ env.DOCKER_HUB_USER }}
           password: ${{ secrets.DOCKER_TOKEN }}
       - name: Login to GitHub Containers
-        uses: docker/login-action@v2
+        uses: docker/login-action@v3
         with:
           registry: ghcr.io
           username: ${{ github.repository_owner }}


@@ -28,7 +28,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       - name: Setup Pages
         uses: actions/configure-pages@v2
       - name: Build with Jekyll


@@ -17,7 +17,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v4
         with:
           fetch-depth: 0


@@ -17,7 +17,7 @@ jobs:
     outputs:
       changed: ${{ steps.list-changed.outputs.changed }}
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
       - uses: azure/setup-helm@v3
@@ -48,7 +48,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           fetch-depth: 0
           ref: ${{ inputs.checkoutCommit }}
@@ -66,7 +66,7 @@ jobs:
       - name: Create k3d cluster
         uses: nolar/setup-k3d-k3s@v1
         with:
-          version: v1.21
+          version: v1.28
       - name: Remove node taints
         run: |
           kubectl taint --all=true nodes node.cloudprovider.kubernetes.io/uninitialized- || true


@@ -10,8 +10,26 @@ concurrency:
   cancel-in-progress: true

 jobs:
+  check_date: # https://stackoverflow.com/questions/63014786/how-to-schedule-a-github-actions-nightly-build-but-run-it-only-when-there-where
+    runs-on: ubuntu-latest
+    name: Check latest commit
+    outputs:
+      should_run: ${{ steps.should_run.outputs.should_run }}
+    steps:
+      - uses: actions/checkout@v4
+      - name: print latest_commit
+        run: echo ${{ github.sha }}
+      - id: should_run
+        continue-on-error: true
+        name: check latest commit is less than a day
+        if: ${{ github.event_name == 'schedule' }}
+        run: test -z $(git rev-list --after="24 hours" ${{ github.sha }}) && echo "::set-output name=should_run::false"
+
   # run Sytest in different variations
   sytest:
+    needs: check_date
+    if: ${{ needs.check_date.outputs.should_run != 'false' }}
     timeout-minutes: 60
     name: "Sytest (${{ matrix.label }})"
     runs-on: ubuntu-latest
@@ -38,8 +56,8 @@ jobs:
       RACE_DETECTION: 1
       COVER: 1
     steps:
-      - uses: actions/checkout@v3
-      - uses: actions/cache@v3
+      - uses: actions/checkout@v4
+      - uses: actions/cache@v4
        with:
          path: |
            ~/.cache/go-build
@@ -62,7 +80,7 @@ jobs:
         run: /src/are-we-synapse-yet.py /logs/results.tap -v
         continue-on-error: true # not fatal
       - name: Upload Sytest logs
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v4
         if: ${{ always() }}
         with:
           name: Sytest Logs - ${{ job.status }} - (Dendrite ${{ join(matrix.*, ' ') }})
@@ -75,31 +93,34 @@ jobs:
     timeout-minutes: 5
     name: "Sytest Coverage"
     runs-on: ubuntu-latest
-    needs: sytest # only run once Sytest is done
-    if: ${{ always() }}
+    needs: [ sytest, check_date ] # only run once Sytest is done and there was a commit
+    if: ${{ always() && needs.check_date.outputs.should_run != 'false' }}
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Install Go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v4
         with:
           go-version: 'stable'
           cache: true
       - name: Download all artifacts
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v4
       - name: Collect coverage
         run: |
           go tool covdata textfmt -i="$(find Sytest* -name 'covmeta*' -type f -exec dirname {} \; | uniq | paste -s -d ',' -)" -o sytest.cov
           grep -Ev 'relayapi|setup/mscs|api_trace' sytest.cov > final.cov
           go tool covdata func -i="$(find Sytest* -name 'covmeta*' -type f -exec dirname {} \; | uniq | paste -s -d ',' -)"
       - name: Upload coverage to Codecov
-        uses: codecov/codecov-action@v3
+        uses: codecov/codecov-action@v4
         with:
           files: ./final.cov
           flags: sytest
           fail_ci_if_error: true
+          token: ${{ secrets.CODECOV_TOKEN }}

   # run Complement
   complement:
+    needs: check_date
+    if: ${{ needs.check_date.outputs.should_run != 'false' }}
     name: "Complement (${{ matrix.label }})"
     timeout-minutes: 60
     runs-on: ubuntu-latest
@@ -129,8 +150,8 @@ jobs:
         run: |
           sudo apt-get update && sudo apt-get install -y libolm3 libolm-dev
           go install github.com/gotesttools/gotestfmt/v2/cmd/gotestfmt@latest
-      - name: Run actions/checkout@v3 for dendrite
-        uses: actions/checkout@v3
+      - name: Run actions/checkout@v4 for dendrite
+        uses: actions/checkout@v4
         with:
           path: dendrite
@@ -174,7 +195,7 @@ jobs:
       # Run Complement
       - run: |
           set -o pipefail &&
-          go test -v -json -tags dendrite_blacklist ./tests/... 2>&1 | gotestfmt
+          go test -v -json -tags dendrite_blacklist ./tests ./tests/csapi 2>&1 | gotestfmt -hide all
         shell: bash
         name: Run Complement Tests
         env:
@@ -185,7 +206,7 @@ jobs:
         working-directory: complement
       - name: Upload Complement logs
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v4
         if: ${{ always() }}
         with:
           name: Complement Logs - (Dendrite ${{ join(matrix.*, ' ') }})
@@ -196,30 +217,32 @@ jobs:
     timeout-minutes: 5
     name: "Complement Coverage"
     runs-on: ubuntu-latest
-    needs: complement # only run once Complement is done
-    if: ${{ always() }}
+    needs: [ complement, check_date ] # only run once Complements is done and there was a commit
+    if: ${{ always() && needs.check_date.outputs.should_run != 'false' }}
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Install Go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v4
         with:
           go-version: 'stable'
           cache: true
       - name: Download all artifacts
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v4
       - name: Collect coverage
         run: |
           go tool covdata textfmt -i="$(find Complement* -name 'covmeta*' -type f -exec dirname {} \; | uniq | paste -s -d ',' -)" -o complement.cov
           grep -Ev 'relayapi|setup/mscs|api_trace' complement.cov > final.cov
           go tool covdata func -i="$(find Complement* -name 'covmeta*' -type f -exec dirname {} \; | uniq | paste -s -d ',' -)"
       - name: Upload coverage to Codecov
-        uses: codecov/codecov-action@v3
+        uses: codecov/codecov-action@v4
         with:
           files: ./final.cov
           flags: complement
           fail_ci_if_error: true
+          token: ${{ secrets.CODECOV_TOKEN }} # required

   element-web:
+    if: ${{ false }} # disable for now, as Cypress has been replaced by Playwright
     timeout-minutes: 120
     runs-on: ubuntu-latest
     steps:
@@ -228,7 +251,7 @@ jobs:
           # Our test suite includes some screenshot tests with unusual diacritics, which are
           # supposed to be covered by STIXGeneral.
           tools: fonts-stix
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v4
         with:
           repository: matrix-org/matrix-react-sdk
       - uses: actions/setup-node@v3
@@ -259,6 +282,7 @@ jobs:
           TMPDIR: ${{ runner.temp }}

   element-web-pinecone:
+    if: ${{ false }} # disable for now, as Cypress has been replaced by Playwright
     timeout-minutes: 120
     runs-on: ubuntu-latest
     steps:
@@ -267,7 +291,7 @@ jobs:
           # Our test suite includes some screenshot tests with unusual diacritics, which are
           # supposed to be covered by STIXGeneral.
           tools: fonts-stix
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v4
         with:
           repository: matrix-org/matrix-react-sdk
       - uses: actions/setup-node@v3

.gitignore

@@ -77,4 +77,7 @@ media_store/
 build
 # golang workspaces
 go.work*
+
+# helm chart
+helm/dendrite/charts/


@@ -1,5 +1,26 @@
 # Changelog

+## Dendrite 0.13.6 (2024-01-26)
+
+Upgrading to this version is **highly** recommended, as it contains several QoL improvements.
+
+### Fixes
+
+- Use `AckExplicitPolicy` for JetStream consumers, so messages don't pile up in NATS
+- A rare panic when assigning a state key NID has been fixed
+- A rare panic when checking powerlevels has been fixed
+- Notary keys requests for all keys now work correctly
+- Spec compliance:
+  - Return `M_INVALID_PARAM` when querying room aliases
+  - Handle empty `from` parameter when requesting `/messages`
+  - Add CORP headers on media endpoints
+  - Remove `aliases` from `/publicRooms` responses
+  - Allow `+` in MXIDs (Contributed by [RosstheRoss](https://github.com/RosstheRoss))
+- Fixes membership transitions from `knock` to `join` in `knock_restricted` rooms
+- Incremental syncs now batch querying events (Contributed by [recht](https://github.com/recht))
+- Move `/joined_members` back to the clientAPI/roomserver, which should make bridges happier again
+- Backfilling from other servers now only uses at max 100 events instead of potentially thousands
+
 ## Dendrite 0.13.5 (2023-12-12)

 Upgrading to this version is **highly** recommended, as it fixes several long-standing bugs in


@@ -82,9 +82,17 @@ type UserIDExistsResponse struct {
 }

 const (
-    ASProtocolPath = "/_matrix/app/unstable/thirdparty/protocol/"
-    ASUserPath     = "/_matrix/app/unstable/thirdparty/user"
-    ASLocationPath = "/_matrix/app/unstable/thirdparty/location"
+    ASProtocolLegacyPath        = "/_matrix/app/unstable/thirdparty/protocol/"
+    ASUserLegacyPath            = "/_matrix/app/unstable/thirdparty/user"
+    ASLocationLegacyPath        = "/_matrix/app/unstable/thirdparty/location"
+    ASRoomAliasExistsLegacyPath = "/rooms/"
+    ASUserExistsLegacyPath      = "/users/"
+    ASProtocolPath              = "/_matrix/app/v1/thirdparty/protocol/"
+    ASUserPath                  = "/_matrix/app/v1/thirdparty/user"
+    ASLocationPath              = "/_matrix/app/v1/thirdparty/location"
+    ASRoomAliasExistsPath       = "/_matrix/app/v1/rooms/"
+    ASUserExistsPath            = "/_matrix/app/v1/users/"
 )

 type ProtocolRequest struct {


@@ -206,13 +206,21 @@ func (s *OutputRoomEventConsumer) sendEvents(
     }

     // Send the transaction to the appservice.
-    // https://matrix.org/docs/spec/application_service/r0.1.2#put-matrix-app-v1-transactions-txnid
-    address := fmt.Sprintf("%s/transactions/%s?access_token=%s", state.RequestUrl(), txnID, url.QueryEscape(state.HSToken))
+    // https://spec.matrix.org/v1.9/application-service-api/#pushing-events
+    path := "_matrix/app/v1/transactions"
+    if s.cfg.LegacyPaths {
+        path = "transactions"
+    }
+    address := fmt.Sprintf("%s/%s/%s", state.RequestUrl(), path, txnID)
+    if s.cfg.LegacyAuth {
+        address += "?access_token=" + url.QueryEscape(state.HSToken)
+    }
     req, err := http.NewRequestWithContext(ctx, "PUT", address, bytes.NewBuffer(transaction))
     if err != nil {
         return err
     }
     req.Header.Set("Content-Type", "application/json")
+    req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", state.HSToken))
     resp, err := state.HTTPClient.Do(req)
     if err != nil {
         return state.backoffAndPause(err)
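For reference, here is a minimal, self-contained sketch of the address/auth selection the new consumer code performs. It is not Dendrite's actual types; the base URL, transaction ID, and token below are made-up placeholders.

```go
package main

import (
	"fmt"
	"net/url"
)

// transactionAddress mirrors the diff above: the v1 prefixed path is the default,
// the unprefixed path is only used with legacy_paths, and the access_token query
// parameter is only appended with legacy_auth (the Bearer header is always sent).
func transactionAddress(base, txnID, hsToken string, legacyPaths, legacyAuth bool) (addr, authHeader string) {
	path := "_matrix/app/v1/transactions"
	if legacyPaths {
		path = "transactions"
	}
	addr = fmt.Sprintf("%s/%s/%s", base, path, txnID)
	if legacyAuth {
		addr += "?access_token=" + url.QueryEscape(hsToken)
	}
	authHeader = "Bearer " + hsToken
	return addr, authHeader
}

func main() {
	addr, auth := transactionAddress("https://bridge.example.com", "42", "hs_secret", false, false)
	fmt.Println(addr)
	fmt.Println(auth)
	// Output:
	// https://bridge.example.com/_matrix/app/v1/transactions/42
	// Bearer hs_secret
}
```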


@@ -19,10 +19,10 @@ package query
 import (
     "context"
     "encoding/json"
+    "fmt"
     "io"
     "net/http"
     "net/url"
-    "strings"
     "sync"

     log "github.com/sirupsen/logrus"
@@ -32,9 +32,6 @@ import (
     "github.com/matrix-org/dendrite/setup/config"
 )

-const roomAliasExistsPath = "/rooms/"
-const userIDExistsPath = "/users/"

 // AppServiceQueryAPI is an implementation of api.AppServiceQueryAPI
 type AppServiceQueryAPI struct {
     Cfg *config.AppServiceAPI
@@ -55,14 +52,23 @@ func (a *AppServiceQueryAPI) RoomAliasExists(
     // Determine which application service should handle this request
     for _, appservice := range a.Cfg.Derived.ApplicationServices {
         if appservice.URL != "" && appservice.IsInterestedInRoomAlias(request.Alias) {
+            path := api.ASRoomAliasExistsPath
+            if a.Cfg.LegacyPaths {
+                path = api.ASRoomAliasExistsLegacyPath
+            }
             // The full path to the rooms API, includes hs token
-            URL, err := url.Parse(appservice.RequestUrl() + roomAliasExistsPath)
+            URL, err := url.Parse(appservice.RequestUrl() + path)
             if err != nil {
                 return err
             }
             URL.Path += request.Alias
-            apiURL := URL.String() + "?access_token=" + appservice.HSToken
+            if a.Cfg.LegacyAuth {
+                q := URL.Query()
+                q.Set("access_token", appservice.HSToken)
+                URL.RawQuery = q.Encode()
+            }
+            apiURL := URL.String()

             // Send a request to each application service. If one responds that it has
             // created the room, immediately return.
@@ -70,6 +76,7 @@ func (a *AppServiceQueryAPI) RoomAliasExists(
             if err != nil {
                 return err
             }
+            req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", appservice.HSToken))
             req = req.WithContext(ctx)

             resp, err := appservice.HTTPClient.Do(req)
@@ -123,12 +130,21 @@ func (a *AppServiceQueryAPI) UserIDExists(
     for _, appservice := range a.Cfg.Derived.ApplicationServices {
         if appservice.URL != "" && appservice.IsInterestedInUserID(request.UserID) {
             // The full path to the rooms API, includes hs token
-            URL, err := url.Parse(appservice.RequestUrl() + userIDExistsPath)
+            path := api.ASUserExistsPath
+            if a.Cfg.LegacyPaths {
+                path = api.ASUserExistsLegacyPath
+            }
+            URL, err := url.Parse(appservice.RequestUrl() + path)
             if err != nil {
                 return err
             }
             URL.Path += request.UserID
-            apiURL := URL.String() + "?access_token=" + appservice.HSToken
+            if a.Cfg.LegacyAuth {
+                q := URL.Query()
+                q.Set("access_token", appservice.HSToken)
+                URL.RawQuery = q.Encode()
+            }
+            apiURL := URL.String()

             // Send a request to each application service. If one responds that it has
             // created the user, immediately return.
@@ -136,6 +152,7 @@ func (a *AppServiceQueryAPI) UserIDExists(
             if err != nil {
                 return err
             }
+            req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", appservice.HSToken))
             resp, err := appservice.HTTPClient.Do(req.WithContext(ctx))
             if resp != nil {
                 defer func() {
@@ -176,25 +193,22 @@ type thirdpartyResponses interface {
     api.ASProtocolResponse | []api.ASUserResponse | []api.ASLocationResponse
 }

-func requestDo[T thirdpartyResponses](client *http.Client, url string, response *T) (err error) {
-    origURL := url
-    // try v1 and unstable appservice endpoints
-    for _, version := range []string{"v1", "unstable"} {
-        var resp *http.Response
-        var body []byte
-        asURL := strings.Replace(origURL, "unstable", version, 1)
-        resp, err = client.Get(asURL)
-        if err != nil {
-            continue
-        }
-        defer resp.Body.Close() // nolint: errcheck
-        body, err = io.ReadAll(resp.Body)
-        if err != nil {
-            continue
-        }
-        return json.Unmarshal(body, &response)
-    }
-    return err
+func requestDo[T thirdpartyResponses](as *config.ApplicationService, url string, response *T) error {
+    req, err := http.NewRequest(http.MethodGet, url, nil)
+    if err != nil {
+        return err
+    }
+    req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", as.HSToken))
+    resp, err := as.HTTPClient.Do(req)
+    if err != nil {
+        return err
+    }
+    defer resp.Body.Close() // nolint: errcheck
+    body, err := io.ReadAll(resp.Body)
+    if err != nil {
+        return err
+    }
+    return json.Unmarshal(body, &response)
 }

 func (a *AppServiceQueryAPI) Locations(
@@ -207,16 +221,22 @@ func (a *AppServiceQueryAPI) Locations(
         return err
     }

+    path := api.ASLocationPath
+    if a.Cfg.LegacyPaths {
+        path = api.ASLocationLegacyPath
+    }
     for _, as := range a.Cfg.Derived.ApplicationServices {
         var asLocations []api.ASLocationResponse
-        params.Set("access_token", as.HSToken)
+        if a.Cfg.LegacyAuth {
+            params.Set("access_token", as.HSToken)
+        }

-        url := as.RequestUrl() + api.ASLocationPath
+        url := as.RequestUrl() + path
         if req.Protocol != "" {
             url += "/" + req.Protocol
         }

-        if err := requestDo[[]api.ASLocationResponse](as.HTTPClient, url+"?"+params.Encode(), &asLocations); err != nil {
+        if err := requestDo[[]api.ASLocationResponse](&as, url+"?"+params.Encode(), &asLocations); err != nil {
             log.WithError(err).WithField("application_service", as.ID).Error("unable to get 'locations' from application service")
             continue
         }
@@ -242,16 +262,22 @@ func (a *AppServiceQueryAPI) User(
         return err
     }

+    path := api.ASUserPath
+    if a.Cfg.LegacyPaths {
+        path = api.ASUserLegacyPath
+    }
     for _, as := range a.Cfg.Derived.ApplicationServices {
         var asUsers []api.ASUserResponse
-        params.Set("access_token", as.HSToken)
+        if a.Cfg.LegacyAuth {
+            params.Set("access_token", as.HSToken)
+        }

-        url := as.RequestUrl() + api.ASUserPath
+        url := as.RequestUrl() + path
         if req.Protocol != "" {
             url += "/" + req.Protocol
         }

-        if err := requestDo[[]api.ASUserResponse](as.HTTPClient, url+"?"+params.Encode(), &asUsers); err != nil {
+        if err := requestDo[[]api.ASUserResponse](&as, url+"?"+params.Encode(), &asUsers); err != nil {
             log.WithError(err).WithField("application_service", as.ID).Error("unable to get 'user' from application service")
             continue
         }
@@ -272,6 +298,10 @@ func (a *AppServiceQueryAPI) Protocols(
     req *api.ProtocolRequest,
     resp *api.ProtocolResponse,
 ) error {
+    protocolPath := api.ASProtocolPath
+    if a.Cfg.LegacyPaths {
+        protocolPath = api.ASProtocolLegacyPath
+    }

     // get a single protocol response
     if req.Protocol != "" {
@@ -289,7 +319,7 @@ func (a *AppServiceQueryAPI) Protocols(
         response := api.ASProtocolResponse{}
         for _, as := range a.Cfg.Derived.ApplicationServices {
             var proto api.ASProtocolResponse
-            if err := requestDo[api.ASProtocolResponse](as.HTTPClient, as.RequestUrl()+api.ASProtocolPath+req.Protocol, &proto); err != nil {
+            if err := requestDo[api.ASProtocolResponse](&as, as.RequestUrl()+protocolPath+req.Protocol, &proto); err != nil {
                 log.WithError(err).WithField("application_service", as.ID).Error("unable to get 'protocol' from application service")
                 continue
             }
@@ -319,7 +349,7 @@ func (a *AppServiceQueryAPI) Protocols(
     for _, as := range a.Cfg.Derived.ApplicationServices {
         for _, p := range as.Protocols {
             var proto api.ASProtocolResponse
-            if err := requestDo[api.ASProtocolResponse](as.HTTPClient, as.RequestUrl()+api.ASProtocolPath+p, &proto); err != nil {
+            if err := requestDo[api.ASProtocolResponse](&as, as.RequestUrl()+protocolPath+p, &proto); err != nil {
                 log.WithError(err).WithField("application_service", as.ID).Error("unable to get 'protocol' from application service")
                 continue
             }


@@ -958,7 +958,8 @@ func TestCapabilities(t *testing.T) {
     cm := sqlutil.NewConnectionManager(processCtx, cfg.Global.DatabaseOptions)

     // Needed to create accounts
-    rsAPI := roomserver.NewInternalAPI(processCtx, cfg, cm, &natsInstance, nil, caching.DisableMetrics)
+    caches := caching.NewRistrettoCache(128*1024*1024, time.Hour, caching.DisableMetrics)
+    rsAPI := roomserver.NewInternalAPI(processCtx, cfg, cm, &natsInstance, caches, caching.DisableMetrics)
     rsAPI.SetFederationAPI(nil, nil)
     userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil, caching.DisableMetrics, testIsBlacklistedOrBackingOff)
     // We mostly need the rsAPI/userAPI for this test, so nil for other APIs etc.
@@ -1005,7 +1006,8 @@ func TestTurnserver(t *testing.T) {
     cm := sqlutil.NewConnectionManager(processCtx, cfg.Global.DatabaseOptions)

     // Needed to create accounts
-    rsAPI := roomserver.NewInternalAPI(processCtx, cfg, cm, &natsInstance, nil, caching.DisableMetrics)
+    caches := caching.NewRistrettoCache(128*1024*1024, time.Hour, caching.DisableMetrics)
+    rsAPI := roomserver.NewInternalAPI(processCtx, cfg, cm, &natsInstance, caches, caching.DisableMetrics)
     rsAPI.SetFederationAPI(nil, nil)
     userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil, caching.DisableMetrics, testIsBlacklistedOrBackingOff)
     //rsAPI.SetUserAPI(userAPI)
@@ -1103,7 +1105,8 @@ func Test3PID(t *testing.T) {
     cm := sqlutil.NewConnectionManager(processCtx, cfg.Global.DatabaseOptions)

     // Needed to create accounts
-    rsAPI := roomserver.NewInternalAPI(processCtx, cfg, cm, &natsInstance, nil, caching.DisableMetrics)
+    caches := caching.NewRistrettoCache(128*1024*1024, time.Hour, caching.DisableMetrics)
+    rsAPI := roomserver.NewInternalAPI(processCtx, cfg, cm, &natsInstance, caches, caching.DisableMetrics)
     rsAPI.SetFederationAPI(nil, nil)
     userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil, caching.DisableMetrics, testIsBlacklistedOrBackingOff)
     // We mostly need the rsAPI/userAPI for this test, so nil for other APIs etc.
@@ -2151,3 +2154,195 @@ func TestKeyBackup(t *testing.T) {
         }
     })
 }
func TestGetMembership(t *testing.T) {
alice := test.NewUser(t)
bob := test.NewUser(t)
testCases := []struct {
name string
roomID string
user *test.User
additionalEvents func(t *testing.T, room *test.Room)
request func(t *testing.T, room *test.Room, accessToken string) *http.Request
wantOK bool
wantMemberCount int
}{
{
name: "/joined_members - Bob never joined",
user: bob,
request: func(t *testing.T, room *test.Room, accessToken string) *http.Request {
return test.NewRequest(t, "GET", fmt.Sprintf("/_matrix/client/v3/rooms/%s/joined_members", room.ID), test.WithQueryParams(map[string]string{
"access_token": accessToken,
}))
},
wantOK: false,
},
{
name: "/joined_members - Alice joined",
user: alice,
request: func(t *testing.T, room *test.Room, accessToken string) *http.Request {
return test.NewRequest(t, "GET", fmt.Sprintf("/_matrix/client/v3/rooms/%s/joined_members", room.ID), test.WithQueryParams(map[string]string{
"access_token": accessToken,
}))
},
wantOK: true,
wantMemberCount: 1,
},
{
name: "/joined_members - Alice leaves, shouldn't be able to see members ",
user: alice,
request: func(t *testing.T, room *test.Room, accessToken string) *http.Request {
return test.NewRequest(t, "GET", fmt.Sprintf("/_matrix/client/v3/rooms/%s/joined_members", room.ID), test.WithQueryParams(map[string]string{
"access_token": accessToken,
}))
},
additionalEvents: func(t *testing.T, room *test.Room) {
room.CreateAndInsert(t, alice, spec.MRoomMember, map[string]interface{}{
"membership": "leave",
}, test.WithStateKey(alice.ID))
},
wantOK: false,
},
{
name: "/joined_members - Bob joins, Alice sees two members",
user: alice,
request: func(t *testing.T, room *test.Room, accessToken string) *http.Request {
return test.NewRequest(t, "GET", fmt.Sprintf("/_matrix/client/v3/rooms/%s/joined_members", room.ID), test.WithQueryParams(map[string]string{
"access_token": accessToken,
}))
},
additionalEvents: func(t *testing.T, room *test.Room) {
room.CreateAndInsert(t, bob, spec.MRoomMember, map[string]interface{}{
"membership": "join",
}, test.WithStateKey(bob.ID))
},
wantOK: true,
wantMemberCount: 2,
},
}
test.WithAllDatabases(t, func(t *testing.T, dbType test.DBType) {
cfg, processCtx, close := testrig.CreateConfig(t, dbType)
routers := httputil.NewRouters()
cm := sqlutil.NewConnectionManager(processCtx, cfg.Global.DatabaseOptions)
caches := caching.NewRistrettoCache(128*1024*1024, time.Hour, caching.DisableMetrics)
defer close()
natsInstance := jetstream.NATSInstance{}
jsctx, _ := natsInstance.Prepare(processCtx, &cfg.Global.JetStream)
defer jetstream.DeleteAllStreams(jsctx, &cfg.Global.JetStream)
// Use an actual roomserver for this
rsAPI := roomserver.NewInternalAPI(processCtx, cfg, cm, &natsInstance, caches, caching.DisableMetrics)
rsAPI.SetFederationAPI(nil, nil)
userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil, caching.DisableMetrics, testIsBlacklistedOrBackingOff)
// We mostly need the rsAPI for this test, so nil for other APIs/caches etc.
AddPublicRoutes(processCtx, routers, cfg, &natsInstance, nil, rsAPI, nil, nil, nil, userAPI, nil, nil, caching.DisableMetrics)
accessTokens := map[*test.User]userDevice{
alice: {},
bob: {},
}
createAccessTokens(t, accessTokens, userAPI, processCtx.Context(), routers)
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
room := test.NewRoom(t, alice)
t.Cleanup(func() {
t.Logf("running cleanup for %s", tc.name)
})
// inject additional events
if tc.additionalEvents != nil {
tc.additionalEvents(t, room)
}
if err := api.SendEvents(context.Background(), rsAPI, api.KindNew, room.Events(), "test", "test", "test", nil, false); err != nil {
t.Fatalf("failed to send events: %v", err)
}
w := httptest.NewRecorder()
routers.Client.ServeHTTP(w, tc.request(t, room, accessTokens[tc.user].accessToken))
if w.Code != 200 && tc.wantOK {
t.Logf("%s", w.Body.String())
t.Fatalf("got HTTP %d want %d", w.Code, 200)
}
t.Logf("[%s] Resp: %s", tc.name, w.Body.String())
// check we got the expected events
if tc.wantOK {
memberCount := len(gjson.GetBytes(w.Body.Bytes(), "joined").Map())
if memberCount != tc.wantMemberCount {
t.Fatalf("expected %d members, got %d", tc.wantMemberCount, memberCount)
}
}
})
}
})
}
func TestCreateRoomInvite(t *testing.T) {
alice := test.NewUser(t)
bob := test.NewUser(t)
test.WithAllDatabases(t, func(t *testing.T, dbType test.DBType) {
cfg, processCtx, close := testrig.CreateConfig(t, dbType)
routers := httputil.NewRouters()
cm := sqlutil.NewConnectionManager(processCtx, cfg.Global.DatabaseOptions)
caches := caching.NewRistrettoCache(128*1024*1024, time.Hour, caching.DisableMetrics)
defer close()
natsInstance := jetstream.NATSInstance{}
jsctx, _ := natsInstance.Prepare(processCtx, &cfg.Global.JetStream)
defer jetstream.DeleteAllStreams(jsctx, &cfg.Global.JetStream)
// Use an actual roomserver for this
rsAPI := roomserver.NewInternalAPI(processCtx, cfg, cm, &natsInstance, caches, caching.DisableMetrics)
rsAPI.SetFederationAPI(nil, nil)
userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, nil, caching.DisableMetrics, testIsBlacklistedOrBackingOff)
// We mostly need the rsAPI for this test, so nil for other APIs/caches etc.
AddPublicRoutes(processCtx, routers, cfg, &natsInstance, nil, rsAPI, nil, nil, nil, userAPI, nil, nil, caching.DisableMetrics)
accessTokens := map[*test.User]userDevice{
alice: {},
}
createAccessTokens(t, accessTokens, userAPI, processCtx.Context(), routers)
reqBody := map[string]any{
"invite": []string{bob.ID},
}
body, err := json.Marshal(reqBody)
if err != nil {
t.Fatal(err)
}
w := httptest.NewRecorder()
req := httptest.NewRequest(http.MethodPost, "/_matrix/client/v3/createRoom", strings.NewReader(string(body)))
req.Header.Set("Authorization", "Bearer "+accessTokens[alice].accessToken)
routers.Client.ServeHTTP(w, req)
if w.Code != http.StatusOK {
t.Fatalf("expected room creation to be successful, got HTTP %d instead: %s", w.Code, w.Body.String())
}
roomID := gjson.GetBytes(w.Body.Bytes(), "room_id").Str
validRoomID, _ := spec.NewRoomID(roomID)
// Now ask the roomserver about the membership event of Bob
ev, err := rsAPI.CurrentStateEvent(context.Background(), *validRoomID, spec.MRoomMember, bob.ID)
if err != nil {
t.Fatal(err)
}
if ev == nil {
t.Fatal("Membership event for Bob does not exist")
}
// Validate that there is NO displayname in content
if gjson.GetBytes(ev.Content(), "displayname").Exists() {
t.Fatal("Found displayname in invite")
}
})
}


@@ -55,7 +55,7 @@ func DirectoryRoom(
     if err != nil {
         return util.JSONResponse{
             Code: http.StatusBadRequest,
-            JSON: spec.BadJSON("Room alias must be in the form '#localpart:domain'"),
+            JSON: spec.InvalidParam("Room alias must be in the form '#localpart:domain'"),
         }
     }
@@ -134,7 +134,7 @@ func SetLocalAlias(
     if err != nil {
         return util.JSONResponse{
             Code: http.StatusBadRequest,
-            JSON: spec.BadJSON("Room alias must be in the form '#localpart:domain'"),
+            JSON: spec.InvalidParam("Room alias must be in the form '#localpart:domain'"),
         }
     }


@@ -93,7 +93,6 @@ func UploadKeys(req *http.Request, keyAPI api.ClientKeyAPI, device *api.Device)

 type queryKeysRequest struct {
     Timeout    int                 `json:"timeout"`
-    Token      string              `json:"token"`
     DeviceKeys map[string][]string `json:"device_keys"`
 }
@@ -119,7 +118,6 @@ func QueryKeys(req *http.Request, keyAPI api.ClientKeyAPI, device *api.Device) u
         UserID:        device.UserID,
         UserToDevices: r.DeviceKeys,
         Timeout:       r.GetTimeout(),
-        // TODO: Token?
     }, &queryRes)
     return util.JSONResponse{
         Code: 200,


@@ -324,19 +324,18 @@ func SendInvite(
     }

     // We already received the return value, so no need to check for an error here.
-    response, _ := sendInvite(req.Context(), profileAPI, device, roomID, body.UserID, body.Reason, cfg, rsAPI, asAPI, evTime)
+    response, _ := sendInvite(req.Context(), device, roomID, body.UserID, body.Reason, cfg, rsAPI, evTime)
     return response
 }

 // sendInvite sends an invitation to a user. Returns a JSONResponse and an error
 func sendInvite(
     ctx context.Context,
-    profileAPI userapi.ClientUserAPI,
     device *userapi.Device,
     roomID, userID, reason string,
     cfg *config.ClientAPI,
     rsAPI roomserverAPI.ClientRoomserverAPI,
-    asAPI appserviceAPI.AppServiceInternalAPI, evTime time.Time,
+    evTime time.Time,
 ) (util.JSONResponse, error) {
     validRoomID, err := spec.NewRoomID(roomID)
     if err != nil {
@@ -359,13 +358,7 @@ func sendInvite(
             JSON: spec.InvalidParam("UserID is invalid"),
         }, err
     }
-    profile, err := loadProfile(ctx, userID, cfg, profileAPI, asAPI)
-    if err != nil {
-        return util.JSONResponse{
-            Code: http.StatusInternalServerError,
-            JSON: spec.InternalServerError{},
-        }, err
-    }
     identity, err := cfg.Matrix.SigningIdentityFor(device.UserDomain())
     if err != nil {
         return util.JSONResponse{
@@ -375,16 +368,14 @@ func sendInvite(
     }
     err = rsAPI.PerformInvite(ctx, &api.PerformInviteRequest{
         InviteInput: roomserverAPI.InviteInput{
-            RoomID:      *validRoomID,
-            Inviter:     *inviter,
-            Invitee:     *invitee,
-            DisplayName: profile.DisplayName,
-            AvatarURL:   profile.AvatarURL,
-            Reason:      reason,
-            IsDirect:    false,
-            KeyID:       identity.KeyID,
-            PrivateKey:  identity.PrivateKey,
-            EventTime:   evTime,
+            RoomID:     *validRoomID,
+            Inviter:    *inviter,
+            Invitee:    *invitee,
+            Reason:     reason,
+            IsDirect:   false,
+            KeyID:      identity.KeyID,
+            PrivateKey: identity.PrivateKey,
+            EventTime:  evTime,
         },
         InviteRoomState: nil, // ask the roomserver to draw up invite room state for us
         SendAsServer:    string(device.UserDomain()),


@ -0,0 +1,139 @@
// Copyright 2024 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package routing
import (
"encoding/json"
"net/http"
"github.com/matrix-org/dendrite/roomserver/api"
userapi "github.com/matrix-org/dendrite/userapi/api"
"github.com/matrix-org/gomatrixserverlib/spec"
"github.com/matrix-org/util"
)
// https://matrix.org/docs/spec/client_server/r0.6.0#get-matrix-client-r0-rooms-roomid-joined-members
type getJoinedMembersResponse struct {
Joined map[string]joinedMember `json:"joined"`
}
type joinedMember struct {
DisplayName string `json:"display_name"`
AvatarURL string `json:"avatar_url"`
}
// The database stores 'displayname' without an underscore.
// Deserialize into this and then change to the actual API response
type databaseJoinedMember struct {
DisplayName string `json:"displayname"`
AvatarURL string `json:"avatar_url"`
}
// GetJoinedMembers implements
//
// GET /rooms/{roomId}/joined_members
func GetJoinedMembers(
req *http.Request, device *userapi.Device, roomID string,
rsAPI api.ClientRoomserverAPI,
) util.JSONResponse {
// Validate the userID
userID, err := spec.NewUserID(device.UserID, true)
if err != nil {
return util.JSONResponse{
Code: http.StatusBadRequest,
JSON: spec.InvalidParam("Device UserID is invalid"),
}
}
// Validate the roomID
validRoomID, err := spec.NewRoomID(roomID)
if err != nil {
return util.JSONResponse{
Code: http.StatusBadRequest,
JSON: spec.InvalidParam("RoomID is invalid"),
}
}
// Get the current memberships for the requesting user to determine
// if they are allowed to query this endpoint.
queryReq := api.QueryMembershipForUserRequest{
RoomID: validRoomID.String(),
UserID: *userID,
}
var queryRes api.QueryMembershipForUserResponse
if queryErr := rsAPI.QueryMembershipForUser(req.Context(), &queryReq, &queryRes); queryErr != nil {
util.GetLogger(req.Context()).WithError(queryErr).Error("rsAPI.QueryMembershipsForRoom failed")
return util.JSONResponse{
Code: http.StatusInternalServerError,
JSON: spec.InternalServerError{},
}
}
if !queryRes.HasBeenInRoom {
return util.JSONResponse{
Code: http.StatusForbidden,
JSON: spec.Forbidden("You aren't a member of the room and weren't previously a member of the room."),
}
}
if !queryRes.IsInRoom {
return util.JSONResponse{
Code: http.StatusForbidden,
JSON: spec.Forbidden("You aren't a member of the room and weren't previously a member of the room."),
}
}
// Get the current membership events
var membershipsForRoomResp api.QueryMembershipsForRoomResponse
if err = rsAPI.QueryMembershipsForRoom(req.Context(), &api.QueryMembershipsForRoomRequest{
JoinedOnly: true,
RoomID: validRoomID.String(),
}, &membershipsForRoomResp); err != nil {
util.GetLogger(req.Context()).WithError(err).Error("rsAPI.QueryEventsByID failed")
return util.JSONResponse{
Code: http.StatusInternalServerError,
JSON: spec.InternalServerError{},
}
}
var res getJoinedMembersResponse
res.Joined = make(map[string]joinedMember)
for _, ev := range membershipsForRoomResp.JoinEvents {
var content databaseJoinedMember
if err := json.Unmarshal(ev.Content, &content); err != nil {
util.GetLogger(req.Context()).WithError(err).Error("failed to unmarshal event content")
return util.JSONResponse{
Code: http.StatusInternalServerError,
JSON: spec.InternalServerError{},
}
}
userID, err := rsAPI.QueryUserIDForSender(req.Context(), *validRoomID, spec.SenderID(ev.Sender))
if err != nil || userID == nil {
util.GetLogger(req.Context()).WithError(err).Error("rsAPI.QueryUserIDForSender failed")
return util.JSONResponse{
Code: http.StatusInternalServerError,
JSON: spec.InternalServerError{},
}
}
res.Joined[userID.String()] = joinedMember(content)
}
return util.JSONResponse{
Code: http.StatusOK,
JSON: res,
}
}
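A hypothetical client-side sketch of calling the endpoint added above; the homeserver URL, room ID, and access token are placeholders, and the response struct mirrors the `joined` map returned by GetJoinedMembers.

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// joinedMembersResponse matches the shape produced by GetJoinedMembers above.
type joinedMembersResponse struct {
	Joined map[string]struct {
		DisplayName string `json:"display_name"`
		AvatarURL   string `json:"avatar_url"`
	} `json:"joined"`
}

func main() {
	req, err := http.NewRequest(http.MethodGet,
		"https://matrix.example.com/_matrix/client/v3/rooms/!room:example.com/joined_members", nil)
	if err != nil {
		panic(err)
	}
	// The endpoint is authenticated; only current members get a 200 response.
	req.Header.Set("Authorization", "Bearer YOUR_ACCESS_TOKEN")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var out joinedMembersResponse
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		panic(err)
	}
	for userID, m := range out.Joined {
		fmt.Printf("%s: %s (%s)\n", userID, m.DisplayName, m.AvatarURL)
	}
}
```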


@@ -630,6 +630,7 @@ func handleGuestRegistration(
         AccessToken:  token,
         IPAddr:       req.RemoteAddr,
         UserAgent:    req.UserAgent(),
+        FromRegistration: true,
     }, &devRes)
     if err != nil {
         return util.JSONResponse{
@@ -919,6 +920,7 @@ func completeRegistration(
         DeviceID:    deviceID,
         IPAddr:      ipAddr,
         UserAgent:   userAgent,
+        FromRegistration: true,
     }, &devRes)
     if err != nil {
         return util.JSONResponse{


@@ -1513,4 +1513,14 @@ func Setup(
             return GetPresence(req, device, natsClient, cfg.Matrix.JetStream.Prefixed(jetstream.RequestPresence), vars["userId"])
         }),
     ).Methods(http.MethodGet, http.MethodOptions)
+
+    v3mux.Handle("/rooms/{roomID}/joined_members",
+        httputil.MakeAuthAPI("rooms_members", userAPI, func(req *http.Request, device *userapi.Device) util.JSONResponse {
+            vars, err := httputil.URLDecodeMapValues(mux.Vars(req))
+            if err != nil {
+                return util.ErrorResponse(err)
+            }
+            return GetJoinedMembers(req, device, vars["roomID"], rsAPI)
+        }),
+    ).Methods(http.MethodGet, http.MethodOptions)
 }


@@ -215,7 +215,7 @@ func SendServerNotice(
     }
     if !membershipRes.IsInRoom {
         // re-invite the user
-        res, err := sendInvite(ctx, userAPI, senderDevice, roomID, r.UserID, "Server notice room", cfgClient, rsAPI, asAPI, time.Now())
+        res, err := sendInvite(ctx, senderDevice, roomID, r.UserID, "Server notice room", cfgClient, rsAPI, time.Now())
         if err != nil {
             return res
         }


@@ -154,6 +154,13 @@ app_service_api:
   # to be sent to an insecure endpoint.
   disable_tls_validation: false

+  # Send the access_token query parameter with appservice requests in addition
+  # to the Authorization header. This can cause hs_tokens to be saved to logs,
+  # so it should not be enabled unless absolutely necessary.
+  legacy_auth: false
+
+  # Use the legacy unprefixed paths for appservice requests.
+  legacy_paths: false
+
   # Appservice configuration files to load into this homeserver.
   config_files:
   #  - /path/to/appservice_registration.yaml
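A minimal sketch of how these two flags change an outbound third-party protocol lookup, matching the appservice query changes in this merge. The values and the standalone helper below are placeholders, not Dendrite's config structs.

```go
package main

import (
	"fmt"
	"net/url"
)

const (
	asProtocolPath       = "/_matrix/app/v1/thirdparty/protocol/"       // default
	asProtocolLegacyPath = "/_matrix/app/unstable/thirdparty/protocol/" // legacy_paths: true
)

// protocolLookupURL picks the path based on legacy_paths and only appends the
// hs_token as a query parameter when legacy_auth is enabled; the
// "Authorization: Bearer <hs_token>" header is sent in both cases.
func protocolLookupURL(base, protocol, hsToken string, legacyPaths, legacyAuth bool) string {
	path := asProtocolPath
	if legacyPaths {
		path = asProtocolLegacyPath
	}
	u := base + path + protocol
	if legacyAuth {
		// This is why the sample config warns about tokens ending up in logs.
		u += "?access_token=" + url.QueryEscape(hsToken)
	}
	return u
}

func main() {
	fmt.Println(protocolLookupURL("http://localhost:9000", "irc", "hs_secret", false, false))
	fmt.Println(protocolLookupURL("http://localhost:9000", "irc", "hs_secret", true, true))
}
```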


@@ -117,6 +117,7 @@ The list of files that need to be stored is:
 - matrix-key.pem
 - dendrite.yaml
 - the postgres or sqlite DB
+- the jetstream directory
 - the media store
 - the search index (although this can be regenerated)


@@ -231,9 +231,9 @@ GEM
       jekyll-seo-tag (~> 2.1)
     minitest (5.17.0)
     multipart-post (2.1.1)
-    nokogiri (1.14.3-arm64-darwin)
+    nokogiri (1.16.2-arm64-darwin)
       racc (~> 1.4)
-    nokogiri (1.14.3-x86_64-linux)
+    nokogiri (1.16.2-x86_64-linux)
       racc (~> 1.4)
     octokit (4.22.0)
       faraday (>= 0.9)
@@ -241,7 +241,7 @@ GEM
     pathutil (0.16.2)
       forwardable-extended (~> 2.6)
     public_suffix (4.0.7)
-    racc (1.6.2)
+    racc (1.7.3)
     rb-fsevent (0.11.1)
     rb-inotify (0.10.1)
       ffi (~> 1.0)


@@ -113,10 +113,7 @@ func NewInternalAPI(
         _ = federationDB.RemoveAllServersFromBlacklist()
     }

-    stats := statistics.NewStatistics(
-        federationDB,
-        cfg.FederationMaxRetries+1,
-        cfg.P2PFederationRetriesUntilAssumedOffline+1)
+    stats := statistics.NewStatistics(federationDB, cfg.FederationMaxRetries+1, cfg.P2PFederationRetriesUntilAssumedOffline+1, cfg.EnableRelays)

     js, nats := natsInstance.Prepare(processContext, &cfg.Matrix.JetStream)


@@ -5,11 +5,14 @@ import (
     "crypto/ed25519"
     "encoding/json"
     "fmt"
+    "net/http"
+    "net/http/httptest"
     "strings"
     "sync"
     "testing"
     "time"

+    "github.com/matrix-org/dendrite/federationapi/routing"
     "github.com/matrix-org/dendrite/internal/caching"
     "github.com/matrix-org/dendrite/internal/httputil"
     "github.com/matrix-org/dendrite/internal/sqlutil"
@@ -17,7 +20,10 @@ import (
     "github.com/matrix-org/gomatrixserverlib"
     "github.com/matrix-org/gomatrixserverlib/fclient"
     "github.com/matrix-org/gomatrixserverlib/spec"
+    "github.com/matrix-org/util"
     "github.com/nats-io/nats.go"
+    "github.com/stretchr/testify/assert"
+    "github.com/tidwall/gjson"

     "github.com/matrix-org/dendrite/federationapi"
     "github.com/matrix-org/dendrite/federationapi/api"
@@ -362,3 +368,126 @@ func TestRoomsV3URLEscapeDoNot404(t *testing.T) {
         }
     }
 }
func TestNotaryServer(t *testing.T) {
testCases := []struct {
name string
httpBody string
pubKeyRequest *gomatrixserverlib.PublicKeyNotaryLookupRequest
validateFunc func(t *testing.T, response util.JSONResponse)
}{
{
name: "empty httpBody",
validateFunc: func(t *testing.T, resp util.JSONResponse) {
assert.Equal(t, http.StatusBadRequest, resp.Code)
nk, ok := resp.JSON.(spec.MatrixError)
assert.True(t, ok)
assert.Equal(t, spec.ErrorBadJSON, nk.ErrCode)
},
},
{
name: "valid but empty httpBody",
httpBody: "{}",
validateFunc: func(t *testing.T, resp util.JSONResponse) {
want := util.JSONResponse{
Code: http.StatusOK,
JSON: routing.NotaryKeysResponse{ServerKeys: []json.RawMessage{}},
}
assert.Equal(t, want, resp)
},
},
{
name: "request all keys using an empty criteria",
httpBody: `{"server_keys":{"servera":{}}}`,
validateFunc: func(t *testing.T, resp util.JSONResponse) {
assert.Equal(t, http.StatusOK, resp.Code)
nk, ok := resp.JSON.(routing.NotaryKeysResponse)
assert.True(t, ok)
assert.Equal(t, "servera", gjson.GetBytes(nk.ServerKeys[0], "server_name").Str)
assert.True(t, gjson.GetBytes(nk.ServerKeys[0], "verify_keys.ed25519:someID").Exists())
},
},
{
name: "request all keys using null as the criteria",
httpBody: `{"server_keys":{"servera":null}}`,
validateFunc: func(t *testing.T, resp util.JSONResponse) {
assert.Equal(t, http.StatusOK, resp.Code)
nk, ok := resp.JSON.(routing.NotaryKeysResponse)
assert.True(t, ok)
assert.Equal(t, "servera", gjson.GetBytes(nk.ServerKeys[0], "server_name").Str)
assert.True(t, gjson.GetBytes(nk.ServerKeys[0], "verify_keys.ed25519:someID").Exists())
},
},
{
name: "request specific key",
httpBody: `{"server_keys":{"servera":{"ed25519:someID":{}}}}`,
validateFunc: func(t *testing.T, resp util.JSONResponse) {
assert.Equal(t, http.StatusOK, resp.Code)
nk, ok := resp.JSON.(routing.NotaryKeysResponse)
assert.True(t, ok)
assert.Equal(t, "servera", gjson.GetBytes(nk.ServerKeys[0], "server_name").Str)
assert.True(t, gjson.GetBytes(nk.ServerKeys[0], "verify_keys.ed25519:someID").Exists())
},
},
{
name: "request multiple servers",
httpBody: `{"server_keys":{"servera":{"ed25519:someID":{}},"serverb":{"ed25519:someID":{}}}}`,
validateFunc: func(t *testing.T, resp util.JSONResponse) {
assert.Equal(t, http.StatusOK, resp.Code)
nk, ok := resp.JSON.(routing.NotaryKeysResponse)
assert.True(t, ok)
wantServers := map[string]struct{}{
"servera": {},
"serverb": {},
}
for _, js := range nk.ServerKeys {
serverName := gjson.GetBytes(js, "server_name").Str
_, ok = wantServers[serverName]
assert.True(t, ok, "unexpected servername: %s", serverName)
delete(wantServers, serverName)
assert.True(t, gjson.GetBytes(js, "verify_keys.ed25519:someID").Exists())
}
if len(wantServers) > 0 {
t.Fatalf("expected response to also contain: %#v", wantServers)
}
},
},
}
test.WithAllDatabases(t, func(t *testing.T, dbType test.DBType) {
cfg, processCtx, close := testrig.CreateConfig(t, dbType)
defer close()
cm := sqlutil.NewConnectionManager(processCtx, cfg.Global.DatabaseOptions)
caches := caching.NewRistrettoCache(128*1024*1024, time.Hour, caching.DisableMetrics)
natsInstance := jetstream.NATSInstance{}
fc := &fedClient{
keys: map[spec.ServerName]struct {
key ed25519.PrivateKey
keyID gomatrixserverlib.KeyID
}{
"servera": {
key: test.PrivateKeyA,
keyID: "ed25519:someID",
},
"serverb": {
key: test.PrivateKeyB,
keyID: "ed25519:someID",
},
},
}
fedAPI := federationapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, fc, nil, caches, nil, true)
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
req := httptest.NewRequest(http.MethodPost, "/", strings.NewReader(tc.httpBody))
req.Host = string(cfg.Global.ServerName)
resp := routing.NotaryKeys(req, &cfg.FederationAPI, fedAPI, tc.pubKeyRequest)
// assert that we received the expected response
tc.validateFunc(t, resp)
})
}
})
}

View file

@ -61,7 +61,7 @@ func TestFederationClientQueryKeys(t *testing.T) {
}, },
} }
fedClient := &testFedClient{} fedClient := &testFedClient{}
stats := statistics.NewStatistics(testDB, FailuresUntilBlacklist, FailuresUntilAssumedOffline) stats := statistics.NewStatistics(testDB, FailuresUntilBlacklist, FailuresUntilAssumedOffline, false)
queues := queue.NewOutgoingQueues( queues := queue.NewOutgoingQueues(
testDB, process.NewProcessContext(), testDB, process.NewProcessContext(),
false, false,
@ -92,7 +92,7 @@ func TestFederationClientQueryKeysBlacklisted(t *testing.T) {
}, },
} }
fedClient := &testFedClient{} fedClient := &testFedClient{}
stats := statistics.NewStatistics(testDB, FailuresUntilBlacklist, FailuresUntilAssumedOffline) stats := statistics.NewStatistics(testDB, FailuresUntilBlacklist, FailuresUntilAssumedOffline, false)
queues := queue.NewOutgoingQueues( queues := queue.NewOutgoingQueues(
testDB, process.NewProcessContext(), testDB, process.NewProcessContext(),
false, false,
@ -122,7 +122,7 @@ func TestFederationClientQueryKeysFailure(t *testing.T) {
}, },
} }
fedClient := &testFedClient{shouldFail: true} fedClient := &testFedClient{shouldFail: true}
stats := statistics.NewStatistics(testDB, FailuresUntilBlacklist, FailuresUntilAssumedOffline) stats := statistics.NewStatistics(testDB, FailuresUntilBlacklist, FailuresUntilAssumedOffline, false)
queues := queue.NewOutgoingQueues( queues := queue.NewOutgoingQueues(
testDB, process.NewProcessContext(), testDB, process.NewProcessContext(),
false, false,
@ -152,7 +152,7 @@ func TestFederationClientClaimKeys(t *testing.T) {
}, },
} }
fedClient := &testFedClient{} fedClient := &testFedClient{}
stats := statistics.NewStatistics(testDB, FailuresUntilBlacklist, FailuresUntilAssumedOffline) stats := statistics.NewStatistics(testDB, FailuresUntilBlacklist, FailuresUntilAssumedOffline, false)
queues := queue.NewOutgoingQueues( queues := queue.NewOutgoingQueues(
testDB, process.NewProcessContext(), testDB, process.NewProcessContext(),
false, false,
@ -183,7 +183,7 @@ func TestFederationClientClaimKeysBlacklisted(t *testing.T) {
}, },
} }
fedClient := &testFedClient{} fedClient := &testFedClient{}
stats := statistics.NewStatistics(testDB, FailuresUntilBlacklist, FailuresUntilAssumedOffline) stats := statistics.NewStatistics(testDB, FailuresUntilBlacklist, FailuresUntilAssumedOffline, false)
queues := queue.NewOutgoingQueues( queues := queue.NewOutgoingQueues(
testDB, process.NewProcessContext(), testDB, process.NewProcessContext(),
false, false,

View file

@ -66,7 +66,7 @@ func TestPerformWakeupServers(t *testing.T) {
}, },
} }
fedClient := &testFedClient{} fedClient := &testFedClient{}
stats := statistics.NewStatistics(testDB, FailuresUntilBlacklist, FailuresUntilAssumedOffline) stats := statistics.NewStatistics(testDB, FailuresUntilBlacklist, FailuresUntilAssumedOffline, true)
queues := queue.NewOutgoingQueues( queues := queue.NewOutgoingQueues(
testDB, process.NewProcessContext(), testDB, process.NewProcessContext(),
false, false,
@ -112,7 +112,7 @@ func TestQueryRelayServers(t *testing.T) {
}, },
} }
fedClient := &testFedClient{} fedClient := &testFedClient{}
stats := statistics.NewStatistics(testDB, FailuresUntilBlacklist, FailuresUntilAssumedOffline) stats := statistics.NewStatistics(testDB, FailuresUntilBlacklist, FailuresUntilAssumedOffline, false)
queues := queue.NewOutgoingQueues( queues := queue.NewOutgoingQueues(
testDB, process.NewProcessContext(), testDB, process.NewProcessContext(),
false, false,
@ -153,7 +153,7 @@ func TestRemoveRelayServers(t *testing.T) {
}, },
} }
fedClient := &testFedClient{} fedClient := &testFedClient{}
stats := statistics.NewStatistics(testDB, FailuresUntilBlacklist, FailuresUntilAssumedOffline) stats := statistics.NewStatistics(testDB, FailuresUntilBlacklist, FailuresUntilAssumedOffline, false)
queues := queue.NewOutgoingQueues( queues := queue.NewOutgoingQueues(
testDB, process.NewProcessContext(), testDB, process.NewProcessContext(),
false, false,
@ -193,7 +193,7 @@ func TestPerformDirectoryLookup(t *testing.T) {
}, },
} }
fedClient := &testFedClient{} fedClient := &testFedClient{}
stats := statistics.NewStatistics(testDB, FailuresUntilBlacklist, FailuresUntilAssumedOffline) stats := statistics.NewStatistics(testDB, FailuresUntilBlacklist, FailuresUntilAssumedOffline, false)
queues := queue.NewOutgoingQueues( queues := queue.NewOutgoingQueues(
testDB, process.NewProcessContext(), testDB, process.NewProcessContext(),
false, false,
@ -232,7 +232,7 @@ func TestPerformDirectoryLookupRelaying(t *testing.T) {
}, },
} }
fedClient := &testFedClient{} fedClient := &testFedClient{}
stats := statistics.NewStatistics(testDB, FailuresUntilBlacklist, FailuresUntilAssumedOffline) stats := statistics.NewStatistics(testDB, FailuresUntilBlacklist, FailuresUntilAssumedOffline, true)
queues := queue.NewOutgoingQueues( queues := queue.NewOutgoingQueues(
testDB, process.NewProcessContext(), testDB, process.NewProcessContext(),
false, false,

View file

@ -43,6 +43,15 @@ func (a *FederationInternalAPI) fetchServerKeysFromCache(
ctx context.Context, req *api.QueryServerKeysRequest, ctx context.Context, req *api.QueryServerKeysRequest,
) ([]gomatrixserverlib.ServerKeys, error) { ) ([]gomatrixserverlib.ServerKeys, error) {
var results []gomatrixserverlib.ServerKeys var results []gomatrixserverlib.ServerKeys
// We got a request for _all_ server keys, return them.
if len(req.KeyIDToCriteria) == 0 {
serverKeysResponses, _ := a.db.GetNotaryKeys(ctx, req.ServerName, []gomatrixserverlib.KeyID{})
if len(serverKeysResponses) == 0 {
return nil, fmt.Errorf("failed to find server key response for server %s", req.ServerName)
}
return serverKeysResponses, nil
}
for keyID, criteria := range req.KeyIDToCriteria { for keyID, criteria := range req.KeyIDToCriteria {
serverKeysResponses, _ := a.db.GetNotaryKeys(ctx, req.ServerName, []gomatrixserverlib.KeyID{keyID}) serverKeysResponses, _ := a.db.GetNotaryKeys(ctx, req.ServerName, []gomatrixserverlib.KeyID{keyID})
if len(serverKeysResponses) == 0 { if len(serverKeysResponses) == 0 {

View file

@ -117,7 +117,7 @@ func testSetup(failuresUntilBlacklist uint32, failuresUntilAssumedOffline uint32
txRelayCount: *atomic.NewUint32(0), txRelayCount: *atomic.NewUint32(0),
} }
stats := statistics.NewStatistics(db, failuresUntilBlacklist, failuresUntilAssumedOffline) stats := statistics.NewStatistics(db, failuresUntilBlacklist, failuresUntilAssumedOffline, false)
signingInfo := []*fclient.SigningIdentity{ signingInfo := []*fclient.SigningIdentity{
{ {
KeyID: "ed21019:auto", KeyID: "ed21019:auto",

View file

@ -95,6 +95,12 @@ func Backfill(
} }
} }
// Enforce a limit of 100 events, so as not to hit the DB too hard.
// Synapse has a hard limit of 100 events as well.
if req.Limit > 100 {
req.Limit = 100
}
// Query the Roomserver. // Query the Roomserver.
if err = rsAPI.PerformBackfill(httpReq.Context(), &req, &res); err != nil { if err = rsAPI.PerformBackfill(httpReq.Context(), &req, &res); err != nil {
util.GetLogger(httpReq.Context()).WithError(err).Error("query.PerformBackfill failed") util.GetLogger(httpReq.Context()).WithError(err).Error("query.PerformBackfill failed")

View file

@ -197,6 +197,10 @@ func localKeys(cfg *config.FederationAPI, serverName spec.ServerName) (*gomatrix
return &keys, err return &keys, err
} }
type NotaryKeysResponse struct {
ServerKeys []json.RawMessage `json:"server_keys"`
}
func NotaryKeys( func NotaryKeys(
httpReq *http.Request, cfg *config.FederationAPI, httpReq *http.Request, cfg *config.FederationAPI,
fsAPI federationAPI.FederationInternalAPI, fsAPI federationAPI.FederationInternalAPI,
@ -217,10 +221,9 @@ func NotaryKeys(
} }
} }
var response struct { response := NotaryKeysResponse{
ServerKeys []json.RawMessage `json:"server_keys"` ServerKeys: []json.RawMessage{},
} }
response.ServerKeys = []json.RawMessage{}
for serverName, kidToCriteria := range req.ServerKeys { for serverName, kidToCriteria := range req.ServerKeys {
var keyList []gomatrixserverlib.ServerKeys var keyList []gomatrixserverlib.ServerKeys
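A quick standalone illustration of why the response type is now exported and pre-initialised with an empty slice: marshalling it produces {"server_keys":[]} rather than {"server_keys":null}, which is exactly what the "valid but empty httpBody" case in TestNotaryServer above asserts. This mirrors the struct from the diff; it is not the handler itself.

package main

import (
	"encoding/json"
	"fmt"
)

// NotaryKeysResponse mirrors the exported type introduced in routing above.
type NotaryKeysResponse struct {
	ServerKeys []json.RawMessage `json:"server_keys"`
}

func main() {
	// An explicitly empty (non-nil) slice serialises as an empty JSON array.
	resp := NotaryKeysResponse{ServerKeys: []json.RawMessage{}}
	out, _ := json.Marshal(resp)
	fmt.Println(string(out)) // {"server_keys":[]}
}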

View file

@ -647,6 +647,8 @@ func MakeFedAPI(
// add the user to Sentry, if enabled // add the user to Sentry, if enabled
hub := sentry.GetHubFromContext(req.Context()) hub := sentry.GetHubFromContext(req.Context())
if hub != nil { if hub != nil {
// clone the hub, so we don't send garbage events with e.g. mismatching rooms/event_ids
hub = hub.Clone()
hub.Scope().SetTag("origin", string(fedReq.Origin())) hub.Scope().SetTag("origin", string(fedReq.Origin()))
hub.Scope().SetTag("uri", fedReq.RequestURI()) hub.Scope().SetTag("uri", fedReq.RequestURI())
} }
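The hub.Clone() added here (and in MakeAuthAPI and MakeRelayAPI further down) ensures request-scoped tags such as origin, uri, room_id and event_id never bleed between concurrent requests that would otherwise share the global hub. Below is a minimal sketch of the same pattern as generic middleware, assuming the sentry-go SDK; withRequestHub is a hypothetical name and not part of Dendrite.

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	sentry "github.com/getsentry/sentry-go"
)

// withRequestHub clones the hub attached to the request context before
// tagging it, so the tags only apply to events reported for this request.
func withRequestHub(next http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		if hub := sentry.GetHubFromContext(r.Context()); hub != nil {
			hub = hub.Clone()
			hub.Scope().SetTag("uri", r.RequestURI)
			r = r.WithContext(sentry.SetHubOnContext(r.Context(), hub))
		}
		next(w, r)
	}
}

func main() {
	h := withRequestHub(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "ok")
	})
	rec := httptest.NewRecorder()
	h(rec, httptest.NewRequest(http.MethodGet, "/_matrix/federation/v1/version", nil))
	fmt.Println(rec.Code) // 200
}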

View file

@ -34,12 +34,15 @@ type Statistics struct {
// mark the destination as offline. At this point we should attempt // mark the destination as offline. At this point we should attempt
// to send messages to the user's async relay servers if we know them. // to send messages to the user's async relay servers if we know them.
FailuresUntilAssumedOffline uint32 FailuresUntilAssumedOffline uint32
enableRelays bool
} }
func NewStatistics( func NewStatistics(
db storage.Database, db storage.Database,
failuresUntilBlacklist uint32, failuresUntilBlacklist uint32,
failuresUntilAssumedOffline uint32, failuresUntilAssumedOffline uint32,
enableRelays bool,
) Statistics { ) Statistics {
return Statistics{ return Statistics{
DB: db, DB: db,
@ -47,6 +50,7 @@ func NewStatistics(
FailuresUntilAssumedOffline: failuresUntilAssumedOffline, FailuresUntilAssumedOffline: failuresUntilAssumedOffline,
backoffTimers: make(map[spec.ServerName]*time.Timer), backoffTimers: make(map[spec.ServerName]*time.Timer),
servers: make(map[spec.ServerName]*ServerStatistics), servers: make(map[spec.ServerName]*ServerStatistics),
enableRelays: enableRelays,
} }
} }
@ -73,6 +77,13 @@ func (s *Statistics) ForServer(serverName spec.ServerName) *ServerStatistics {
} else { } else {
server.blacklisted.Store(blacklisted) server.blacklisted.Store(blacklisted)
} }
// Don't bother hitting the database 2 additional times
// if we don't want to use relays.
if !s.enableRelays {
return server
}
assumedOffline, err := s.DB.IsServerAssumedOffline(context.Background(), serverName) assumedOffline, err := s.DB.IsServerAssumedOffline(context.Background(), serverName)
if err != nil { if err != nil {
logrus.WithError(err).Errorf("Failed to get assumed offline entry %q", serverName) logrus.WithError(err).Errorf("Failed to get assumed offline entry %q", serverName)

View file

@ -16,7 +16,7 @@ const (
) )
func TestBackoff(t *testing.T) { func TestBackoff(t *testing.T) {
stats := NewStatistics(nil, FailuresUntilBlacklist, FailuresUntilAssumedOffline) stats := NewStatistics(nil, FailuresUntilBlacklist, FailuresUntilAssumedOffline, false)
server := ServerStatistics{ server := ServerStatistics{
statistics: &stats, statistics: &stats,
serverName: "test.com", serverName: "test.com",
@ -106,7 +106,7 @@ func TestBackoff(t *testing.T) {
} }
func TestRelayServersListing(t *testing.T) { func TestRelayServersListing(t *testing.T) {
stats := NewStatistics(test.NewInMemoryFederationDatabase(), FailuresUntilBlacklist, FailuresUntilAssumedOffline) stats := NewStatistics(test.NewInMemoryFederationDatabase(), FailuresUntilBlacklist, FailuresUntilAssumedOffline, false)
server := ServerStatistics{statistics: &stats} server := ServerStatistics{statistics: &stats}
server.AddRelayServers([]spec.ServerName{"relay1", "relay1", "relay2"}) server.AddRelayServers([]spec.ServerName{"relay1", "relay1", "relay2"})
relayServers := server.KnownRelayServers() relayServers := server.KnownRelayServers()

go.mod (4 changed lines)
View file

@ -22,7 +22,7 @@ require (
github.com/matrix-org/dugong v0.0.0-20210921133753-66e6b1c67e2e github.com/matrix-org/dugong v0.0.0-20210921133753-66e6b1c67e2e
github.com/matrix-org/go-sqlite3-js v0.0.0-20220419092513-28aa791a1c91 github.com/matrix-org/go-sqlite3-js v0.0.0-20220419092513-28aa791a1c91
github.com/matrix-org/gomatrix v0.0.0-20220926102614-ceba4d9f7530 github.com/matrix-org/gomatrix v0.0.0-20220926102614-ceba4d9f7530
github.com/matrix-org/gomatrixserverlib v0.0.0-20231212115925-41497b7563eb github.com/matrix-org/gomatrixserverlib v0.0.0-20240109180417-3495e573f2b7
github.com/matrix-org/pinecone v0.11.1-0.20230810010612-ea4c33717fd7 github.com/matrix-org/pinecone v0.11.1-0.20230810010612-ea4c33717fd7
github.com/matrix-org/util v0.0.0-20221111132719-399730281e66 github.com/matrix-org/util v0.0.0-20221111132719-399730281e66
github.com/mattn/go-sqlite3 v1.14.17 github.com/mattn/go-sqlite3 v1.14.17
@ -120,7 +120,7 @@ require (
github.com/prometheus/common v0.42.0 // indirect github.com/prometheus/common v0.42.0 // indirect
github.com/prometheus/procfs v0.10.1 // indirect github.com/prometheus/procfs v0.10.1 // indirect
github.com/quic-go/qtls-go1-20 v0.3.2 // indirect github.com/quic-go/qtls-go1-20 v0.3.2 // indirect
github.com/quic-go/quic-go v0.37.4 // indirect github.com/quic-go/quic-go v0.37.7 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
github.com/rogpeppe/go-internal v1.9.0 // indirect github.com/rogpeppe/go-internal v1.9.0 // indirect
github.com/rs/zerolog v1.29.1 // indirect github.com/rs/zerolog v1.29.1 // indirect

go.sum (8 changed lines)
View file

@ -245,8 +245,8 @@ github.com/matrix-org/go-sqlite3-js v0.0.0-20220419092513-28aa791a1c91 h1:s7fexw
github.com/matrix-org/go-sqlite3-js v0.0.0-20220419092513-28aa791a1c91/go.mod h1:e+cg2q7C7yE5QnAXgzo512tgFh1RbQLC0+jozuegKgo= github.com/matrix-org/go-sqlite3-js v0.0.0-20220419092513-28aa791a1c91/go.mod h1:e+cg2q7C7yE5QnAXgzo512tgFh1RbQLC0+jozuegKgo=
github.com/matrix-org/gomatrix v0.0.0-20220926102614-ceba4d9f7530 h1:kHKxCOLcHH8r4Fzarl4+Y3K5hjothkVW5z7T1dUM11U= github.com/matrix-org/gomatrix v0.0.0-20220926102614-ceba4d9f7530 h1:kHKxCOLcHH8r4Fzarl4+Y3K5hjothkVW5z7T1dUM11U=
github.com/matrix-org/gomatrix v0.0.0-20220926102614-ceba4d9f7530/go.mod h1:/gBX06Kw0exX1HrwmoBibFA98yBk/jxKpGVeyQbff+s= github.com/matrix-org/gomatrix v0.0.0-20220926102614-ceba4d9f7530/go.mod h1:/gBX06Kw0exX1HrwmoBibFA98yBk/jxKpGVeyQbff+s=
github.com/matrix-org/gomatrixserverlib v0.0.0-20231212115925-41497b7563eb h1:Nn+Fr96oi7bIfdOwX5A2L6A2MZCM+lqwLe4/+3+nYj8= github.com/matrix-org/gomatrixserverlib v0.0.0-20240109180417-3495e573f2b7 h1:EaUvK2ay6cxMxeshC1p6QswS9+rQFbUc2YerkRFyVXQ=
github.com/matrix-org/gomatrixserverlib v0.0.0-20231212115925-41497b7563eb/go.mod h1:M8m7seOroO5ePlgxA7AFZymnG90Cnh94rYQyngSrZkk= github.com/matrix-org/gomatrixserverlib v0.0.0-20240109180417-3495e573f2b7/go.mod h1:HZGsVJ3bUE+DkZtufkH9H0mlsvbhEGK5CpX0Zlavylg=
github.com/matrix-org/pinecone v0.11.1-0.20230810010612-ea4c33717fd7 h1:6t8kJr8i1/1I5nNttw6nn1ryQJgzVlBmSGgPiiaTdw4= github.com/matrix-org/pinecone v0.11.1-0.20230810010612-ea4c33717fd7 h1:6t8kJr8i1/1I5nNttw6nn1ryQJgzVlBmSGgPiiaTdw4=
github.com/matrix-org/pinecone v0.11.1-0.20230810010612-ea4c33717fd7/go.mod h1:ReWMS/LoVnOiRAdq9sNUC2NZnd1mZkMNB52QhpTRWjg= github.com/matrix-org/pinecone v0.11.1-0.20230810010612-ea4c33717fd7/go.mod h1:ReWMS/LoVnOiRAdq9sNUC2NZnd1mZkMNB52QhpTRWjg=
github.com/matrix-org/util v0.0.0-20221111132719-399730281e66 h1:6z4KxomXSIGWqhHcfzExgkH3Z3UkIXry4ibJS4Aqz2Y= github.com/matrix-org/util v0.0.0-20221111132719-399730281e66 h1:6z4KxomXSIGWqhHcfzExgkH3Z3UkIXry4ibJS4Aqz2Y=
@ -324,8 +324,8 @@ github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+Pymzi
github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM=
github.com/quic-go/qtls-go1-20 v0.3.2 h1:rRgN3WfnKbyik4dBV8A6girlJVxGand/d+jVKbQq5GI= github.com/quic-go/qtls-go1-20 v0.3.2 h1:rRgN3WfnKbyik4dBV8A6girlJVxGand/d+jVKbQq5GI=
github.com/quic-go/qtls-go1-20 v0.3.2/go.mod h1:X9Nh97ZL80Z+bX/gUXMbipO6OxdiDi58b/fMC9mAL+k= github.com/quic-go/qtls-go1-20 v0.3.2/go.mod h1:X9Nh97ZL80Z+bX/gUXMbipO6OxdiDi58b/fMC9mAL+k=
github.com/quic-go/quic-go v0.37.4 h1:ke8B73yMCWGq9MfrCCAw0Uzdm7GaViC3i39dsIdDlH4= github.com/quic-go/quic-go v0.37.7 h1:AgKsQLZ1+YCwZd2GYhBUsJDYZwEkA5gENtAjb+MxONU=
github.com/quic-go/quic-go v0.37.4/go.mod h1:YsbH1r4mSHPJcLF4k4zruUkLBqctEMBDR6VPvcYjIsU= github.com/quic-go/quic-go v0.37.7/go.mod h1:YsbH1r4mSHPJcLF4k4zruUkLBqctEMBDR6VPvcYjIsU=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=

View file

@ -1,9 +1,10 @@
apiVersion: v2 apiVersion: v2
name: dendrite name: dendrite
version: "0.13.6" version: "0.14.0"
appVersion: "0.13.5" appVersion: "0.13.6"
description: Dendrite Matrix Homeserver description: Dendrite Matrix Homeserver
type: application type: application
icon: https://avatars.githubusercontent.com/u/8418310?s=48&v=4
keywords: keywords:
- matrix - matrix
- chat - chat
@ -13,7 +14,7 @@ home: https://github.com/matrix-org/dendrite
sources: sources:
- https://github.com/matrix-org/dendrite - https://github.com/matrix-org/dendrite
dependencies: dependencies:
- name: postgresql - name: postgresql
version: 12.1.7 version: 14.2.3
repository: https://charts.bitnami.com/bitnami repository: https://charts.bitnami.com/bitnami
condition: postgresql.enabled condition: postgresql.enabled

View file

@ -1,7 +1,7 @@
# dendrite # dendrite
![Version: 0.13.6](https://img.shields.io/badge/Version-0.13.6-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 0.13.5](https://img.shields.io/badge/AppVersion-0.13.5-informational?style=flat-square) ![Version: 0.14.0](https://img.shields.io/badge/Version-0.14.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 0.13.6](https://img.shields.io/badge/AppVersion-0.13.6-informational?style=flat-square)
Dendrite Matrix Homeserver Dendrite Matrix Homeserver
Status: **NOT PRODUCTION READY** Status: **NOT PRODUCTION READY**
@ -37,7 +37,7 @@ Create a folder `appservices` and place your configurations in there. The confi
| Repository | Name | Version | | Repository | Name | Version |
|------------|------|---------| |------------|------|---------|
| https://charts.bitnami.com/bitnami | postgresql | 12.1.7 | | https://charts.bitnami.com/bitnami | postgresql | 14.2.3 |
## Values ## Values
| Key | Type | Default | Description | | Key | Type | Default | Description |
@ -48,16 +48,19 @@ Create a folder `appservices` and place your configurations in there. The confi
| signing_key.create | bool | `true` | Create a new signing key, if not exists | | signing_key.create | bool | `true` | Create a new signing key, if not exists |
| signing_key.existingSecret | string | `""` | Use an existing secret | | signing_key.existingSecret | string | `""` | Use an existing secret |
| resources | object | sets some sane default values | Default resource requests/limits. | | resources | object | sets some sane default values | Default resource requests/limits. |
| persistence.jetstream | object | `{"capacity":"1Gi","existingClaim":""}` | The storage class to use for volume claims. Used unless specified at the specific component. Defaults to the cluster default storage class. # If defined, storageClassName: <storageClass> # If set to "-", storageClassName: "", which disables dynamic provisioning # If undefined (the default) or set to null, no storageClassName spec is # set, choosing the default provisioner. (gp2 on AWS, standard on # GKE, AWS & OpenStack) # storageClass: "" | | persistence.storageClass | string | `nil` | The storage class to use for volume claims. Used unless specified at the specific component. Defaults to the cluster default storage class. If defined, storageClassName: <storageClass> If set to "-", storageClassName: "", which disables dynamic provisioning If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. (gp2 on AWS, standard on GKE, AWS & OpenStack) |
| persistence.jetstream.existingClaim | string | `""` | Use an existing volume claim for jetstream | | persistence.jetstream.existingClaim | string | `""` | Use an existing volume claim for jetstream |
| persistence.jetstream.capacity | string | `"1Gi"` | PVC Storage Request for the jetstream volume | | persistence.jetstream.capacity | string | `"1Gi"` | PVC Storage Request for the jetstream volume |
| persistence.jetstream.storageClass | string | `nil` | The storage class to use for volume claims. Defaults to persistence.storageClass If defined, storageClassName: <storageClass> If set to "-", storageClassName: "", which disables dynamic provisioning If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. (gp2 on AWS, standard on GKE, AWS & OpenStack) |
| persistence.media.existingClaim | string | `""` | Use an existing volume claim for media files | | persistence.media.existingClaim | string | `""` | Use an existing volume claim for media files |
| persistence.media.capacity | string | `"1Gi"` | PVC Storage Request for the media volume | | persistence.media.capacity | string | `"1Gi"` | PVC Storage Request for the media volume |
| persistence.media.storageClass | string | `nil` | The storage class to use for volume claims. Defaults to persistence.storageClass If defined, storageClassName: <storageClass> If set to "-", storageClassName: "", which disables dynamic provisioning If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. (gp2 on AWS, standard on GKE, AWS & OpenStack) |
| persistence.search.existingClaim | string | `""` | Use an existing volume claim for the fulltext search index | | persistence.search.existingClaim | string | `""` | Use an existing volume claim for the fulltext search index |
| persistence.search.capacity | string | `"1Gi"` | PVC Storage Request for the search volume | | persistence.search.capacity | string | `"1Gi"` | PVC Storage Request for the search volume |
| persistence.search.storageClass | string | `nil` | The storage class to use for volume claims. Defaults to persistence.storageClass If defined, storageClassName: <storageClass> If set to "-", storageClassName: "", which disables dynamic provisioning If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. (gp2 on AWS, standard on GKE, AWS & OpenStack) |
| extraVolumes | list | `[]` | Add additional volumes to the Dendrite Pod | | extraVolumes | list | `[]` | Add additional volumes to the Dendrite Pod |
| extraVolumeMounts | list | `[]` | Configure additional mount points volumes in the Dendrite Pod | | extraVolumeMounts | list | `[]` | Configure additional mount points volumes in the Dendrite Pod |
| strategy.type | string | `"RollingUpdate"` | Strategy to use for rolling updates (e.g. Recreate, RollingUpdate) If you are using ReadWriteOnce volumes, you should probably use Recreate | | strategy.type | string | `"Recreate"` | Strategy to use for rolling updates (e.g. Recreate, RollingUpdate) If you are using ReadWriteOnce volumes, you should probably use Recreate |
| strategy.rollingUpdate.maxUnavailable | string | `"25%"` | Maximum number of pods that can be unavailable during the update process | | strategy.rollingUpdate.maxUnavailable | string | `"25%"` | Maximum number of pods that can be unavailable during the update process |
| strategy.rollingUpdate.maxSurge | string | `"25%"` | Maximum number of pods that can be scheduled above the desired number of pods | | strategy.rollingUpdate.maxSurge | string | `"25%"` | Maximum number of pods that can be scheduled above the desired number of pods |
| dendrite_config.version | int | `2` | | | dendrite_config.version | int | `2` | |
@ -139,7 +142,7 @@ Create a folder `appservices` and place your configurations in there. The confi
| dendrite_config.logging | list | `[{"level":"info","type":"std"}]` | Default logging configuration | | dendrite_config.logging | list | `[{"level":"info","type":"std"}]` | Default logging configuration |
| postgresql.enabled | bool | See value.yaml | Enable and configure postgres as the database for dendrite. | | postgresql.enabled | bool | See value.yaml | Enable and configure postgres as the database for dendrite. |
| postgresql.image.repository | string | `"bitnami/postgresql"` | | | postgresql.image.repository | string | `"bitnami/postgresql"` | |
| postgresql.image.tag | string | `"15.1.0"` | | | postgresql.image.tag | string | `"16.2.0"` | |
| postgresql.auth.username | string | `"dendrite"` | | | postgresql.auth.username | string | `"dendrite"` | |
| postgresql.auth.password | string | `"changeme"` | | | postgresql.auth.password | string | `"changeme"` | |
| postgresql.auth.database | string | `"dendrite"` | | | postgresql.auth.database | string | `"dendrite"` | |
@ -186,3 +189,5 @@ grafana:
``` ```
PS: The label `release=kube-prometheus-stack` is setup with the helmchart of the Prometheus Operator. For Grafana Dashboards it may be necessary to enable scanning in the correct namespaces (or ALL), enabled by `sidecar.dashboards.searchNamespace` in [Helmchart of grafana](https://artifacthub.io/packages/helm/grafana/grafana) (which is part of PrometheusOperator, so `grafana.sidecar.dashboards.searchNamespace`) PS: The label `release=kube-prometheus-stack` is setup with the helmchart of the Prometheus Operator. For Grafana Dashboards it may be necessary to enable scanning in the correct namespaces (or ALL), enabled by `sidecar.dashboards.searchNamespace` in [Helmchart of grafana](https://artifacthub.io/packages/helm/grafana/grafana) (which is part of PrometheusOperator, so `grafana.sidecar.dashboards.searchNamespace`)
----------------------------------------------
Autogenerated from chart metadata using [helm-docs v1.13.0](https://github.com/norwoodj/helm-docs/releases/v1.13.0)

View file

@ -4,6 +4,7 @@
{{- $wellKnownServerHost := default $serverNameHost (regexFind "^(\\[.+\\])?[^:]*" .Values.dendrite_config.global.well_known_server_name) -}} {{- $wellKnownServerHost := default $serverNameHost (regexFind "^(\\[.+\\])?[^:]*" .Values.dendrite_config.global.well_known_server_name) -}}
{{- $wellKnownClientHost := default $serverNameHost (regexFind "//(\\[.+\\])?[^:/]*" .Values.dendrite_config.global.well_known_client_name | trimAll "/") -}} {{- $wellKnownClientHost := default $serverNameHost (regexFind "//(\\[.+\\])?[^:/]*" .Values.dendrite_config.global.well_known_client_name | trimAll "/") -}}
{{- $allHosts := list $serverNameHost $wellKnownServerHost $wellKnownClientHost | uniq -}} {{- $allHosts := list $serverNameHost $wellKnownServerHost $wellKnownClientHost | uniq -}}
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} {{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1 apiVersion: networking.k8s.io/v1
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} {{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
@ -56,7 +57,7 @@ spec:
service: service:
name: {{ $fullName }} name: {{ $fullName }}
port: port:
name: http number: {{ $.Values.service.port }}
{{- else }} {{- else }}
serviceName: {{ $fullName }} serviceName: {{ $fullName }}
servicePort: http servicePort: http
@ -72,7 +73,7 @@ spec:
service: service:
name: {{ $fullName }} name: {{ $fullName }}
port: port:
name: http number: {{ $.Values.service.port }}
{{- else }} {{- else }}
serviceName: {{ $fullName }} serviceName: {{ $fullName }}
servicePort: http servicePort: http
@ -88,7 +89,7 @@ spec:
service: service:
name: {{ $fullName }} name: {{ $fullName }}
port: port:
name: http number: {{ $.Values.service.port }}
{{- else }} {{- else }}
serviceName: {{ $fullName }} serviceName: {{ $fullName }}
servicePort: http servicePort: http
@ -105,7 +106,7 @@ spec:
service: service:
name: {{ $fullName }} name: {{ $fullName }}
port: port:
name: http number: {{ $.Values.service.port }}
{{- else }} {{- else }}
serviceName: {{ $fullName }} serviceName: {{ $fullName }}
servicePort: http servicePort: http

View file

@ -14,4 +14,4 @@ spec:
- name: http - name: http
protocol: TCP protocol: TCP
port: {{ .Values.service.port }} port: {{ .Values.service.port }}
targetPort: 8008 targetPort: http

View file

@ -26,13 +26,13 @@ persistence:
# -- The storage class to use for volume claims. # -- The storage class to use for volume claims.
# Used unless specified at the specific component. # Used unless specified at the specific component.
# Defaults to the cluster default storage class. # Defaults to the cluster default storage class.
## If defined, storageClassName: <storageClass> # If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning # If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is # If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on # set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack) # GKE, AWS & OpenStack)
## #
# storageClass: "" storageClass:
jetstream: jetstream:
# -- Use an existing volume claim for jetstream # -- Use an existing volume claim for jetstream
existingClaim: "" existingClaim: ""
@ -40,13 +40,12 @@ persistence:
capacity: "1Gi" capacity: "1Gi"
# -- The storage class to use for volume claims. # -- The storage class to use for volume claims.
# Defaults to persistence.storageClass # Defaults to persistence.storageClass
## If defined, storageClassName: <storageClass> # If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning # If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is # If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on # set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack) # GKE, AWS & OpenStack)
## storageClass:
# storageClass: ""
media: media:
# -- Use an existing volume claim for media files # -- Use an existing volume claim for media files
existingClaim: "" existingClaim: ""
@ -54,13 +53,12 @@ persistence:
capacity: "1Gi" capacity: "1Gi"
# -- The storage class to use for volume claims. # -- The storage class to use for volume claims.
# Defaults to persistence.storageClass # Defaults to persistence.storageClass
## If defined, storageClassName: <storageClass> # If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning # If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is # If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on # set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack) # GKE, AWS & OpenStack)
## storageClass:
# storageClass: ""
search: search:
# -- Use an existing volume claim for the fulltext search index # -- Use an existing volume claim for the fulltext search index
existingClaim: "" existingClaim: ""
@ -68,13 +66,12 @@ persistence:
capacity: "1Gi" capacity: "1Gi"
# -- The storage class to use for volume claims. # -- The storage class to use for volume claims.
# Defaults to persistence.storageClass # Defaults to persistence.storageClass
## If defined, storageClassName: <storageClass> # If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning # If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is # If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on # set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack) # GKE, AWS & OpenStack)
## storageClass:
# storageClass: ""
# -- Add additional volumes to the Dendrite Pod # -- Add additional volumes to the Dendrite Pod
extraVolumes: [] extraVolumes: []
@ -92,7 +89,7 @@ extraVolumeMounts: []
strategy: strategy:
# -- Strategy to use for rolling updates (e.g. Recreate, RollingUpdate) # -- Strategy to use for rolling updates (e.g. Recreate, RollingUpdate)
# If you are using ReadWriteOnce volumes, you should probably use Recreate # If you are using ReadWriteOnce volumes, you should probably use Recreate
type: RollingUpdate type: Recreate
rollingUpdate: rollingUpdate:
# -- Maximum number of pods that can be unavailable during the update process # -- Maximum number of pods that can be unavailable during the update process
maxUnavailable: 25% maxUnavailable: 25%
@ -378,7 +375,7 @@ postgresql:
enabled: false enabled: false
image: image:
repository: bitnami/postgresql repository: bitnami/postgresql
tag: "15.1.0" tag: "16.2.0"
auth: auth:
username: dendrite username: dendrite
password: changeme password: changeme

View file

@ -76,6 +76,8 @@ func MakeAuthAPI(
// add the user to Sentry, if enabled // add the user to Sentry, if enabled
hub := sentry.GetHubFromContext(req.Context()) hub := sentry.GetHubFromContext(req.Context())
if hub != nil { if hub != nil {
// clone the hub, so we don't send garbage events with e.g. mismatching rooms/event_ids
hub = hub.Clone()
hub.Scope().SetUser(sentry.User{ hub.Scope().SetUser(sentry.User{
Username: device.UserID, Username: device.UserID,
}) })

View file

@ -38,9 +38,9 @@ var (
ErrPasswordTooLong = fmt.Errorf("password too long: max %d characters", maxPasswordLength) ErrPasswordTooLong = fmt.Errorf("password too long: max %d characters", maxPasswordLength)
ErrPasswordWeak = fmt.Errorf("password too weak: min %d characters", minPasswordLength) ErrPasswordWeak = fmt.Errorf("password too weak: min %d characters", minPasswordLength)
ErrUsernameTooLong = fmt.Errorf("username exceeds the maximum length of %d characters", maxUsernameLength) ErrUsernameTooLong = fmt.Errorf("username exceeds the maximum length of %d characters", maxUsernameLength)
ErrUsernameInvalid = errors.New("username can only contain characters a-z, 0-9, or '_-./='") ErrUsernameInvalid = errors.New("username can only contain characters a-z, 0-9, or '_+-./='")
ErrUsernameUnderscore = errors.New("username cannot start with a '_'") ErrUsernameUnderscore = errors.New("username cannot start with a '_'")
validUsernameRegex = regexp.MustCompile(`^[0-9a-z_\-=./]+$`) validUsernameRegex = regexp.MustCompile(`^[0-9a-z_\-+=./]+$`)
) )
// ValidatePassword returns an error if the password is invalid // ValidatePassword returns an error if the password is invalid
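The only functional change in this hunk is adding '+' to the allowed character class, so E.164-style localparts (exercised by the new registration test below) now validate. A tiny standalone check of the updated pattern, copied from the diff; the phone-number localpart is just an example value.

package main

import (
	"fmt"
	"regexp"
)

// The updated localpart pattern from the hunk above.
var validUsernameRegex = regexp.MustCompile(`^[0-9a-z_\-+=./]+$`)

func main() {
	fmt.Println(validUsernameRegex.MatchString("+14155551234")) // true
	fmt.Println(validUsernameRegex.MatchString("notallowed#"))  // false
}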

View file

@ -129,6 +129,11 @@ func Test_validateUsername(t *testing.T) {
localpart: "i_am_allowed=1", localpart: "i_am_allowed=1",
domain: "localhost", domain: "localhost",
}, },
{
name: "special characters are allowed 3",
localpart: "+55555555555",
domain: "localhost",
},
{ {
name: "not all special characters are allowed", name: "not all special characters are allowed",
localpart: "notallowed#", // contains # localpart: "notallowed#", // contains #
@ -139,6 +144,16 @@ func Test_validateUsername(t *testing.T) {
JSON: spec.InvalidUsername(ErrUsernameInvalid.Error()), JSON: spec.InvalidUsername(ErrUsernameInvalid.Error()),
}, },
}, },
{
name: "not all special characters are allowed 2",
localpart: "<notallowed", // contains <
domain: "localhost",
wantErr: ErrUsernameInvalid,
wantJSON: &util.JSONResponse{
Code: http.StatusBadRequest,
JSON: spec.InvalidUsername(ErrUsernameInvalid.Error()),
},
},
{ {
name: "username containing numbers", name: "username containing numbers",
localpart: "hello1337", localpart: "hello1337",

View file

@ -18,7 +18,7 @@ var build string
const ( const (
VersionMajor = 0 VersionMajor = 0
VersionMinor = 13 VersionMinor = 13
VersionPatch = 5 VersionPatch = 6
VersionTag = "" // example: "rc1" VersionTag = "" // example: "rc1"
gitRevLen = 7 // 7 matches the displayed characters on github.com gitRevLen = 7 // 7 matches the displayed characters on github.com

View file

@ -123,6 +123,7 @@ func makeDownloadAPI(
// Set internal headers returned regardless of the outcome of the request // Set internal headers returned regardless of the outcome of the request
util.SetCORSHeaders(w) util.SetCORSHeaders(w)
w.Header().Set("Cross-Origin-Resource-Policy", "cross-origin")
// Content-Type will be overridden in case of returning file data, else we respond with JSON-formatted errors // Content-Type will be overridden in case of returning file data, else we respond with JSON-formatted errors
w.Header().Set("Content-Type", "application/json") w.Header().Set("Content-Type", "application/json")

View file

@ -108,6 +108,8 @@ func MakeRelayAPI(
// add the user to Sentry, if enabled // add the user to Sentry, if enabled
hub := sentry.GetHubFromContext(req.Context()) hub := sentry.GetHubFromContext(req.Context())
if hub != nil { if hub != nil {
// clone the hub, so we don't send garbage events with e.g. mismatching rooms/event_ids
hub = hub.Clone()
hub.Scope().SetTag("origin", string(fedReq.Origin())) hub.Scope().SetTag("origin", string(fedReq.Origin()))
hub.Scope().SetTag("uri", fedReq.RequestURI()) hub.Scope().SetTag("uri", fedReq.RequestURI())
} }

View file

@ -23,7 +23,7 @@ import (
"strings" "strings"
"sync" "sync"
"github.com/matrix-org/dendrite/roomserver/types" "github.com/matrix-org/dendrite/roomserver/storage/tables"
"github.com/matrix-org/gomatrixserverlib" "github.com/matrix-org/gomatrixserverlib"
"github.com/matrix-org/gomatrixserverlib/spec" "github.com/matrix-org/gomatrixserverlib/spec"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
@ -34,22 +34,28 @@ const MRoomServerACL = "m.room.server_acl"
type ServerACLDatabase interface { type ServerACLDatabase interface {
// GetKnownRooms returns a list of all rooms we know about. // GetKnownRooms returns a list of all rooms we know about.
GetKnownRooms(ctx context.Context) ([]string, error) GetKnownRooms(ctx context.Context) ([]string, error)
// GetStateEvent returns the state event of a given type for a given room with a given state key
// If no event could be found, returns nil // GetBulkStateContent returns all state events which match a given room ID and a given state key tuple. Both must be satisfied for a match.
// If there was an issue during the retrieval, returns an error // If a tuple has the StateKey of '*' and allowWildcards=true then all state events with the EventType should be returned.
GetStateEvent(ctx context.Context, roomID, evType, stateKey string) (*types.HeaderedEvent, error) GetBulkStateContent(ctx context.Context, roomIDs []string, tuples []gomatrixserverlib.StateKeyTuple, allowWildcards bool) ([]tables.StrippedEvent, error)
} }
type ServerACLs struct { type ServerACLs struct {
acls map[string]*serverACL // room ID -> ACL acls map[string]*serverACL // room ID -> ACL
aclsMutex sync.RWMutex // protects the above aclsMutex sync.RWMutex // protects the above
aclRegexCache map[string]**regexp.Regexp // Cache from "serverName" -> pointer to a regex
aclRegexCacheMutex sync.RWMutex // protects the above
} }
func NewServerACLs(db ServerACLDatabase) *ServerACLs { func NewServerACLs(db ServerACLDatabase) *ServerACLs {
ctx := context.TODO() ctx := context.TODO()
acls := &ServerACLs{ acls := &ServerACLs{
acls: make(map[string]*serverACL), acls: make(map[string]*serverACL),
// Be generous when creating the cache, as in reality
// there are hundreds of servers in an ACL.
aclRegexCache: make(map[string]**regexp.Regexp, 100),
} }
// Look up all of the rooms that the current state server knows about. // Look up all of the rooms that the current state server knows about.
rooms, err := db.GetKnownRooms(ctx) rooms, err := db.GetKnownRooms(ctx)
if err != nil { if err != nil {
@ -58,16 +64,16 @@ func NewServerACLs(db ServerACLDatabase) *ServerACLs {
// For each room, let's see if we have a server ACL state event. If we // For each room, let's see if we have a server ACL state event. If we
// do then we'll process it into memory so that we have the regexes to // do then we'll process it into memory so that we have the regexes to
// hand. // hand.
for _, room := range rooms {
state, err := db.GetStateEvent(ctx, room, MRoomServerACL, "") events, err := db.GetBulkStateContent(ctx, rooms, []gomatrixserverlib.StateKeyTuple{{EventType: MRoomServerACL, StateKey: ""}}, false)
if err != nil { if err != nil {
logrus.WithError(err).Errorf("Failed to get server ACLs for room %q", room) logrus.WithError(err).Errorf("Failed to get server ACLs for all rooms: %q", err)
continue
}
if state != nil {
acls.OnServerACLUpdate(state.PDU)
}
} }
for _, event := range events {
acls.OnServerACLUpdate(event)
}
return acls return acls
} }
@ -79,8 +85,8 @@ type ServerACL struct {
type serverACL struct { type serverACL struct {
ServerACL ServerACL
allowedRegexes []*regexp.Regexp allowedRegexes []**regexp.Regexp
deniedRegexes []*regexp.Regexp deniedRegexes []**regexp.Regexp
} }
func compileACLRegex(orig string) (*regexp.Regexp, error) { func compileACLRegex(orig string) (*regexp.Regexp, error) {
@ -90,9 +96,28 @@ func compileACLRegex(orig string) (*regexp.Regexp, error) {
return regexp.Compile(escaped) return regexp.Compile(escaped)
} }
func (s *ServerACLs) OnServerACLUpdate(state gomatrixserverlib.PDU) { // cachedCompileACLRegex is a wrapper around compileACLRegex with added caching
func (s *ServerACLs) cachedCompileACLRegex(orig string) (**regexp.Regexp, error) {
s.aclRegexCacheMutex.RLock()
re, ok := s.aclRegexCache[orig]
if ok {
s.aclRegexCacheMutex.RUnlock()
return re, nil
}
s.aclRegexCacheMutex.RUnlock()
compiled, err := compileACLRegex(orig)
if err != nil {
return nil, err
}
s.aclRegexCacheMutex.Lock()
defer s.aclRegexCacheMutex.Unlock()
s.aclRegexCache[orig] = &compiled
return &compiled, nil
}
func (s *ServerACLs) OnServerACLUpdate(strippedEvent tables.StrippedEvent) {
acls := &serverACL{} acls := &serverACL{}
if err := json.Unmarshal(state.Content(), &acls.ServerACL); err != nil { if err := json.Unmarshal([]byte(strippedEvent.ContentValue), &acls.ServerACL); err != nil {
logrus.WithError(err).Errorf("Failed to unmarshal state content for server ACLs") logrus.WithError(err).Errorf("Failed to unmarshal state content for server ACLs")
return return
} }
@ -101,14 +126,14 @@ func (s *ServerACLs) OnServerACLUpdate(state gomatrixserverlib.PDU) {
// special characters and then replace * and ? with their regex counterparts. // special characters and then replace * and ? with their regex counterparts.
// https://matrix.org/docs/spec/client_server/r0.6.1#m-room-server-acl // https://matrix.org/docs/spec/client_server/r0.6.1#m-room-server-acl
for _, orig := range acls.Allowed { for _, orig := range acls.Allowed {
if expr, err := compileACLRegex(orig); err != nil { if expr, err := s.cachedCompileACLRegex(orig); err != nil {
logrus.WithError(err).Errorf("Failed to compile allowed regex") logrus.WithError(err).Errorf("Failed to compile allowed regex")
} else { } else {
acls.allowedRegexes = append(acls.allowedRegexes, expr) acls.allowedRegexes = append(acls.allowedRegexes, expr)
} }
} }
for _, orig := range acls.Denied { for _, orig := range acls.Denied {
if expr, err := compileACLRegex(orig); err != nil { if expr, err := s.cachedCompileACLRegex(orig); err != nil {
logrus.WithError(err).Errorf("Failed to compile denied regex") logrus.WithError(err).Errorf("Failed to compile denied regex")
} else { } else {
acls.deniedRegexes = append(acls.deniedRegexes, expr) acls.deniedRegexes = append(acls.deniedRegexes, expr)
@ -118,10 +143,15 @@ func (s *ServerACLs) OnServerACLUpdate(state gomatrixserverlib.PDU) {
"allow_ip_literals": acls.AllowIPLiterals, "allow_ip_literals": acls.AllowIPLiterals,
"num_allowed": len(acls.allowedRegexes), "num_allowed": len(acls.allowedRegexes),
"num_denied": len(acls.deniedRegexes), "num_denied": len(acls.deniedRegexes),
}).Debugf("Updating server ACLs for %q", state.RoomID()) }).Debugf("Updating server ACLs for %q", strippedEvent.RoomID)
// Clear out Denied and Allowed, now that we have the compiled regexes.
// They are not needed anymore from this point on.
acls.Denied = nil
acls.Allowed = nil
s.aclsMutex.Lock() s.aclsMutex.Lock()
defer s.aclsMutex.Unlock() defer s.aclsMutex.Unlock()
s.acls[state.RoomID().String()] = acls s.acls[strippedEvent.RoomID] = acls
} }
func (s *ServerACLs) IsServerBannedFromRoom(serverName spec.ServerName, roomID string) bool { func (s *ServerACLs) IsServerBannedFromRoom(serverName spec.ServerName, roomID string) bool {
@ -151,14 +181,14 @@ func (s *ServerACLs) IsServerBannedFromRoom(serverName spec.ServerName, roomID s
// Check if the hostname matches one of the denied regexes. If it does then // Check if the hostname matches one of the denied regexes. If it does then
// the server is banned from the room. // the server is banned from the room.
for _, expr := range acls.deniedRegexes { for _, expr := range acls.deniedRegexes {
if expr.MatchString(string(serverName)) { if (*expr).MatchString(string(serverName)) {
return true return true
} }
} }
// Check if the hostname matches one of the allowed regexes. If it does then // Check if the hostname matches one of the allowed regexes. If it does then
// the server is NOT banned from the room. // the server is NOT banned from the room.
for _, expr := range acls.allowedRegexes { for _, expr := range acls.allowedRegexes {
if expr.MatchString(string(serverName)) { if (*expr).MatchString(string(serverName)) {
return false return false
} }
} }
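To make the escaping step that OnServerACLUpdate leans on concrete: each ACL entry is regex-escaped, then the Matrix wildcards * and ? are swapped for their regex counterparts, and the compiled result is what the cached allowedRegexes/deniedRegexes pointers refer to. The sketch below is a simplified, anchored variant for illustration only; compileACLRegex in acls.go may differ in detail (e.g. anchoring).

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// compileACLGlob escapes regex metacharacters in an ACL entry and then
// translates the wildcards '*' and '?' into their regex equivalents
// (simplified, hypothetical variant of compileACLRegex).
func compileACLGlob(orig string) (*regexp.Regexp, error) {
	escaped := regexp.QuoteMeta(orig)
	escaped = strings.ReplaceAll(escaped, `\*`, `.*`)
	escaped = strings.ReplaceAll(escaped, `\?`, `.`)
	return regexp.Compile("^" + escaped + "$")
}

func main() {
	re, err := compileACLGlob("*.hello.world")
	if err != nil {
		panic(err)
	}
	fmt.Println(re.MatchString("matrix.hello.world")) // true
	fmt.Println(re.MatchString("hello.world"))        // false
}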

View file

@ -15,8 +15,14 @@
package acls package acls
import ( import (
"context"
"regexp" "regexp"
"testing" "testing"
"github.com/matrix-org/dendrite/roomserver/storage/tables"
"github.com/matrix-org/gomatrixserverlib"
"github.com/matrix-org/gomatrixserverlib/spec"
"github.com/stretchr/testify/assert"
) )
func TestOpenACLsWithBlacklist(t *testing.T) { func TestOpenACLsWithBlacklist(t *testing.T) {
@ -38,8 +44,8 @@ func TestOpenACLsWithBlacklist(t *testing.T) {
ServerACL: ServerACL{ ServerACL: ServerACL{
AllowIPLiterals: true, AllowIPLiterals: true,
}, },
allowedRegexes: []*regexp.Regexp{allowRegex}, allowedRegexes: []**regexp.Regexp{&allowRegex},
deniedRegexes: []*regexp.Regexp{denyRegex}, deniedRegexes: []**regexp.Regexp{&denyRegex},
} }
if acls.IsServerBannedFromRoom("1.2.3.4", roomID) { if acls.IsServerBannedFromRoom("1.2.3.4", roomID) {
@ -77,8 +83,8 @@ func TestDefaultACLsWithWhitelist(t *testing.T) {
ServerACL: ServerACL{ ServerACL: ServerACL{
AllowIPLiterals: false, AllowIPLiterals: false,
}, },
allowedRegexes: []*regexp.Regexp{allowRegex}, allowedRegexes: []**regexp.Regexp{&allowRegex},
deniedRegexes: []*regexp.Regexp{}, deniedRegexes: []**regexp.Regexp{},
} }
if !acls.IsServerBannedFromRoom("1.2.3.4", roomID) { if !acls.IsServerBannedFromRoom("1.2.3.4", roomID) {
@ -103,3 +109,45 @@ func TestDefaultACLsWithWhitelist(t *testing.T) {
t.Fatal("Expected qux.com:4567 to be allowed but wasn't") t.Fatal("Expected qux.com:4567 to be allowed but wasn't")
} }
} }
var (
content1 = `{"allow":["*"],"allow_ip_literals":false,"deny":["hello.world", "*.hello.world"]}`
)
type dummyACLDB struct{}
func (d dummyACLDB) GetKnownRooms(ctx context.Context) ([]string, error) {
return []string{"1", "2"}, nil
}
func (d dummyACLDB) GetBulkStateContent(ctx context.Context, roomIDs []string, tuples []gomatrixserverlib.StateKeyTuple, allowWildcards bool) ([]tables.StrippedEvent, error) {
return []tables.StrippedEvent{
{
RoomID: "1",
ContentValue: content1,
},
{
RoomID: "2",
ContentValue: content1,
},
}, nil
}
func TestCachedRegex(t *testing.T) {
db := dummyACLDB{}
wantBannedServer := spec.ServerName("hello.world")
acls := NewServerACLs(db)
// Check that hello.world is banned in room 1
banned := acls.IsServerBannedFromRoom(wantBannedServer, "1")
assert.True(t, banned)
// Check that hello.world is banned in room 2
banned = acls.IsServerBannedFromRoom(wantBannedServer, "2")
assert.True(t, banned)
// Check that matrix.hello.world is banned in room 2
banned = acls.IsServerBannedFromRoom("matrix."+wantBannedServer, "2")
assert.True(t, banned)
}

View file

@ -8,7 +8,6 @@ import (
"github.com/matrix-org/dendrite/roomserver/types" "github.com/matrix-org/dendrite/roomserver/types"
"github.com/matrix-org/gomatrixserverlib" "github.com/matrix-org/gomatrixserverlib"
"github.com/matrix-org/gomatrixserverlib/spec" "github.com/matrix-org/gomatrixserverlib/spec"
"github.com/matrix-org/util"
) )
type PerformCreateRoomRequest struct { type PerformCreateRoomRequest struct {
@ -51,16 +50,14 @@ type PerformLeaveResponse struct {
} }
type InviteInput struct { type InviteInput struct {
RoomID spec.RoomID RoomID spec.RoomID
Inviter spec.UserID Inviter spec.UserID
Invitee spec.UserID Invitee spec.UserID
DisplayName string Reason string
AvatarURL string IsDirect bool
Reason string KeyID gomatrixserverlib.KeyID
IsDirect bool PrivateKey ed25519.PrivateKey
KeyID gomatrixserverlib.KeyID EventTime time.Time
PrivateKey ed25519.PrivateKey
EventTime time.Time
} }
type PerformInviteRequest struct { type PerformInviteRequest struct {
@ -91,14 +88,44 @@ type PerformBackfillRequest struct {
VirtualHost spec.ServerName `json:"virtual_host"` VirtualHost spec.ServerName `json:"virtual_host"`
} }
// PrevEventIDs returns the prev_event IDs of all backwards extremities, de-duplicated in a lexicographically sorted order. // limitPrevEventIDs is the maximum number of event IDs we
// return when calling PrevEventIDs.
const limitPrevEventIDs = 100
// PrevEventIDs returns the prev_event IDs of the backwards extremities, de-duplicated and
// capped at limitPrevEventIDs entries, since both Synapse and Dendrite stop backfilling
// once they have gathered 100 events.
func (r *PerformBackfillRequest) PrevEventIDs() []string { func (r *PerformBackfillRequest) PrevEventIDs() []string {
var prevEventIDs []string var uniqueIDs map[string]struct{}
for _, pes := range r.BackwardsExtremities {
prevEventIDs = append(prevEventIDs, pes...) // Create a unique eventID map of either 100 or len(r.BackwardsExtremities).
// 100 since Synapse/Dendrite stops after reaching 100 events.
if len(r.BackwardsExtremities) > limitPrevEventIDs {
uniqueIDs = make(map[string]struct{}, limitPrevEventIDs)
} else {
uniqueIDs = make(map[string]struct{}, len(r.BackwardsExtremities))
} }
prevEventIDs = util.UniqueStrings(prevEventIDs)
return prevEventIDs outerLoop:
for _, pes := range r.BackwardsExtremities {
for _, evID := range pes {
uniqueIDs[evID] = struct{}{}
// We found enough unique eventIDs.
if len(uniqueIDs) >= limitPrevEventIDs {
break outerLoop
}
}
}
// map -> []string
result := make([]string, len(uniqueIDs))
i := 0
for evID := range uniqueIDs {
result[i] = evID
i++
}
return result
} }
// PerformBackfillResponse is a response to PerformBackfill. // PerformBackfillResponse is a response to PerformBackfill.

View file

@ -0,0 +1,81 @@
package api
import (
"fmt"
"math/rand"
"testing"
"github.com/stretchr/testify/assert"
)
func BenchmarkPrevEventIDs(b *testing.B) {
for _, x := range []int64{1, 10, 100, 500, 1000, 2000} {
benchPrevEventIDs(b, int(x))
}
}
func benchPrevEventIDs(b *testing.B, count int) {
bwExtrems := generateBackwardsExtremities(b, count)
backfiller := PerformBackfillRequest{
BackwardsExtremities: bwExtrems,
}
b.Run(fmt.Sprintf("Original%d", count), func(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
prevIDs := backfiller.PrevEventIDs()
_ = prevIDs
}
})
}
type testLike interface {
Helper()
}
const randomIDCharsCount = 10
func generateBackwardsExtremities(t testLike, count int) map[string][]string {
t.Helper()
result := make(map[string][]string, count)
for i := 0; i < count; i++ {
eventID := randomEventId(int64(i))
result[eventID] = []string{
randomEventId(int64(i + 1)),
randomEventId(int64(i + 2)),
randomEventId(int64(i + 3)),
}
}
return result
}
const alphanumerics = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
// randomEventId generates a pseudo-random string of randomIDCharsCount characters, seeded with src.
func randomEventId(src int64) string {
randSrc := rand.NewSource(src)
b := make([]byte, randomIDCharsCount)
for i := range b {
b[i] = alphanumerics[randSrc.Int63()%int64(len(alphanumerics))]
}
return string(b)
}
func TestPrevEventIDs(t *testing.T) {
// generate 10 backwards extremities
bwExtrems := generateBackwardsExtremities(t, 10)
backfiller := PerformBackfillRequest{
BackwardsExtremities: bwExtrems,
}
prevIDs := backfiller.PrevEventIDs()
// Given how "generateBackwardsExtremities" works, this
// generates 12 unique event IDs
assert.Equal(t, 12, len(prevIDs))
// generate 200 backwards extremities
backfiller.BackwardsExtremities = generateBackwardsExtremities(t, 200)
prevIDs = backfiller.PrevEventIDs()
// PrevEventIDs returns at max 100 event IDs
assert.Equal(t, 100, len(prevIDs))
}

View file

@ -108,12 +108,14 @@ type worker struct {
r *Inputer r *Inputer
roomID string roomID string
subscription *nats.Subscription subscription *nats.Subscription
sentryHub *sentry.Hub
} }
func (r *Inputer) startWorkerForRoom(roomID string) { func (r *Inputer) startWorkerForRoom(roomID string) {
v, loaded := r.workers.LoadOrStore(roomID, &worker{ v, loaded := r.workers.LoadOrStore(roomID, &worker{
r: r, r: r,
roomID: roomID, roomID: roomID,
sentryHub: sentry.CurrentHub().Clone(),
}) })
w := v.(*worker) w := v.(*worker)
w.Lock() w.Lock()
@ -265,9 +267,9 @@ func (w *worker) _next() {
// Look up what the next event is that's waiting to be processed. // Look up what the next event is that's waiting to be processed.
ctx, cancel := context.WithTimeout(w.r.ProcessContext.Context(), time.Minute) ctx, cancel := context.WithTimeout(w.r.ProcessContext.Context(), time.Minute)
defer cancel() defer cancel()
if scope := sentry.CurrentHub().Scope(); scope != nil { w.sentryHub.ConfigureScope(func(scope *sentry.Scope) {
scope.SetTag("room_id", w.roomID) scope.SetTag("room_id", w.roomID)
} })
msgs, err := w.subscription.Fetch(1, nats.Context(ctx)) msgs, err := w.subscription.Fetch(1, nats.Context(ctx))
switch err { switch err {
case nil: case nil:
@ -323,9 +325,9 @@ func (w *worker) _next() {
return return
} }
if scope := sentry.CurrentHub().Scope(); scope != nil { w.sentryHub.ConfigureScope(func(scope *sentry.Scope) {
scope.SetTag("event_id", inputRoomEvent.Event.EventID()) scope.SetTag("event_id", inputRoomEvent.Event.EventID())
} })
// Process the room event. If something goes wrong then we'll tell // Process the room event. If something goes wrong then we'll tell
// NATS to terminate the message. We'll store the error result as // NATS to terminate the message. We'll store the error result as
@ -347,7 +349,7 @@ func (w *worker) _next() {
}).Warn("Roomserver rejected event") }).Warn("Roomserver rejected event")
default: default:
if !errors.Is(err, context.DeadlineExceeded) && !errors.Is(err, context.Canceled) { if !errors.Is(err, context.DeadlineExceeded) && !errors.Is(err, context.Canceled) {
sentry.CaptureException(err) w.sentryHub.CaptureException(err)
} }
logrus.WithError(err).WithFields(logrus.Fields{ logrus.WithError(err).WithFields(logrus.Fields{
"room_id": w.roomID, "room_id": w.roomID,

View file

@ -24,6 +24,7 @@ import (
"fmt" "fmt"
"time" "time"
"github.com/matrix-org/dendrite/roomserver/storage/tables"
"github.com/tidwall/gjson" "github.com/tidwall/gjson"
"github.com/matrix-org/gomatrixserverlib" "github.com/matrix-org/gomatrixserverlib"
@ -509,7 +510,13 @@ func (r *Inputer) processRoomEvent(
logrus.WithError(err).Error("failed to get server ACLs") logrus.WithError(err).Error("failed to get server ACLs")
} }
if aclEvent != nil { if aclEvent != nil {
r.ACLs.OnServerACLUpdate(aclEvent) strippedEvent := tables.StrippedEvent{
RoomID: aclEvent.RoomID().String(),
EventType: aclEvent.Type(),
StateKey: *aclEvent.StateKey(),
ContentValue: string(aclEvent.Content()),
}
r.ACLs.OnServerACLUpdate(strippedEvent)
} }
} }
} }

View file

@ -298,6 +298,7 @@ func (u *latestEventsUpdater) latestState() error {
}).Warnf("State reset detected (removing %d events)", removed) }).Warnf("State reset detected (removing %d events)", removed)
sentry.WithScope(func(scope *sentry.Scope) { sentry.WithScope(func(scope *sentry.Scope) {
scope.SetLevel("warning") scope.SetLevel("warning")
scope.SetTag("room_id", u.event.RoomID().String())
scope.SetContext("State reset", map[string]interface{}{ scope.SetContext("State reset", map[string]interface{}{
"Event ID": u.event.EventID(), "Event ID": u.event.EventID(),
"Old state NID": fmt.Sprintf("%d", u.oldStateNID), "Old state NID": fmt.Sprintf("%d", u.oldStateNID),

View file

@ -503,16 +503,14 @@ func (c *Creator) PerformCreateRoom(ctx context.Context, userID spec.UserID, roo
err = c.RSAPI.PerformInvite(ctx, &api.PerformInviteRequest{ err = c.RSAPI.PerformInvite(ctx, &api.PerformInviteRequest{
InviteInput: api.InviteInput{ InviteInput: api.InviteInput{
RoomID: roomID, RoomID: roomID,
Inviter: userID, Inviter: userID,
Invitee: *inviteeUserID, Invitee: *inviteeUserID,
DisplayName: createRequest.UserDisplayName, Reason: "",
AvatarURL: createRequest.UserAvatarURL, IsDirect: createRequest.IsDirect,
Reason: "", KeyID: createRequest.KeyID,
IsDirect: createRequest.IsDirect, PrivateKey: createRequest.PrivateKey,
KeyID: createRequest.KeyID, EventTime: createRequest.EventTime,
PrivateKey: createRequest.PrivateKey,
EventTime: createRequest.EventTime,
}, },
InviteRoomState: globalStrippedState, InviteRoomState: globalStrippedState,
SendAsServer: string(userID.Domain()), SendAsServer: string(userID.Domain()),

View file

@ -144,11 +144,9 @@ func (r *Inviter) PerformInvite(
} }
content := gomatrixserverlib.MemberContent{ content := gomatrixserverlib.MemberContent{
Membership: spec.Invite, Membership: spec.Invite,
DisplayName: req.InviteInput.DisplayName, Reason: req.InviteInput.Reason,
AvatarURL: req.InviteInput.AvatarURL, IsDirect: req.InviteInput.IsDirect,
Reason: req.InviteInput.Reason,
IsDirect: req.InviteInput.IsDirect,
} }
if err = proto.SetContent(content); err != nil { if err = proto.SetContent(content); err != nil {

View file

@ -17,6 +17,7 @@ package producers
import ( import (
"encoding/json" "encoding/json"
"github.com/matrix-org/dendrite/roomserver/storage/tables"
"github.com/nats-io/nats.go" "github.com/nats-io/nats.go"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"github.com/tidwall/gjson" "github.com/tidwall/gjson"
@ -75,7 +76,13 @@ func (r *RoomEventProducer) ProduceRoomEvents(roomID string, updates []api.Outpu
if eventType == acls.MRoomServerACL && update.NewRoomEvent.Event.StateKeyEquals("") { if eventType == acls.MRoomServerACL && update.NewRoomEvent.Event.StateKeyEquals("") {
ev := update.NewRoomEvent.Event.PDU ev := update.NewRoomEvent.Event.PDU
defer r.ACLs.OnServerACLUpdate(ev) strippedEvent := tables.StrippedEvent{
RoomID: ev.RoomID().String(),
EventType: ev.Type(),
StateKey: *ev.StateKey(),
ContentValue: string(ev.Content()),
}
defer r.ACLs.OnServerACLUpdate(strippedEvent)
} }
} }
logger.Tracef("Producing to topic '%s'", r.Topic) logger.Tracef("Producing to topic '%s'", r.Topic)

View file

@ -889,10 +889,10 @@ func (d *Database) assignRoomNID(
} }
// Check if we already have a numeric ID in the database. // Check if we already have a numeric ID in the database.
roomNID, err := d.RoomsTable.SelectRoomNID(ctx, txn, roomID) roomNID, err := d.RoomsTable.SelectRoomNID(ctx, txn, roomID)
if err == sql.ErrNoRows { if errors.Is(err, sql.ErrNoRows) {
// We don't have a numeric ID so insert one into the database. // We don't have a numeric ID so insert one into the database.
roomNID, err = d.RoomsTable.InsertRoomNID(ctx, txn, roomID, roomVersion) roomNID, err = d.RoomsTable.InsertRoomNID(ctx, txn, roomID, roomVersion)
if err == sql.ErrNoRows { if errors.Is(err, sql.ErrNoRows) {
// We raced with another insert so run the select again. // We raced with another insert so run the select again.
roomNID, err = d.RoomsTable.SelectRoomNID(ctx, txn, roomID) roomNID, err = d.RoomsTable.SelectRoomNID(ctx, txn, roomID)
} }
@ -914,10 +914,10 @@ func (d *Database) assignEventTypeNID(
} }
// Check if we already have a numeric ID in the database. // Check if we already have a numeric ID in the database.
eventTypeNID, err := d.EventTypesTable.SelectEventTypeNID(ctx, txn, eventType) eventTypeNID, err := d.EventTypesTable.SelectEventTypeNID(ctx, txn, eventType)
if err == sql.ErrNoRows { if errors.Is(err, sql.ErrNoRows) {
// We don't have a numeric ID so insert one into the database. // We don't have a numeric ID so insert one into the database.
eventTypeNID, err = d.EventTypesTable.InsertEventTypeNID(ctx, txn, eventType) eventTypeNID, err = d.EventTypesTable.InsertEventTypeNID(ctx, txn, eventType)
if err == sql.ErrNoRows { if errors.Is(err, sql.ErrNoRows) {
// We raced with another insert so run the select again. // We raced with another insert so run the select again.
eventTypeNID, err = d.EventTypesTable.SelectEventTypeNID(ctx, txn, eventType) eventTypeNID, err = d.EventTypesTable.SelectEventTypeNID(ctx, txn, eventType)
} }
@ -938,16 +938,19 @@ func (d *EventDatabase) assignStateKeyNID(
} }
// Check if we already have a numeric ID in the database. // Check if we already have a numeric ID in the database.
eventStateKeyNID, err := d.EventStateKeysTable.SelectEventStateKeyNID(ctx, txn, eventStateKey) eventStateKeyNID, err := d.EventStateKeysTable.SelectEventStateKeyNID(ctx, txn, eventStateKey)
if err == sql.ErrNoRows { if errors.Is(err, sql.ErrNoRows) {
// We don't have a numeric ID so insert one into the database. // We don't have a numeric ID so insert one into the database.
eventStateKeyNID, err = d.EventStateKeysTable.InsertEventStateKeyNID(ctx, txn, eventStateKey) eventStateKeyNID, err = d.EventStateKeysTable.InsertEventStateKeyNID(ctx, txn, eventStateKey)
if err == sql.ErrNoRows { if errors.Is(err, sql.ErrNoRows) {
// We raced with another insert so run the select again. // We raced with another insert so run the select again.
eventStateKeyNID, err = d.EventStateKeysTable.SelectEventStateKeyNID(ctx, txn, eventStateKey) eventStateKeyNID, err = d.EventStateKeysTable.SelectEventStateKeyNID(ctx, txn, eventStateKey)
} }
} }
if err != nil {
return 0, err
}
d.Cache.StoreEventStateKey(eventStateKeyNID, eventStateKey) d.Cache.StoreEventStateKey(eventStateKeyNID, eventStateKey)
return eventStateKeyNID, err return eventStateKeyNID, nil
} }
func extractRoomVersionFromCreateEvent(event gomatrixserverlib.PDU) ( func extractRoomVersionFromCreateEvent(event gomatrixserverlib.PDU) (
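These hunks replace direct == comparisons against sql.ErrNoRows with errors.Is, which still matches when a storage helper wraps the sentinel error. A small self-contained illustration of the difference:

package main

import (
	"database/sql"
	"errors"
	"fmt"
)

func main() {
	// A helper somewhere in the storage layer may wrap the sentinel error.
	err := fmt.Errorf("selecting room NID: %w", sql.ErrNoRows)

	fmt.Println(err == sql.ErrNoRows)          // false: wrapping breaks direct comparison
	fmt.Println(errors.Is(err, sql.ErrNoRows)) // true: errors.Is walks the wrap chain
}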

View file

@ -235,6 +235,10 @@ func ExtractContentValue(ev *types.HeaderedEvent) string {
key = "topic" key = "topic"
case "m.room.guest_access": case "m.room.guest_access":
key = "guest_access" key = "guest_access"
case "m.room.server_acl":
// We need the entire content and not only one key, so we can use it
// on startup to generate the ACLs. This is merely a workaround.
return string(content)
} }
result := gjson.GetBytes(content, key) result := gjson.GetBytes(content, key)
if !result.Exists() { if !result.Exists() {
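The new m.room.server_acl case returns the raw content JSON instead of a single gjson key, so the full allow/deny lists survive into the stripped-event table and can rebuild the ACLs on startup. A small sketch of the difference, with an illustrative content blob (not taken from the codebase):

package main

import (
	"fmt"

	"github.com/tidwall/gjson"
)

func main() {
	content := []byte(`{"allow":["*"],"deny":["*.evil.example"],"allow_ip_literals":false}`)

	// Most event types keep a single value, e.g. the "topic" key for m.room.topic.
	fmt.Println(gjson.GetBytes(content, "deny").Raw) // ["*.evil.example"]

	// m.room.server_acl keeps the whole content, so nothing is lost.
	fmt.Println(string(content))
}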

View file

@ -50,6 +50,9 @@ import (
//go:embed static/*.gotmpl //go:embed static/*.gotmpl
var staticContent embed.FS var staticContent embed.FS
//go:embed static/client/login
var loginFallback embed.FS
const HTTPServerTimeout = time.Minute * 5 const HTTPServerTimeout = time.Minute * 5
// CreateClient creates a new client (normally used for media fetch requests). // CreateClient creates a new client (normally used for media fetch requests).
@ -158,6 +161,14 @@ func SetupAndServeHTTP(
_, _ = w.Write(landingPage.Bytes()) _, _ = w.Write(landingPage.Bytes())
}) })
// We only need the files beneath the static/client/login folder.
sub, err := fs.Sub(loginFallback, "static/client/login")
if err != nil {
logrus.Panicf("unable to read embedded files, this should never happen: %s", err)
}
// Serve a static page for login fallback
routers.Static.PathPrefix("/client/login/").Handler(http.StripPrefix("/_matrix/static/client/login/", http.FileServer(http.FS(sub))))
var clientHandler http.Handler var clientHandler http.Handler
clientHandler = routers.Client clientHandler = routers.Client
if cfg.Global.Sentry.Enabled { if cfg.Global.Sentry.Enabled {
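The login fallback page is served straight from the binary via go:embed; fs.Sub trims the static/client/login prefix so the files appear at the root of the served filesystem. A minimal standalone sketch of the same pattern (the directory and listen address are illustrative, and go:embed requires the directory to exist at build time):

package main

import (
	"embed"
	"io/fs"
	"log"
	"net/http"
)

//go:embed static/client/login
var loginFallback embed.FS

func main() {
	// We only need the files beneath static/client/login, not the full tree.
	sub, err := fs.Sub(loginFallback, "static/client/login")
	if err != nil {
		log.Fatalf("unable to read embedded files: %s", err)
	}
	prefix := "/_matrix/static/client/login/"
	http.Handle(prefix, http.StripPrefix(prefix, http.FileServer(http.FS(sub))))
	log.Fatal(http.ListenAndServe(":8008", nil))
}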

View file

@ -0,0 +1,47 @@
<!doctype html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title> Login </title>
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<link rel="stylesheet" href="style.css">
<script src="js/jquery-3.4.1.min.js"></script>
<script src="js/login.js"></script>
</head>
<body onload="matrixLogin.onLoad()">
<div id="container">
<h1 id="title"></h1>
<span id="feedback"></span>
<div id="loading">
<img src="spinner.gif" />
</div>
<div id="sso_flow" class="login_flow" style="display: none;">
Single sign-on:
<form id="sso_form" action="/_matrix/client/v3/login/sso/redirect" method="get">
<input id="sso_redirect_url" type="hidden" name="redirectUrl" value=""/>
<input type="submit" value="Log in"/>
</form>
</div>
<div id="password_flow" class="login_flow" style="display: none;">
Password Authentication:
<form onsubmit="matrixLogin.passwordLogin(); return false;">
<input id="user_id" size="32" type="text" placeholder="Matrix ID (e.g. bob)" autocapitalize="off" autocorrect="off" />
<br/>
<input id="password" size="32" type="password" placeholder="Password"/>
<br/>
<input type="submit" value="Log in"/>
</form>
</div>
<div id="no_login_types" type="button" class="login_flow" style="display: none;">
Log in currently unavailable.
</div>
</div>
</body>
</html>

File diff suppressed because one or more lines are too long

View file

@ -0,0 +1,291 @@
window.matrixLogin = {
endpoint: location.origin + "/_matrix/client/v3/login",
serverAcceptsPassword: false,
serverAcceptsSso: false,
};
// Titles get updated through the process to give users feedback.
const TITLE_PRE_AUTH = "Log in with one of the following methods";
const TITLE_POST_AUTH = "Logging in...";
// The cookie used to store the original query parameters when using SSO.
const COOKIE_KEY = "dendrite_login_fallback_qs";
/*
* Submit a login request.
*
* type: The login type as a string (e.g. "m.login.foo").
* data: An object of data specific to the login type.
* extra: (Optional) An object to search for extra information to send with the
* login request, e.g. device_id.
* callback: (Optional) Function to call on successful login.
*/
function submitLogin(type, data, extra, callback) {
console.log("Logging in with " + type);
setTitle(TITLE_POST_AUTH);
// Add the login type.
data.type = type;
// Add the device information, if it was provided.
if (extra.device_id) {
data.device_id = extra.device_id;
}
if (extra.initial_device_display_name) {
data.initial_device_display_name = extra.initial_device_display_name;
}
$.post(matrixLogin.endpoint, JSON.stringify(data), function(response) {
if (callback) {
callback();
}
matrixLogin.onLogin(response);
}).fail(errorFunc);
}
/*
* Display an error to the user and show the login form again.
*/
function errorFunc(err) {
// We want to show the error to the user rather than redirecting immediately to the
// SSO portal (if SSO is the only login option), so we inhibit the redirect.
showLogin(true);
if (err.responseJSON && err.responseJSON.error) {
setFeedbackString(err.responseJSON.error + " (" + err.responseJSON.errcode + ")");
}
else {
setFeedbackString("Request failed: " + err.status);
}
}
/*
* Display an error to the user.
*/
function setFeedbackString(text) {
$("#feedback").text(text);
}
/*
* (Maybe) Show the login forms.
*
* This actually does a few unrelated things:
*
* * Configures the SSO redirect URL to come back to this page.
* * Configures and shows the SSO form, if the server supports SSO.
* * Otherwise, shows the password form.
*/
function showLogin(inhibitRedirect) {
setTitle(TITLE_PRE_AUTH);
// If inhibitRedirect is false, and SSO is the only supported login method,
// we can redirect straight to the SSO page.
if (matrixLogin.serverAcceptsSso) {
// Set the redirect to come back to this page, a login token will get
// added as a query parameter and handled after the redirect.
$("#sso_redirect_url").val(window.location.origin + window.location.pathname);
// Before submitting SSO, set the current query parameters into a cookie
// for retrieval later.
var qs = parseQsFromUrl();
setCookie(COOKIE_KEY, JSON.stringify(qs));
// If password is not supported and redirects are allowed, then submit
// the form (redirecting to the SSO provider).
if (!inhibitRedirect && !matrixLogin.serverAcceptsPassword) {
$("#sso_form").submit();
return;
}
// Otherwise, show the SSO form
$("#sso_flow").show();
}
if (matrixLogin.serverAcceptsPassword) {
$("#password_flow").show();
}
// If neither password nor SSO is supported, show an error to the user.
if (!matrixLogin.serverAcceptsPassword && !matrixLogin.serverAcceptsSso) {
$("#no_login_types").show();
}
$("#loading").hide();
}
/*
* Hides the forms and shows a loading throbber.
*/
function showSpinner() {
$("#password_flow").hide();
$("#sso_flow").hide();
$("#no_login_types").hide();
$("#loading").show();
}
/*
* Helper to show the page's main title.
*/
function setTitle(title) {
$("#title").text(title);
}
/*
* Query the login endpoint for the homeserver's supported flows.
*
* This populates matrixLogin.serverAccepts* variables.
*/
function fetchLoginFlows(cb) {
$.get(matrixLogin.endpoint, function(response) {
for (var i = 0; i < response.flows.length; i++) {
var flow = response.flows[i];
if ("m.login.sso" === flow.type) {
matrixLogin.serverAcceptsSso = true;
console.log("Server accepts SSO");
}
if ("m.login.password" === flow.type) {
matrixLogin.serverAcceptsPassword = true;
console.log("Server accepts password");
}
}
cb();
}).fail(errorFunc);
}
/*
* Called on load to fetch login flows and attempt SSO login (if a token is available).
*/
matrixLogin.onLoad = function() {
fetchLoginFlows(function() {
// (Maybe) attempt logging in via SSO if a token is available.
if (!tryTokenLogin()) {
showLogin(false);
}
});
};
/*
* Submit simple user & password login.
*/
matrixLogin.passwordLogin = function() {
var user = $("#user_id").val();
var pwd = $("#password").val();
setFeedbackString("");
showSpinner();
submitLogin(
"m.login.password",
{user: user, password: pwd},
parseQsFromUrl());
};
/*
* The onLogin function gets called after a successful login.
*
* It is expected that implementations override this to be notified when the
* login is complete. The response to the login call is provided as the single
* parameter.
*/
matrixLogin.onLogin = function(response) {
// clobber this function
console.warn("onLogin - This function should be replaced to proceed.");
};
/*
* Process the query parameters from the current URL into an object.
*/
function parseQsFromUrl() {
var pos = window.location.href.indexOf("?");
if (pos == -1) {
return {};
}
var query = window.location.href.substr(pos + 1);
var result = {};
query.split("&").forEach(function(part) {
var item = part.split("=");
var key = item[0];
var val = item[1];
if (val) {
val = decodeURIComponent(val);
}
result[key] = val;
});
return result;
}
/*
* Process the cookies and return an object.
*/
function parseCookies() {
var allCookies = document.cookie;
var result = {};
allCookies.split(";").forEach(function(part) {
var item = part.split("=");
// Cookies might have arbitrary whitespace between them.
var key = item[0].trim();
// You can end up with a broken cookie that doesn't have an equals sign
// in it. Set to an empty value.
var val = (item[1] || "").trim();
// Values might be URI encoded.
if (val) {
val = decodeURIComponent(val);
}
result[key] = val;
});
return result;
}
/*
* Set a cookie that is valid for 1 hour.
*/
function setCookie(key, value) {
// The maximum age is set in seconds.
var maxAge = 60 * 60;
// Set the cookie, this defaults to the current domain and path.
document.cookie = key + "=" + encodeURIComponent(value) + ";max-age=" + maxAge + ";sameSite=lax";
}
/*
* Removes a cookie by key.
*/
function deleteCookie(key) {
// Delete a cookie by setting the expiration to 0. (Note that the value
// doesn't matter.)
document.cookie = key + "=deleted;expires=0";
}
/*
* Submits the login token if one is found in the query parameters. Returns a
* boolean of whether the login token was found or not.
*/
function tryTokenLogin() {
// Check if the login token is in the query parameters.
var qs = parseQsFromUrl();
var loginToken = qs.loginToken;
if (!loginToken) {
return false;
}
// Retrieve the original query parameters (from before the SSO redirect).
// They are stored as JSON in a cookie.
var cookies = parseCookies();
var originalQueryParams = JSON.parse(cookies[COOKIE_KEY] || "{}");
// If the login is successful, delete the cookie.
function callback() {
deleteCookie(COOKIE_KEY);
}
submitLogin(
"m.login.token",
{token: loginToken},
originalQueryParams,
callback);
return true;
}

Binary file not shown.


View file

@ -0,0 +1,79 @@
html {
height: 100%;
}
body {
height: 100%;
font-family: "Myriad Pro", "Myriad", Helvetica, Arial, sans-serif;
font-size: 12pt;
margin: 0px;
}
h1 {
font-size: 20pt;
}
a:link { color: #666; }
a:visited { color: #666; }
a:hover { color: #000; }
a:active { color: #000; }
input {
margin: 5px;
}
textbox, input[type="text"], input[type="password"] {
width: 90%;
}
form {
text-align: center;
margin: 10px 0 0 0;
}
ul.radiobuttons {
text-align: left;
list-style: none;
}
/*
* Add some padding to the viewport.
*/
#container {
padding: 10px;
}
/*
* Center all direct children of the main form.
*/
#container > * {
display: block;
margin-left: auto;
margin-right: auto;
text-align: center;
}
/*
* A wrapper around each login flow.
*/
.login_flow {
width: 300px;
text-align: left;
padding: 10px;
margin-bottom: 40px;
border-radius: 10px;
box-shadow: 0px 0px 20px 0px rgba(0,0,0,0.15);
background-color: #f8f8f8;
border: 1px #ccc solid;
}
/*
* Used to show error content.
*/
#feedback {
/* Red text. */
color: #ff0000;
/* A little space to not overlap the box-shadow. */
margin-bottom: 20px;
}

View file

@ -40,6 +40,9 @@ type AppServiceAPI struct {
// on appservice endpoints. This is not recommended in production! // on appservice endpoints. This is not recommended in production!
DisableTLSValidation bool `yaml:"disable_tls_validation"` DisableTLSValidation bool `yaml:"disable_tls_validation"`
LegacyAuth bool `yaml:"legacy_auth"`
LegacyPaths bool `yaml:"legacy_paths"`
ConfigFiles []string `yaml:"config_files"` ConfigFiles []string `yaml:"config_files"`
} }

View file

@ -18,6 +18,13 @@ type FederationAPI struct {
// The default value is 16 if not specified, which is circa 18 hours. // The default value is 16 if not specified, which is circa 18 hours.
FederationMaxRetries uint32 `yaml:"send_max_retries"` FederationMaxRetries uint32 `yaml:"send_max_retries"`
// P2P Feature: Whether relaying to specific nodes should be enabled.
// Defaults to false.
// Note: Enabling relays introduces a huge startup delay, if you are not using
// relays and have many servers to re-hydrate on start. Only enable this
// if you are using relays!
EnableRelays bool `yaml:"enable_relays"`
// P2P Feature: How many consecutive failures that we should tolerate when // P2P Feature: How many consecutive failures that we should tolerate when
// sending federation requests to a specific server until we should assume they // sending federation requests to a specific server until we should assume they
// are offline. If we assume they are offline then we will attempt to send // are offline. If we assume they are offline then we will attempt to send

View file

@ -110,6 +110,7 @@ func Context(
} }
stateFilter := synctypes.StateFilter{ stateFilter := synctypes.StateFilter{
Limit: filter.Limit,
NotSenders: filter.NotSenders, NotSenders: filter.NotSenders,
NotTypes: filter.NotTypes, NotTypes: filter.NotTypes,
Senders: filter.Senders, Senders: filter.Senders,
@ -157,6 +158,11 @@ func Context(
} }
} }
// Limit is split up for before/after events
if filter.Limit > 1 {
filter.Limit = filter.Limit / 2
}
eventsBefore, err := snapshot.SelectContextBeforeEvent(ctx, id, roomID, filter) eventsBefore, err := snapshot.SelectContextBeforeEvent(ctx, id, roomID, filter)
if err != nil && err != sql.ErrNoRows { if err != nil && err != sql.ErrNoRows {
logrus.WithError(err).Error("unable to fetch before events") logrus.WithError(err).Error("unable to fetch before events")
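Worked example of the split: a /context request with limit=10 keeps the full limit for the state filter built above, while the before and after event queries each receive limit/2 = 5. This halving is also why the "events are not limited" expectation in the testContext change later in this diff drops from 7 to 5 events before the anchor event.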

View file

@ -15,7 +15,6 @@
package routing package routing
import ( import (
"encoding/json"
"math" "math"
"net/http" "net/http"
@ -33,31 +32,13 @@ type getMembershipResponse struct {
Chunk []synctypes.ClientEvent `json:"chunk"` Chunk []synctypes.ClientEvent `json:"chunk"`
} }
// https://matrix.org/docs/spec/client_server/r0.6.0#get-matrix-client-r0-rooms-roomid-joined-members
type getJoinedMembersResponse struct {
Joined map[string]joinedMember `json:"joined"`
}
type joinedMember struct {
DisplayName string `json:"display_name"`
AvatarURL string `json:"avatar_url"`
}
// The database stores 'displayname' without an underscore.
// Deserialize into this and then change to the actual API response
type databaseJoinedMember struct {
DisplayName string `json:"displayname"`
AvatarURL string `json:"avatar_url"`
}
// GetMemberships implements // GetMemberships implements
// //
// GET /rooms/{roomId}/members // GET /rooms/{roomId}/members
// GET /rooms/{roomId}/joined_members
func GetMemberships( func GetMemberships(
req *http.Request, device *userapi.Device, roomID string, req *http.Request, device *userapi.Device, roomID string,
syncDB storage.Database, rsAPI api.SyncRoomserverAPI, syncDB storage.Database, rsAPI api.SyncRoomserverAPI,
joinedOnly bool, membership, notMembership *string, at string, membership, notMembership *string, at string,
) util.JSONResponse { ) util.JSONResponse {
userID, err := spec.NewUserID(device.UserID, true) userID, err := spec.NewUserID(device.UserID, true)
if err != nil { if err != nil {
@ -87,13 +68,6 @@ func GetMemberships(
} }
} }
if joinedOnly && !queryRes.IsInRoom {
return util.JSONResponse{
Code: http.StatusForbidden,
JSON: spec.Forbidden("You aren't a member of the room and weren't previously a member of the room."),
}
}
db, err := syncDB.NewDatabaseSnapshot(req.Context()) db, err := syncDB.NewDatabaseSnapshot(req.Context())
if err != nil { if err != nil {
return util.JSONResponse{ return util.JSONResponse{
@ -139,40 +113,6 @@ func GetMemberships(
result := qryRes.Events result := qryRes.Events
if joinedOnly {
var res getJoinedMembersResponse
res.Joined = make(map[string]joinedMember)
for _, ev := range result {
var content databaseJoinedMember
if err := json.Unmarshal(ev.Content(), &content); err != nil {
util.GetLogger(req.Context()).WithError(err).Error("failed to unmarshal event content")
return util.JSONResponse{
Code: http.StatusInternalServerError,
JSON: spec.InternalServerError{},
}
}
userID, err := rsAPI.QueryUserIDForSender(req.Context(), ev.RoomID(), ev.SenderID())
if err != nil || userID == nil {
util.GetLogger(req.Context()).WithError(err).Error("rsAPI.QueryUserIDForSender failed")
return util.JSONResponse{
Code: http.StatusInternalServerError,
JSON: spec.InternalServerError{},
}
}
if err != nil {
return util.JSONResponse{
Code: http.StatusForbidden,
JSON: spec.Forbidden("You don't have permission to kick this user, unknown senderID"),
}
}
res.Joined[userID.String()] = joinedMember(content)
}
return util.JSONResponse{
Code: http.StatusOK,
JSON: res,
}
}
return util.JSONResponse{ return util.JSONResponse{
Code: http.StatusOK, Code: http.StatusOK,
JSON: getMembershipResponse{synctypes.ToClientEvents(gomatrixserverlib.ToPDUs(result), synctypes.FormatAll, func(roomID spec.RoomID, senderID spec.SenderID) (*spec.UserID, error) { JSON: getMembershipResponse{synctypes.ToClientEvents(gomatrixserverlib.ToPDUs(result), synctypes.FormatAll, func(roomID spec.RoomID, senderID spec.SenderID) (*spec.UserID, error) {

View file

@ -135,13 +135,6 @@ func OnIncomingMessagesRequest(
var fromStream *types.StreamingToken var fromStream *types.StreamingToken
fromQuery := req.URL.Query().Get("from") fromQuery := req.URL.Query().Get("from")
toQuery := req.URL.Query().Get("to") toQuery := req.URL.Query().Get("to")
emptyFromSupplied := fromQuery == ""
if emptyFromSupplied {
// NOTSPEC: We will pretend they used the latest sync token if no ?from= was provided.
// We do this to allow clients to get messages without having to call `/sync` e.g Cerulean
currPos := srp.Notifier.CurrentPosition()
fromQuery = currPos.String()
}
// Direction to return events from. // Direction to return events from.
dir := req.URL.Query().Get("dir") dir := req.URL.Query().Get("dir")
@ -155,6 +148,23 @@ func OnIncomingMessagesRequest(
// to have one of the two accepted values (so dir == "f" <=> !backwardOrdering). // to have one of the two accepted values (so dir == "f" <=> !backwardOrdering).
backwardOrdering := (dir == "b") backwardOrdering := (dir == "b")
emptyFromSupplied := fromQuery == ""
if emptyFromSupplied {
// If "from" isn't provided, it defaults to either the earliest stream
// position (if we're going forward) or to the latest one (if we're
// going backward).
var from types.TopologyToken
if backwardOrdering {
from = types.TopologyToken{Depth: math.MaxInt64, PDUPosition: math.MaxInt64}
} else {
// go 1 earlier than the first event so we correctly fetch the earliest event;
// this is because Database.GetEventsInTopologicalRange is exclusive of the lower bound.
from = types.TopologyToken{}
}
fromQuery = from.String()
}
from, err := types.NewTopologyTokenFromString(fromQuery) from, err := types.NewTopologyTokenFromString(fromQuery)
if err != nil { if err != nil {
var streamToken types.StreamingToken var streamToken types.StreamingToken

View file

@ -197,19 +197,7 @@ func Setup(
} }
at := req.URL.Query().Get("at") at := req.URL.Query().Get("at")
return GetMemberships(req, device, vars["roomID"], syncDB, rsAPI, false, membership, notMembership, at) return GetMemberships(req, device, vars["roomID"], syncDB, rsAPI, membership, notMembership, at)
}, httputil.WithAllowGuests()), }, httputil.WithAllowGuests()),
).Methods(http.MethodGet, http.MethodOptions) ).Methods(http.MethodGet, http.MethodOptions)
v3mux.Handle("/rooms/{roomID}/joined_members",
httputil.MakeAuthAPI("rooms_members", userAPI, func(req *http.Request, device *userapi.Device) util.JSONResponse {
vars, err := httputil.URLDecodeMapValues(mux.Vars(req))
if err != nil {
return util.ErrorResponse(err)
}
at := req.URL.Query().Get("at")
membership := spec.Join
return GetMemberships(req, device, vars["roomID"], syncDB, rsAPI, true, &membership, nil, at)
}),
).Methods(http.MethodGet, http.MethodOptions)
} }

View file

@ -203,6 +203,12 @@ func (p *PDUStreamProvider) IncrementalSync(
req.Log.WithError(err).Error("unable to update event filter with ignored users") req.Log.WithError(err).Error("unable to update event filter with ignored users")
} }
dbEvents, err := p.getRecentEvents(ctx, stateDeltas, r, eventFilter, snapshot)
if err != nil {
req.Log.WithError(err).Error("unable to get recent events")
return r.From
}
newPos = from newPos = from
for _, delta := range stateDeltas { for _, delta := range stateDeltas {
newRange := r newRange := r
@ -218,7 +224,7 @@ func (p *PDUStreamProvider) IncrementalSync(
} }
} }
var pos types.StreamPosition var pos types.StreamPosition
if pos, err = p.addRoomDeltaToResponse(ctx, snapshot, req.Device, newRange, delta, &eventFilter, &stateFilter, req); err != nil { if pos, err = p.addRoomDeltaToResponse(ctx, snapshot, req.Device, newRange, delta, &eventFilter, &stateFilter, req, dbEvents); err != nil {
req.Log.WithError(err).Error("d.addRoomDeltaToResponse failed") req.Log.WithError(err).Error("d.addRoomDeltaToResponse failed")
if err == context.DeadlineExceeded || err == context.Canceled || err == sql.ErrTxDone { if err == context.DeadlineExceeded || err == context.Canceled || err == sql.ErrTxDone {
return newPos return newPos
@ -240,6 +246,66 @@ func (p *PDUStreamProvider) IncrementalSync(
return newPos return newPos
} }
func (p *PDUStreamProvider) getRecentEvents(ctx context.Context, stateDeltas []types.StateDelta, r types.Range, eventFilter synctypes.RoomEventFilter, snapshot storage.DatabaseTransaction) (map[string]types.RecentEvents, error) {
var roomIDs []string
var newlyJoinedRoomIDs []string
for _, delta := range stateDeltas {
if delta.NewlyJoined {
newlyJoinedRoomIDs = append(newlyJoinedRoomIDs, delta.RoomID)
} else {
roomIDs = append(roomIDs, delta.RoomID)
}
}
dbEvents := make(map[string]types.RecentEvents)
if len(roomIDs) > 0 {
events, err := snapshot.RecentEvents(
ctx, roomIDs, r,
&eventFilter, true, true,
)
if err != nil {
if err != sql.ErrNoRows {
return nil, err
}
}
for k, v := range events {
dbEvents[k] = v
}
}
if len(newlyJoinedRoomIDs) > 0 {
// For rooms that were joined in this sync, try to fetch
// as many timeline events as allowed by the filter.
filter := eventFilter
// If we're going backwards, grab at least X events; this is mostly to satisfy Sytest
if eventFilter.Limit < recentEventBackwardsLimit {
filter.Limit = recentEventBackwardsLimit // TODO: Figure out a better way
diff := r.From - r.To
if diff > 0 && diff < recentEventBackwardsLimit {
filter.Limit = int(diff)
}
}
events, err := snapshot.RecentEvents(
ctx, newlyJoinedRoomIDs, types.Range{
From: r.To,
To: 0,
Backwards: true,
},
&filter, true, true,
)
if err != nil {
if err != sql.ErrNoRows {
return nil, err
}
}
for k, v := range events {
dbEvents[k] = v
}
}
return dbEvents, nil
}
// Limit the recent events to X when going backwards // Limit the recent events to X when going backwards
const recentEventBackwardsLimit = 100 const recentEventBackwardsLimit = 100
@ -253,29 +319,9 @@ func (p *PDUStreamProvider) addRoomDeltaToResponse(
eventFilter *synctypes.RoomEventFilter, eventFilter *synctypes.RoomEventFilter,
stateFilter *synctypes.StateFilter, stateFilter *synctypes.StateFilter,
req *types.SyncRequest, req *types.SyncRequest,
dbEvents map[string]types.RecentEvents,
) (types.StreamPosition, error) { ) (types.StreamPosition, error) {
var err error var err error
originalLimit := eventFilter.Limit
// If we're going backwards, grep at least X events, this is mostly to satisfy Sytest
if r.Backwards && originalLimit < recentEventBackwardsLimit {
eventFilter.Limit = recentEventBackwardsLimit // TODO: Figure out a better way
diff := r.From - r.To
if diff > 0 && diff < recentEventBackwardsLimit {
eventFilter.Limit = int(diff)
}
}
dbEvents, err := snapshot.RecentEvents(
ctx, []string{delta.RoomID}, r,
eventFilter, true, true,
)
if err != nil {
if err == sql.ErrNoRows {
return r.To, nil
}
return r.From, fmt.Errorf("p.DB.RecentEvents: %w", err)
}
recentStreamEvents := dbEvents[delta.RoomID].Events recentStreamEvents := dbEvents[delta.RoomID].Events
limited := dbEvents[delta.RoomID].Limited limited := dbEvents[delta.RoomID].Limited
@ -337,9 +383,9 @@ func (p *PDUStreamProvider) addRoomDeltaToResponse(
logrus.WithError(err).Error("unable to apply history visibility filter") logrus.WithError(err).Error("unable to apply history visibility filter")
} }
if r.Backwards && len(events) > originalLimit { if r.Backwards && len(events) > eventFilter.Limit {
// We're going backwards and the events are ordered chronologically, so take the last `limit` events // We're going backwards and the events are ordered chronologically, so take the last `limit` events
events = events[len(events)-originalLimit:] events = events[len(events)-eventFilter.Limit:]
limited = true limited = true
} }

View file

@ -753,24 +753,6 @@ func TestGetMembership(t *testing.T) {
}, },
wantOK: false, wantOK: false,
}, },
{
name: "/joined_members - Bob never joined",
request: func(t *testing.T, room *test.Room) *http.Request {
return test.NewRequest(t, "GET", fmt.Sprintf("/_matrix/client/v3/rooms/%s/joined_members", room.ID), test.WithQueryParams(map[string]string{
"access_token": bobDev.AccessToken,
}))
},
wantOK: false,
},
{
name: "/joined_members - Alice joined",
request: func(t *testing.T, room *test.Room) *http.Request {
return test.NewRequest(t, "GET", fmt.Sprintf("/_matrix/client/v3/rooms/%s/joined_members", room.ID), test.WithQueryParams(map[string]string{
"access_token": aliceDev.AccessToken,
}))
},
wantOK: true,
},
{ {
name: "Alice leaves before Bob joins, should not be able to see Bob", name: "Alice leaves before Bob joins, should not be able to see Bob",
request: func(t *testing.T, room *test.Room) *http.Request { request: func(t *testing.T, room *test.Room) *http.Request {
@ -809,21 +791,6 @@ func TestGetMembership(t *testing.T) {
wantOK: true, wantOK: true,
wantMemberCount: 2, wantMemberCount: 2,
}, },
{
name: "/joined_members - Alice leaves, shouldn't be able to see members ",
request: func(t *testing.T, room *test.Room) *http.Request {
return test.NewRequest(t, "GET", fmt.Sprintf("/_matrix/client/v3/rooms/%s/joined_members", room.ID), test.WithQueryParams(map[string]string{
"access_token": aliceDev.AccessToken,
}))
},
additionalEvents: func(t *testing.T, room *test.Room) {
room.CreateAndInsert(t, alice, spec.MRoomMember, map[string]interface{}{
"membership": "leave",
}, test.WithStateKey(alice.ID))
},
useSleep: true,
wantOK: false,
},
{ {
name: "'at' specified, returns memberships before Bob joins", name: "'at' specified, returns memberships before Bob joins",
request: func(t *testing.T, room *test.Room) *http.Request { request: func(t *testing.T, room *test.Room) *http.Request {
@ -1169,7 +1136,7 @@ func testContext(t *testing.T, dbType test.DBType) {
}, },
{ {
name: "events are not limited", name: "events are not limited",
wantBeforeLength: 7, wantBeforeLength: 5,
}, },
{ {
name: "all events are limited", name: "all events are limited",

View file

@ -286,8 +286,8 @@ func NewTopologyTokenFromString(tok string) (token TopologyToken, err error) {
if i > len(positions) { if i > len(positions) {
break break
} }
var pos int var pos int64
pos, err = strconv.Atoi(p) pos, err = strconv.ParseInt(p, 10, 64)
if err != nil { if err != nil {
return return
} }
@ -318,8 +318,8 @@ func NewStreamTokenFromString(tok string) (token StreamingToken, err error) {
if i >= len(positions) { if i >= len(positions) {
break break
} }
var pos int var pos int64
pos, err = strconv.Atoi(p) pos, err = strconv.ParseInt(p, 10, 64)
if err != nil { if err != nil {
err = ErrMalformedSyncToken err = ErrMalformedSyncToken
return return

View file

@ -3,6 +3,7 @@ package types
import ( import (
"context" "context"
"encoding/json" "encoding/json"
"math"
"reflect" "reflect"
"testing" "testing"
@ -33,12 +34,28 @@ func TestSyncTokens(t *testing.T) {
"s3_1_0_0_0_0_2_0_5": StreamingToken{3, 1, 0, 0, 0, 0, 2, 0, 5}.String(), "s3_1_0_0_0_0_2_0_5": StreamingToken{3, 1, 0, 0, 0, 0, 2, 0, 5}.String(),
"s3_1_2_3_5_0_0_0_6": StreamingToken{3, 1, 2, 3, 5, 0, 0, 0, 6}.String(), "s3_1_2_3_5_0_0_0_6": StreamingToken{3, 1, 2, 3, 5, 0, 0, 0, 6}.String(),
"t3_1": TopologyToken{3, 1}.String(), "t3_1": TopologyToken{3, 1}.String(),
"t9223372036854775807_9223372036854775807": TopologyToken{Depth: math.MaxInt64, PDUPosition: math.MaxInt64}.String(),
"s9223372036854775807_1_2_3_5_0_0_0_6": StreamingToken{math.MaxInt64, 1, 2, 3, 5, 0, 0, 0, 6}.String(),
} }
for a, b := range shouldPass { for a, b := range shouldPass {
if a != b { if a != b {
t.Errorf("expected %q, got %q", a, b) t.Errorf("expected %q, got %q", a, b)
} }
// parse as topology token
if a[0] == 't' {
if _, err := NewTopologyTokenFromString(a); err != nil {
t.Errorf("expected %q to pass, but got %q", a, err)
}
}
// parse as sync token
if a[0] == 's' {
if _, err := NewStreamTokenFromString(a); err != nil {
t.Errorf("expected %q to pass, but got %q", a, err)
}
}
} }
shouldFail := []string{ shouldFail := []string{

View file

@ -379,6 +379,10 @@ type PerformDeviceCreationRequest struct {
// update for this account. Generally the only reason to do this is if the account // update for this account. Generally the only reason to do this is if the account
// is an appservice account. // is an appservice account.
NoDeviceListUpdate bool NoDeviceListUpdate bool
// FromRegistration determines if this request comes from registering a new account
// and is in most cases false.
FromRegistration bool
} }
// PerformDeviceCreationResponse is the response for PerformDeviceCreation // PerformDeviceCreationResponse is the response for PerformDeviceCreation
@ -803,6 +807,10 @@ type PerformUploadKeysRequest struct {
// itself doesn't change but it's easier to pretend upload new keys and reuse the same code paths. // itself doesn't change but it's easier to pretend upload new keys and reuse the same code paths.
// Without this flag, requests to modify device display names would delete device keys. // Without this flag, requests to modify device display names would delete device keys.
OnlyDisplayNameUpdates bool OnlyDisplayNameUpdates bool
// FromRegistration is set if this key upload comes right after creating an account
// and determines if we need to inform downstream components.
FromRegistration bool
} }
// PerformUploadKeysResponse is the response to PerformUploadKeys // PerformUploadKeysResponse is the response to PerformUploadKeys

View file

@ -711,9 +711,15 @@ func (a *UserInternalAPI) uploadLocalDeviceKeys(ctx context.Context, req *api.Pe
} }
return return
} }
err = emitDeviceKeyChanges(a.KeyChangeProducer, existingKeys, keysToStore, req.OnlyDisplayNameUpdates)
if err != nil { // If the request does _not_ come right after registering an account,
util.GetLogger(ctx).Errorf("Failed to emitDeviceKeyChanges: %s", err) // inform downstream components. However, we're fine with just creating the
// database entries above in other cases.
if !req.FromRegistration {
err = emitDeviceKeyChanges(a.KeyChangeProducer, existingKeys, keysToStore, req.OnlyDisplayNameUpdates)
if err != nil {
util.GetLogger(ctx).Errorf("Failed to emitDeviceKeyChanges: %s", err)
}
} }
} }

View file

@ -316,7 +316,7 @@ func (a *UserInternalAPI) PerformDeviceCreation(ctx context.Context, req *api.Pe
return nil return nil
} }
// create empty device keys and upload them to trigger device list changes // create empty device keys and upload them to trigger device list changes
return a.deviceListUpdate(dev.UserID, []string{dev.ID}) return a.deviceListUpdate(dev.UserID, []string{dev.ID}, req.FromRegistration)
} }
func (a *UserInternalAPI) PerformDeviceDeletion(ctx context.Context, req *api.PerformDeviceDeletionRequest, res *api.PerformDeviceDeletionResponse) error { func (a *UserInternalAPI) PerformDeviceDeletion(ctx context.Context, req *api.PerformDeviceDeletionRequest, res *api.PerformDeviceDeletionResponse) error {
@ -356,10 +356,10 @@ func (a *UserInternalAPI) PerformDeviceDeletion(ctx context.Context, req *api.Pe
return fmt.Errorf("a.KeyAPI.PerformDeleteKeys: %w", err) return fmt.Errorf("a.KeyAPI.PerformDeleteKeys: %w", err)
} }
// create empty device keys and upload them to delete what was once there and trigger device list changes // create empty device keys and upload them to delete what was once there and trigger device list changes
return a.deviceListUpdate(req.UserID, deletedDeviceIDs) return a.deviceListUpdate(req.UserID, deletedDeviceIDs, false)
} }
func (a *UserInternalAPI) deviceListUpdate(userID string, deviceIDs []string) error { func (a *UserInternalAPI) deviceListUpdate(userID string, deviceIDs []string, fromRegistration bool) error {
deviceKeys := make([]api.DeviceKeys, len(deviceIDs)) deviceKeys := make([]api.DeviceKeys, len(deviceIDs))
for i, did := range deviceIDs { for i, did := range deviceIDs {
deviceKeys[i] = api.DeviceKeys{ deviceKeys[i] = api.DeviceKeys{
@ -371,8 +371,9 @@ func (a *UserInternalAPI) deviceListUpdate(userID string, deviceIDs []string) er
var uploadRes api.PerformUploadKeysResponse var uploadRes api.PerformUploadKeysResponse
if err := a.PerformUploadKeys(context.Background(), &api.PerformUploadKeysRequest{ if err := a.PerformUploadKeys(context.Background(), &api.PerformUploadKeysRequest{
UserID: userID, UserID: userID,
DeviceKeys: deviceKeys, DeviceKeys: deviceKeys,
FromRegistration: fromRegistration,
}, &uploadRes); err != nil { }, &uploadRes); err != nil {
return err return err
} }