Merge branch 'main' of github.com:matrix-org/dendrite into s7evink/consent-tracking
commit 019f0922ea
.github/PULL_REQUEST_TEMPLATE.md
vendored
1
.github/PULL_REQUEST_TEMPLATE.md
vendored
|
@ -2,6 +2,7 @@
|
|||
|
||||
<!-- Please read docs/CONTRIBUTING.md before submitting your pull request -->
|
||||
|
||||
* [ ] I have added added tests for PR _or_ I have justified why this PR doesn't need tests.
|
||||
* [ ] Pull request includes a [sign off](https://github.com/matrix-org/dendrite/blob/main/docs/CONTRIBUTING.md#sign-off)
|
||||
|
||||
Signed-off-by: `Your Name <your@email.example.org>`
|
||||
|
|
.github/workflows/codeql-analysis.yml (vendored), 34 lines

@@ -1,34 +0,0 @@ (file removed)

name: "CodeQL"

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]

jobs:
  analyze:
    name: Analyze
    runs-on: ubuntu-latest

    strategy:
      fail-fast: false
      matrix:
        language: ["go"]

    steps:
      - name: Checkout repository
        uses: actions/checkout@v2
        with:
          fetch-depth: 2

      - run: git checkout HEAD^2
        if: ${{ github.event_name == 'pull_request' }}

      - name: Initialize CodeQL
        uses: github/codeql-action/init@v1
        with:
          languages: ${{ matrix.language }}

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v1
.github/workflows/dendrite.yml (vendored, new file), 346 lines

@@ -0,0 +1,346 @@ (new file)

name: Dendrite

on:
  push:
    branches:
      - main
  pull_request:
  release:
    types: [published]

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  wasm:
    name: WASM build test
    timeout-minutes: 5
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2

      - name: Install Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.16

      - uses: actions/cache@v2
        with:
          path: |
            ~/.cache/go-build
            ~/go/pkg/mod
          key: ${{ runner.os }}-go-wasm-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-go-wasm

      - name: Install Node
        uses: actions/setup-node@v2
        with:
          node-version: 14

      - uses: actions/cache@v2
        with:
          path: ~/.npm
          key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
          restore-keys: |
            ${{ runner.os }}-node-

      - name: Reconfigure Git to use HTTPS auth for repo packages
        run: >
          git config --global url."https://github.com/".insteadOf
          ssh://git@github.com/

      - name: Install test dependencies
        working-directory: ./test/wasm
        run: npm ci

      - name: Test
        run: ./test-dendritejs.sh

  # Run golangci-lint
  lint:
    timeout-minutes: 5
    name: Linting
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: golangci-lint
        uses: golangci/golangci-lint-action@v2

  # run go test with different go versions
  test:
    timeout-minutes: 5
    name: Unit tests (Go ${{ matrix.go }})
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        go: ["1.16", "1.17", "1.18"]
    steps:
      - uses: actions/checkout@v3
      - name: Setup go
        uses: actions/setup-go@v2
        with:
          go-version: ${{ matrix.go }}
      - uses: actions/cache@v3
        with:
          path: |
            ~/.cache/go-build
            ~/go/pkg/mod
          key: ${{ runner.os }}-go${{ matrix.go }}-test-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-go${{ matrix.go }}-test-
      - run: go test ./...

  # build Dendrite for linux with different architectures and go versions
  build:
    name: Build for Linux
    timeout-minutes: 10
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        go: ["1.16", "1.17", "1.18"]
        goos: ["linux"]
        goarch: ["amd64", "386"]
    steps:
      - uses: actions/checkout@v3
      - name: Setup go
        uses: actions/setup-go@v2
        with:
          go-version: ${{ matrix.go }}
      - name: Install dependencies x86
        if: ${{ matrix.goarch == '386' }}
        run: sudo apt update && sudo apt-get install -y gcc-multilib
      - uses: actions/cache@v3
        with:
          path: |
            ~/.cache/go-build
            ~/go/pkg/mod
          key: ${{ runner.os }}-go${{ matrix.go }}-${{ matrix.goarch }}-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-go${{ matrix.go }}-${{ matrix.goarch }}-
      - env:
          GOOS: ${{ matrix.goos }}
          GOARCH: ${{ matrix.goarch }}
          CGO_ENABLED: 1
        run: go build -trimpath -v -o "bin/" ./cmd/...

  # build for Windows 64-bit
  build_windows:
    name: Build for Windows
    timeout-minutes: 10
    runs-on: ubuntu-latest
    strategy:
      matrix:
        go: ["1.16", "1.17", "1.18"]
        goos: ["windows"]
        goarch: ["amd64"]
    steps:
      - uses: actions/checkout@v3
      - name: Setup Go ${{ matrix.go }}
        uses: actions/setup-go@v2
        with:
          go-version: ${{ matrix.go }}
      - name: Install dependencies
        run: sudo apt update && sudo apt install -y gcc-mingw-w64-x86-64 # install required gcc
      - uses: actions/cache@v3
        with:
          path: |
            ~/.cache/go-build
            ~/go/pkg/mod
          key: ${{ runner.os }}-go${{ matrix.go }}-${{ matrix.goos }}-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-go${{ matrix.go }}-${{ matrix.goos }}
      - env:
          GOOS: ${{ matrix.goos }}
          GOARCH: ${{ matrix.goarch }}
          CGO_ENABLED: 1
          CC: "/usr/bin/x86_64-w64-mingw32-gcc"
        run: go build -trimpath -v -o "bin/" ./cmd/...

  # Dummy step to gate other tests on without repeating the whole list
  initial-tests-done:
    name: Initial tests passed
    needs: [lint, test, build, build_windows]
    runs-on: ubuntu-latest
    if: ${{ !cancelled() }} # Run this even if prior jobs were skipped
    steps:
      - name: Check initial tests passed
        uses: re-actors/alls-green@release/v1
        with:
          jobs: ${{ toJSON(needs) }}

  # run database upgrade tests
  upgrade_test:
    name: Upgrade tests
    timeout-minutes: 20
    needs: initial-tests-done
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Setup go
        uses: actions/setup-go@v2
        with:
          go-version: "1.16"
      - uses: actions/cache@v3
        with:
          path: |
            ~/.cache/go-build
            ~/go/pkg/mod
          key: ${{ runner.os }}-go-upgrade-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-go-upgrade
      - name: Build upgrade-tests
        run: go build ./cmd/dendrite-upgrade-tests
      - name: Test upgrade
        run: ./dendrite-upgrade-tests --head .

  # run Sytest in different variations
  sytest:
    timeout-minutes: 20
    needs: initial-tests-done
    name: "Sytest (${{ matrix.label }})"
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        include:
          - label: SQLite

          - label: SQLite, full HTTP APIs
            api: full-http

          - label: PostgreSQL
            postgres: postgres

          - label: PostgreSQL, full HTTP APIs
            postgres: postgres
            api: full-http
    container:
      image: matrixdotorg/sytest-dendrite:latest
      volumes:
        - ${{ github.workspace }}:/src
      env:
        POSTGRES: ${{ matrix.postgres && 1}}
        API: ${{ matrix.api && 1 }}
    steps:
      - uses: actions/checkout@v2
      - name: Run Sytest
        run: /bootstrap.sh dendrite
        working-directory: /src
      - name: Summarise results.tap
        if: ${{ always() }}
        run: /sytest/scripts/tap_to_gha.pl /logs/results.tap

      - name: Upload Sytest logs
        uses: actions/upload-artifact@v2
        if: ${{ always() }}
        with:
          name: Sytest Logs - ${{ job.status }} - (Dendrite, ${{ join(matrix.*, ', ') }})
          path: |
            /logs/results.tap
            /logs/**/*.log*

  # run Complement
  complement:
    name: "Complement (${{ matrix.label }})"
    timeout-minutes: 20
    needs: initial-tests-done
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        include:
          - label: SQLite

          - label: SQLite, full HTTP APIs
            api: full-http

          - label: PostgreSQL
            postgres: Postgres

          - label: PostgreSQL, full HTTP APIs
            postgres: Postgres
            api: full-http
    steps:
      # Env vars are set via a file given by $GITHUB_PATH. We need both Go 1.17 and GOPATH on env to run Complement.
      # See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#adding-a-system-path
      - name: "Set Go Version"
        run: |
          echo "$GOROOT_1_17_X64/bin" >> $GITHUB_PATH
          echo "~/go/bin" >> $GITHUB_PATH

      - name: "Install Complement Dependencies"
        # We don't need to install Go because it is included on the Ubuntu 20.04 image:
        # See https://github.com/actions/virtual-environments/blob/main/images/linux/Ubuntu2004-Readme.md specifically GOROOT_1_17_X64
        run: |
          sudo apt-get update && sudo apt-get install -y libolm3 libolm-dev
          go get -v github.com/haveyoudebuggedit/gotestfmt/v2/cmd/gotestfmt@latest

      - name: Run actions/checkout@v2 for dendrite
        uses: actions/checkout@v2
        with:
          path: dendrite

      # Attempt to check out the same branch of Complement as the PR. If it
      # doesn't exist, fall back to main.
      - name: Checkout complement
        shell: bash
        run: |
          mkdir -p complement
          # Attempt to use the version of complement which best matches the current
          # build. Depending on whether this is a PR or release, etc. we need to
          # use different fallbacks.
          #
          # 1. First check if there's a similarly named branch (GITHUB_HEAD_REF
          #    for pull requests, otherwise GITHUB_REF).
          # 2. Attempt to use the base branch, e.g. when merging into release-vX.Y
          #    (GITHUB_BASE_REF for pull requests).
          # 3. Use the default complement branch ("master").
          for BRANCH_NAME in "$GITHUB_HEAD_REF" "$GITHUB_BASE_REF" "${GITHUB_REF#refs/heads/}" "master"; do
            # Skip empty branch names and merge commits.
            if [[ -z "$BRANCH_NAME" || $BRANCH_NAME =~ ^refs/pull/.* ]]; then
              continue
            fi

            (wget -O - "https://github.com/matrix-org/complement/archive/$BRANCH_NAME.tar.gz" | tar -xz --strip-components=1 -C complement) && break
          done

      # Build initial Dendrite image
      - run: docker build -t complement-dendrite -f build/scripts/Complement${{ matrix.postgres }}.Dockerfile .
        working-directory: dendrite

      # Run Complement
      - run: |
          set -o pipefail &&
          go test -v -json -tags dendrite_blacklist ./tests/... 2>&1 | gotestfmt
        shell: bash
        name: Run Complement Tests
        env:
          COMPLEMENT_BASE_IMAGE: complement-dendrite:latest
          API: ${{ matrix.api && 1 }}
        working-directory: complement

  integration-tests-done:
    name: Integration tests passed
    needs: [initial-tests-done, upgrade_test, sytest, complement]
    runs-on: ubuntu-latest
    if: ${{ !cancelled() }} # Run this even if prior jobs were skipped
    steps:
      - name: Check integration tests passed
        uses: re-actors/alls-green@release/v1
        with:
          jobs: ${{ toJSON(needs) }}

  update-docker-images:
    name: Update Docker images
    permissions:
      packages: write
      contents: read
    if: github.repository == 'matrix-org/dendrite' && github.ref_name == 'main'
    needs: [integration-tests-done]
    uses: matrix-org/dendrite/.github/workflows/docker.yml@main
    secrets:
      DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
.github/workflows/docker-hub.yml (vendored), 71 lines

@@ -1,71 +0,0 @@ (file removed)

# Based on https://github.com/docker/build-push-action

name: "Docker Hub"

on:
  release:
    types: [published]

env:
  DOCKER_NAMESPACE: matrixdotorg
  DOCKER_HUB_USER: dendritegithub
  PLATFORMS: linux/amd64,linux/arm64,linux/arm/v7

jobs:
  Monolith:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Get release tag
        run: echo "RELEASE_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Login to Docker Hub
        uses: docker/login-action@v1
        with:
          username: ${{ env.DOCKER_HUB_USER }}
          password: ${{ secrets.DOCKER_TOKEN }}

      - name: Build monolith image
        id: docker_build_monolith
        uses: docker/build-push-action@v2
        with:
          context: .
          file: ./build/docker/Dockerfile.monolith
          platforms: ${{ env.PLATFORMS }}
          push: true
          tags: |
            ${{ env.DOCKER_NAMESPACE }}/dendrite-monolith:latest
            ${{ env.DOCKER_NAMESPACE }}/dendrite-monolith:${{ env.RELEASE_VERSION }}

  Polylith:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Get release tag
        run: echo "RELEASE_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Login to Docker Hub
        uses: docker/login-action@v1
        with:
          username: ${{ env.DOCKER_HUB_USER }}
          password: ${{ secrets.DOCKER_TOKEN }}

      - name: Build polylith image
        id: docker_build_polylith
        uses: docker/build-push-action@v2
        with:
          context: .
          file: ./build/docker/Dockerfile.polylith
          platforms: ${{ env.PLATFORMS }}
          push: true
          tags: |
            ${{ env.DOCKER_NAMESPACE }}/dendrite-polylith:latest
            ${{ env.DOCKER_NAMESPACE }}/dendrite-polylith:${{ env.RELEASE_VERSION }}
.github/workflows/docker.yml (vendored, new file), 139 lines

@@ -0,0 +1,139 @@ (new file)

# Based on https://github.com/docker/build-push-action

name: "Docker"

on:
  release: # A GitHub release was published
    types: [published]
  workflow_dispatch: # A build was manually requested
  workflow_call: # Another pipeline called us
    secrets:
      DOCKER_TOKEN:
        required: true

env:
  DOCKER_NAMESPACE: matrixdotorg
  DOCKER_HUB_USER: dendritegithub
  GHCR_NAMESPACE: matrix-org
  PLATFORMS: linux/amd64,linux/arm64,linux/arm/v7

jobs:
  monolith:
    name: Monolith image
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Get release tag
        if: github.event_name == 'release' # Only for GitHub releases
        run: echo "RELEASE_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Login to Docker Hub
        uses: docker/login-action@v1
        with:
          username: ${{ env.DOCKER_HUB_USER }}
          password: ${{ secrets.DOCKER_TOKEN }}
      - name: Login to GitHub Containers
        uses: docker/login-action@v1
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Build main monolith image
        if: github.ref_name == 'main'
        id: docker_build_monolith
        uses: docker/build-push-action@v2
        with:
          cache-from: type=gha
          cache-to: type=gha,mode=max
          context: .
          file: ./build/docker/Dockerfile.monolith
          platforms: ${{ env.PLATFORMS }}
          push: true
          tags: |
            ${{ env.DOCKER_NAMESPACE }}/dendrite-monolith:${{ github.ref_name }}
            ghcr.io/${{ env.GHCR_NAMESPACE }}/dendrite-monolith:${{ github.ref_name }}

      - name: Build release monolith image
        if: github.event_name == 'release' # Only for GitHub releases
        id: docker_build_monolith_release
        uses: docker/build-push-action@v2
        with:
          cache-from: type=gha
          cache-to: type=gha,mode=max
          context: .
          file: ./build/docker/Dockerfile.monolith
          platforms: ${{ env.PLATFORMS }}
          push: true
          tags: |
            ${{ env.DOCKER_NAMESPACE }}/dendrite-monolith:latest
            ${{ env.DOCKER_NAMESPACE }}/dendrite-monolith:${{ env.RELEASE_VERSION }}
            ghcr.io/${{ env.GHCR_NAMESPACE }}/dendrite-monolith:latest
            ghcr.io/${{ env.GHCR_NAMESPACE }}/dendrite-monolith:${{ env.RELEASE_VERSION }}

  polylith:
    name: Polylith image
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Get release tag
        if: github.event_name == 'release' # Only for GitHub releases
        run: echo "RELEASE_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Login to Docker Hub
        uses: docker/login-action@v1
        with:
          username: ${{ env.DOCKER_HUB_USER }}
          password: ${{ secrets.DOCKER_TOKEN }}
      - name: Login to GitHub Containers
        uses: docker/login-action@v1
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Build main polylith image
        if: github.ref_name == 'main'
        id: docker_build_polylith
        uses: docker/build-push-action@v2
        with:
          cache-from: type=gha
          cache-to: type=gha,mode=max
          context: .
          file: ./build/docker/Dockerfile.polylith
          platforms: ${{ env.PLATFORMS }}
          push: true
          tags: |
            ${{ env.DOCKER_NAMESPACE }}/dendrite-polylith:${{ github.ref_name }}
            ghcr.io/${{ env.GHCR_NAMESPACE }}/dendrite-polylith:${{ github.ref_name }}

      - name: Build release polylith image
        if: github.event_name == 'release' # Only for GitHub releases
        id: docker_build_polylith_release
        uses: docker/build-push-action@v2
        with:
          cache-from: type=gha
          cache-to: type=gha,mode=max
          context: .
          file: ./build/docker/Dockerfile.polylith
          platforms: ${{ env.PLATFORMS }}
          push: true
          tags: |
            ${{ env.DOCKER_NAMESPACE }}/dendrite-polylith:latest
            ${{ env.DOCKER_NAMESPACE }}/dendrite-polylith:${{ env.RELEASE_VERSION }}
            ghcr.io/${{ env.GHCR_NAMESPACE }}/dendrite-polylith:latest
            ghcr.io/${{ env.GHCR_NAMESPACE }}/dendrite-polylith:${{ env.RELEASE_VERSION }}
.github/workflows/tests.yml (vendored), 71 lines

@@ -1,71 +0,0 @@ (file removed)

name: Tests

on:
  push:
    branches: ["main"]
  pull_request:

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  complement:
    runs-on: ubuntu-latest
    steps:
      # Env vars are set via a file given by $GITHUB_PATH. We need both Go 1.17 and GOPATH on env to run Complement.
      # See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#adding-a-system-path
      - name: "Set Go Version"
        run: |
          echo "$GOROOT_1_17_X64/bin" >> $GITHUB_PATH
          echo "~/go/bin" >> $GITHUB_PATH

      - name: "Install Complement Dependencies"
        # We don't need to install Go because it is included on the Ubuntu 20.04 image:
        # See https://github.com/actions/virtual-environments/blob/main/images/linux/Ubuntu2004-Readme.md specifically GOROOT_1_17_X64
        run: |
          sudo apt-get update && sudo apt-get install -y libolm3 libolm-dev
          go get -v github.com/haveyoudebuggedit/gotestfmt/v2/cmd/gotestfmt@latest

      - name: Run actions/checkout@v2 for dendrite
        uses: actions/checkout@v2
        with:
          path: dendrite

      # Attempt to check out the same branch of Complement as the PR. If it
      # doesn't exist, fall back to main.
      - name: Checkout complement
        shell: bash
        run: |
          mkdir -p complement
          # Attempt to use the version of complement which best matches the current
          # build. Depending on whether this is a PR or release, etc. we need to
          # use different fallbacks.
          #
          # 1. First check if there's a similarly named branch (GITHUB_HEAD_REF
          #    for pull requests, otherwise GITHUB_REF).
          # 2. Attempt to use the base branch, e.g. when merging into release-vX.Y
          #    (GITHUB_BASE_REF for pull requests).
          # 3. Use the default complement branch ("master").
          for BRANCH_NAME in "$GITHUB_HEAD_REF" "$GITHUB_BASE_REF" "${GITHUB_REF#refs/heads/}" "master"; do
            # Skip empty branch names and merge commits.
            if [[ -z "$BRANCH_NAME" || $BRANCH_NAME =~ ^refs/pull/.* ]]; then
              continue
            fi

            (wget -O - "https://github.com/matrix-org/complement/archive/$BRANCH_NAME.tar.gz" | tar -xz --strip-components=1 -C complement) && break
          done

      # Build initial Dendrite image
      - run: docker build -t complement-dendrite -f build/scripts/Complement.Dockerfile .
        working-directory: dendrite

      # Run Complement
      - run: |
          set -o pipefail &&
          go test -v -json -tags dendrite_blacklist ./tests/... 2>&1 | gotestfmt
        shell: bash
        name: Run Complement Tests
        env:
          COMPLEMENT_BASE_IMAGE: complement-dendrite:latest
        working-directory: complement
.github/workflows/wasm.yml (vendored), 49 lines

@@ -1,49 +0,0 @@ (file removed)

name: WebAssembly

on:
  push:
  pull_request:

jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2

      - name: Install Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.16.5

      - uses: actions/cache@v2
        with:
          path: |
            ~/.cache/go-build
            ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-go-

      - name: Install Node
        uses: actions/setup-node@v2
        with:
          node-version: 14

      - uses: actions/cache@v2
        with:
          path: ~/.npm
          key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
          restore-keys: |
            ${{ runner.os }}-node-

      - name: Reconfigure Git to use HTTPS auth for repo packages
        run: >
          git config --global url."https://github.com/".insteadOf
          ssh://git@github.com/

      - name: Install test dependencies
        working-directory: ./test/wasm
        run: npm ci

      - name: Test
        run: ./test-dendritejs.sh
CHANGES.md, 38 lines added

@@ -1,5 +1,43 @@
 # Changelog

 ## Dendrite 0.7.0 (2022-03-25)

 ### Features

 * The roomserver input API will now queue all events into NATS, which provides better crash resilience
 * The roomserver input API now configures per-room consumers, which should use less memory
 * Canonical aliases can now be added and removed
 * MSC2946 Spaces Summary now works correctly, both locally and over federation
 * Healthcheck endpoints are now available at:
   * `/_dendrite/monitor/up`, which will return 200 when Dendrite is ready to accept requests
   * `/_dendrite/monitor/health`, which will return 200 if healthy and 503 if degraded for some reason
 * The `X-Matrix` federation authorisation header now includes a `destination` field, as per MSC3383
 * The `/sync` endpoint now uses less memory by only ranging state for rooms that the user has participated in
 * The `/messages` endpoint now accepts stream positions in both the `from` and `to` parameters
 * Dendrite will now log a warning at startup if the file descriptor limit is set too low
 * The federation client will now attempt to use HTTP/2 if available
 * The federation client will now attempt to resume TLS sessions if possible, to reduce handshake overheads
 * The built-in NATS Server has been updated to version 2.7.4
 * NATS streams that don't match the desired configuration will now be recreated automatically
 * When performing a graceful shutdown, Dendrite will now wait for NATS Server to shut down completely, which should avoid some corruption of data on-disk
 * The `create-account` tool has seen a number of improvements and will now ask for passwords automatically

 ### Fixes

 * The `/sync` endpoint will no longer lose state events when truncating the timeline for history visibility
 * The `/context` endpoint now works correctly with `lazy_load_members`
 * The `/directory/list/room/{roomID}` endpoint now correctly reports whether a room is published in the server room directory or not
 * Some bugs around appservice username validation have been fixed
 * Roomserver output messages are no longer unnecessarily inflated by state events, which should reduce the number of NATS message size errors
 * Stream IDs for device list updates are now always 64-bit, which should fix some problems when running Dendrite on a 32-bit system
 * Purging room state in the sync API has been fixed after a faulty database query was corrected
 * The federation client will now release host records for remote destinations after 5 minutes instead of holding them in memory forever
 * Remote media requests will now correctly return an error if the file cannot be found or downloaded
 * A panic in the media API that could happen when the remote file doesn't exist has been fixed
 * Various bugs around membership state and invites have been fixed
 * The memberships table will now be correctly updated when rejecting a federated invite
 * The client API and appservice API will now access the user database via the user API rather than accessing the database directly

 ## Dendrite 0.6.5 (2022-03-04)

 ### Features
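The two healthcheck endpoints in the changelog above are plain HTTP GET endpoints, so an external monitor only has to compare status codes. Below is a minimal Go sketch of such a probe; the base URL and timeout are assumptions for illustration and are not part of this changeset.

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

// probe performs a GET against a Dendrite monitoring endpoint and reports
// whether the status code indicates a healthy service.
func probe(client *http.Client, base, path string) error {
	resp, err := client.Get(base + path)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	// Per the changelog: /up returns 200 when ready to accept requests,
	// /health returns 200 when healthy and 503 when degraded.
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("%s returned %d", path, resp.StatusCode)
	}
	return nil
}

func main() {
	client := &http.Client{Timeout: 5 * time.Second}
	base := "http://localhost:8008" // assumed client API listener
	for _, path := range []string{"/_dendrite/monitor/up", "/_dendrite/monitor/health"} {
		if err := probe(client, base, path); err != nil {
			fmt.Println("unhealthy:", err)
			continue
		}
		fmt.Println("ok:", path)
	}
}
```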
@@ -1,4 +1,5 @@
-# Dendrite [![Build Status](https://badge.buildkite.com/4be40938ab19f2bbc4a6c6724517353ee3ec1422e279faf374.svg?branch=master)](https://buildkite.com/matrix-dot-org/dendrite) [![Dendrite](https://img.shields.io/matrix/dendrite:matrix.org.svg?label=%23dendrite%3Amatrix.org&logo=matrix&server_fqdn=matrix.org)](https://matrix.to/#/#dendrite:matrix.org) [![Dendrite Dev](https://img.shields.io/matrix/dendrite-dev:matrix.org.svg?label=%23dendrite-dev%3Amatrix.org&logo=matrix&server_fqdn=matrix.org)](https://matrix.to/#/#dendrite-dev:matrix.org)
+# Dendrite
+[![Build status](https://github.com/matrix-org/dendrite/actions/workflows/dendrite.yml/badge.svg?event=push)](https://github.com/matrix-org/dendrite/actions/workflows/dendrite.yml) [![Dendrite](https://img.shields.io/matrix/dendrite:matrix.org.svg?label=%23dendrite%3Amatrix.org&logo=matrix&server_fqdn=matrix.org)](https://matrix.to/#/#dendrite:matrix.org) [![Dendrite Dev](https://img.shields.io/matrix/dendrite-dev:matrix.org.svg?label=%23dendrite-dev%3Amatrix.org&logo=matrix&server_fqdn=matrix.org)](https://matrix.to/#/#dendrite-dev:matrix.org)

 Dendrite is a second-generation Matrix homeserver written in Go.
 It intends to provide an **efficient**, **reliable** and **scalable** alternative to [Synapse](https://github.com/matrix-org/synapse):
@@ -19,11 +19,10 @@ package api

 import (
 	"context"
-	"database/sql"
 	"errors"

 	"github.com/matrix-org/dendrite/clientapi/auth/authtypes"
-	userdb "github.com/matrix-org/dendrite/userapi/storage"
+	userapi "github.com/matrix-org/dendrite/userapi/api"
 	"github.com/matrix-org/gomatrixserverlib"
 )

@@ -85,7 +84,7 @@ func RetrieveUserProfile(
 	ctx context.Context,
 	userID string,
 	asAPI AppServiceQueryAPI,
-	accountDB userdb.Database,
+	profileAPI userapi.UserProfileAPI,
 ) (*authtypes.Profile, error) {
 	localpart, _, err := gomatrixserverlib.SplitID('@', userID)
 	if err != nil {

@@ -93,10 +92,17 @@ func RetrieveUserProfile(
 	}

 	// Try to query the user from the local database
-	profile, err := accountDB.GetProfileByLocalpart(ctx, localpart)
-	if err != nil && err != sql.ErrNoRows {
+	res := &userapi.QueryProfileResponse{}
+	err = profileAPI.QueryProfile(ctx, &userapi.QueryProfileRequest{UserID: userID}, res)
+	if err != nil {
 		return nil, err
-	} else if profile != nil {
+	}
+	profile := &authtypes.Profile{
+		Localpart:   localpart,
+		DisplayName: res.DisplayName,
+		AvatarURL:   res.AvatarURL,
+	}
+	if res.UserExists {
 		return profile, nil
 	}

@@ -113,11 +119,15 @@ func RetrieveUserProfile(
 	}

 	// Try to query the user from the local database again
-	profile, err = accountDB.GetProfileByLocalpart(ctx, localpart)
+	err = profileAPI.QueryProfile(ctx, &userapi.QueryProfileRequest{UserID: userID}, res)
 	if err != nil {
 		return nil, err
 	}

-	// profile should not be nil at this point
-	return profile, nil
+	return &authtypes.Profile{
+		Localpart:   localpart,
+		DisplayName: res.DisplayName,
+		AvatarURL:   res.AvatarURL,
+	}, nil
 }
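The hunks above replace direct `accountDB.GetProfileByLocalpart` lookups with a request/response call through the user API. The sketch below shows only the calling pattern, using the types visible in this diff (`QueryProfileRequest`, `QueryProfileResponse` with `UserExists`, `DisplayName` and `AvatarURL`); the helper name is illustrative and is not something added by this commit.

```go
import (
	"context"
	"fmt"

	"github.com/matrix-org/dendrite/clientapi/auth/authtypes"
	userapi "github.com/matrix-org/dendrite/userapi/api"
)

// fetchProfile illustrates the request/response style used above: build a
// request, pass a response struct to be filled in, then check UserExists
// before trusting the fields. The function name is an example only.
func fetchProfile(ctx context.Context, profileAPI userapi.UserProfileAPI, userID, localpart string) (*authtypes.Profile, error) {
	res := &userapi.QueryProfileResponse{}
	if err := profileAPI.QueryProfile(ctx, &userapi.QueryProfileRequest{UserID: userID}, res); err != nil {
		return nil, err
	}
	if !res.UserExists {
		return nil, fmt.Errorf("no profile found for %q", userID)
	}
	return &authtypes.Profile{
		Localpart:   localpart,
		DisplayName: res.DisplayName,
		AvatarURL:   res.AvatarURL,
	}, nil
}
```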
@@ -59,7 +59,7 @@ func NewInternalAPI(
 			},
 		},
 	}
-	js, _ := jetstream.Prepare(&base.Cfg.Global.JetStream)
+	js, _ := jetstream.Prepare(base.ProcessContext, &base.Cfg.Global.JetStream)

 	// Create a connection to the appservice postgres DB
 	appserviceDB, err := storage.NewDatabase(&base.Cfg.AppServiceAPI.Database)
@@ -56,7 +56,7 @@ func NewOutputRoomEventConsumer(
 		ctx:        process.Context(),
 		jetstream:  js,
 		durable:    cfg.Global.JetStream.Durable("AppserviceRoomserverConsumer"),
-		topic:      cfg.Global.JetStream.TopicFor(jetstream.OutputRoomEvent),
+		topic:      cfg.Global.JetStream.Prefixed(jetstream.OutputRoomEvent),
 		asDB:       appserviceDB,
 		rsAPI:      rsAPI,
 		serverName: string(cfg.Global.ServerName),
@@ -28,7 +28,6 @@ import (

 // Database stores events intended to be later sent to application services
 type Database struct {
-	sqlutil.PartitionOffsetStatements
 	events eventsStatements
 	txnID  txnStatements
 	db     *sql.DB

@@ -46,9 +45,6 @@ func NewDatabase(dbProperties *config.DatabaseOptions) (*Database, error) {
 	if err = result.prepare(); err != nil {
 		return nil, err
 	}
-	if err = result.PartitionOffsetStatements.Prepare(result.db, result.writer, "appservice"); err != nil {
-		return nil, err
-	}
 	return &result, nil
 }
@@ -27,7 +27,6 @@ import (

 // Database stores events intended to be later sent to application services
 type Database struct {
-	sqlutil.PartitionOffsetStatements
 	events eventsStatements
 	txnID  txnStatements
 	db     *sql.DB

@@ -45,9 +44,6 @@ func NewDatabase(dbProperties *config.DatabaseOptions) (*Database, error) {
 	if err = result.prepare(); err != nil {
 		return nil, err
 	}
-	if err = result.PartitionOffsetStatements.Prepare(result.db, result.writer, "appservice"); err != nil {
-		return nil, err
-	}
 	return &result, nil
 }
@@ -13,6 +13,10 @@ RUN go build -trimpath -o bin/ ./cmd/create-account
 RUN go build -trimpath -o bin/ ./cmd/generate-keys

 FROM alpine:latest
+LABEL org.opencontainers.image.title="Dendrite (Monolith)"
+LABEL org.opencontainers.image.description="Next-generation Matrix homeserver written in Go"
+LABEL org.opencontainers.image.source="https://github.com/matrix-org/dendrite"
+LABEL org.opencontainers.image.licenses="Apache-2.0"

 COPY --from=base /build/bin/* /usr/bin/
@@ -13,6 +13,10 @@ RUN go build -trimpath -o bin/ ./cmd/create-account
 RUN go build -trimpath -o bin/ ./cmd/generate-keys

 FROM alpine:latest
+LABEL org.opencontainers.image.title="Dendrite (Polylith)"
+LABEL org.opencontainers.image.description="Next-generation Matrix homeserver written in Go"
+LABEL org.opencontainers.image.source="https://github.com/matrix-org/dendrite"
+LABEL org.opencontainers.image.licenses="Apache-2.0"

 COPY --from=base /build/bin/* /usr/bin/
@@ -408,6 +408,8 @@ func (m *DendriteMonolith) Stop() {
 	_ = m.PineconeRouter.Close()
 }

+const MaxFrameSize = types.MaxFrameSize
+
 type Conduit struct {
 	conn net.Conn
 	port types.SwitchPortID
@@ -21,6 +21,7 @@ WORKDIR /dendrite
 RUN ./generate-keys --private-key matrix_key.pem

 ENV SERVER_NAME=localhost
+ENV API=0
 EXPOSE 8008 8448

 # At runtime, generate TLS cert based on the CA now mounted at /ca

@@ -28,4 +29,4 @@ EXPOSE 8008 8448
 CMD ./generate-keys --server $SERVER_NAME --tls-cert server.crt --tls-key server.key --tls-authority-cert /complement/ca/ca.crt --tls-authority-key /complement/ca/ca.key && \
 	./generate-config -server $SERVER_NAME --ci > dendrite.yaml && \
 	cp /complement/ca/ca.crt /usr/local/share/ca-certificates/ && update-ca-certificates && \
-	./dendrite-monolith-server --tls-cert server.crt --tls-key server.key --config dendrite.yaml
+	./dendrite-monolith-server --tls-cert server.crt --tls-key server.key --config dendrite.yaml -api=${API:-0}
@@ -39,6 +39,7 @@ WORKDIR /dendrite
 RUN ./generate-keys --private-key matrix_key.pem

 ENV SERVER_NAME=localhost
+ENV API=0
 EXPOSE 8008 8448


@@ -50,4 +51,4 @@ CMD /build/run_postgres.sh && ./generate-keys --server $SERVER_NAME --tls-cert s
 	sed -i "s%connection_string:.*$%connection_string: postgresql://postgres@localhost/postgres?sslmode=disable%g" dendrite.yaml && \
 	sed -i 's/max_open_conns:.*$/max_open_conns: 100/g' dendrite.yaml && \
 	cp /complement/ca/ca.crt /usr/local/share/ca-certificates/ && update-ca-certificates && \
-	./dendrite-monolith-server --tls-cert server.crt --tls-key server.key --config dendrite.yaml
+	./dendrite-monolith-server --tls-cert server.crt --tls-key server.key --config dendrite.yaml -api=${API:-0}
@@ -33,7 +33,7 @@ import (
 // called after authorization has completed, with the result of the authorization.
 // If the final return value is non-nil, an error occurred and the cleanup function
 // is nil.
-func LoginFromJSONReader(ctx context.Context, r io.Reader, accountDB AccountDatabase, userAPI UserInternalAPIForLogin, cfg *config.ClientAPI) (*Login, LoginCleanupFunc, *util.JSONResponse) {
+func LoginFromJSONReader(ctx context.Context, r io.Reader, useraccountAPI uapi.UserAccountAPI, userAPI UserInternalAPIForLogin, cfg *config.ClientAPI) (*Login, LoginCleanupFunc, *util.JSONResponse) {
 	reqBytes, err := ioutil.ReadAll(r)
 	if err != nil {
 		err := &util.JSONResponse{

@@ -58,7 +58,7 @@ func LoginFromJSONReader(ctx context.Context, r io.Reader, accountDB AccountData
 	switch header.Type {
 	case authtypes.LoginTypePassword:
 		typ = &LoginTypePassword{
-			GetAccountByPassword: accountDB.GetAccountByPassword,
+			GetAccountByPassword: useraccountAPI.QueryAccountByPassword,
 			Config:               cfg,
 		}
 	case authtypes.LoginTypeToken:
@@ -16,7 +16,6 @@ package auth

 import (
 	"context"
-	"database/sql"
 	"net/http"
 	"reflect"
 	"strings"

@@ -64,14 +63,13 @@ func TestLoginFromJSONReader(t *testing.T) {
 	}
 	for _, tst := range tsts {
 		t.Run(tst.Name, func(t *testing.T) {
-			var accountDB fakeAccountDB
 			var userAPI fakeUserInternalAPI
 			cfg := &config.ClientAPI{
 				Matrix: &config.Global{
 					ServerName: serverName,
 				},
 			}
-			login, cleanup, err := LoginFromJSONReader(ctx, strings.NewReader(tst.Body), &accountDB, &userAPI, cfg)
+			login, cleanup, err := LoginFromJSONReader(ctx, strings.NewReader(tst.Body), &userAPI, &userAPI, cfg)
 			if err != nil {
 				t.Fatalf("LoginFromJSONReader failed: %+v", err)
 			}

@@ -143,14 +141,13 @@ func TestBadLoginFromJSONReader(t *testing.T) {
 	}
 	for _, tst := range tsts {
 		t.Run(tst.Name, func(t *testing.T) {
-			var accountDB fakeAccountDB
 			var userAPI fakeUserInternalAPI
 			cfg := &config.ClientAPI{
 				Matrix: &config.Global{
 					ServerName: serverName,
 				},
 			}
-			_, cleanup, errRes := LoginFromJSONReader(ctx, strings.NewReader(tst.Body), &accountDB, &userAPI, cfg)
+			_, cleanup, errRes := LoginFromJSONReader(ctx, strings.NewReader(tst.Body), &userAPI, &userAPI, cfg)
 			if errRes == nil {
 				cleanup(ctx, nil)
 				t.Fatalf("LoginFromJSONReader err: got %+v, want code %q", errRes, tst.WantErrCode)

@@ -161,24 +158,22 @@ func TestBadLoginFromJSONReader(t *testing.T) {
 		})
 	}
 }

-type fakeAccountDB struct {
-	AccountDatabase
-}
-
-func (*fakeAccountDB) GetAccountByPassword(ctx context.Context, localpart, password string) (*uapi.Account, error) {
-	if password == "invalidpassword" {
-		return nil, sql.ErrNoRows
-	}
-
-	return &uapi.Account{}, nil
-}
-
 type fakeUserInternalAPI struct {
 	UserInternalAPIForLogin
-
+	uapi.UserAccountAPI
 	DeletedTokens []string
 }

+func (ua *fakeUserInternalAPI) QueryAccountByPassword(ctx context.Context, req *uapi.QueryAccountByPasswordRequest, res *uapi.QueryAccountByPasswordResponse) error {
+	if req.PlaintextPassword == "invalidpassword" {
+		res.Account = nil
+		return nil
+	}
+	res.Exists = true
+	res.Account = &uapi.Account{}
+	return nil
+}
+
 func (ua *fakeUserInternalAPI) PerformLoginTokenDeletion(ctx context.Context, req *uapi.PerformLoginTokenDeletionRequest, res *uapi.PerformLoginTokenDeletionResponse) error {
 	ua.DeletedTokens = append(ua.DeletedTokens, req.Token)
 	return nil
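The test fake above relies on a common Go pattern: embed the interface (here `uapi.UserAccountAPI`) so the struct satisfies it without implementing every method, then override only the calls the test actually exercises. Below is a small standalone illustration of that pattern; the interface and type names are invented for the example and are not part of Dendrite.

```go
package main

import (
	"context"
	"fmt"
)

// Accounts stands in for a wide internal API interface.
type Accounts interface {
	QueryByPassword(ctx context.Context, localpart, password string) (bool, error)
	Deactivate(ctx context.Context, localpart string) error
}

// fakeAccounts embeds the interface: unimplemented methods panic if called,
// which keeps the fake short and makes unexpected calls obvious in tests.
type fakeAccounts struct {
	Accounts
}

// Only the method under test is overridden.
func (fakeAccounts) QueryByPassword(_ context.Context, _, password string) (bool, error) {
	return password != "invalidpassword", nil
}

func main() {
	var a Accounts = fakeAccounts{}
	ok, _ := a.QueryByPassword(context.Background(), "alice", "correct horse")
	fmt.Println("accepted:", ok)
}
```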
@@ -16,7 +16,6 @@ package auth

 import (
 	"context"
-	"database/sql"
 	"net/http"
 	"strings"

@@ -29,7 +28,7 @@ import (
 	"github.com/matrix-org/util"
 )

-type GetAccountByPassword func(ctx context.Context, localpart, password string) (*api.Account, error)
+type GetAccountByPassword func(ctx context.Context, req *api.QueryAccountByPasswordRequest, res *api.QueryAccountByPasswordResponse) error

 type PasswordRequest struct {
 	Login

@@ -62,7 +61,7 @@ func (t *LoginTypePassword) LoginFromJSON(ctx context.Context, reqBytes []byte)

 func (t *LoginTypePassword) Login(ctx context.Context, req interface{}) (*Login, *util.JSONResponse) {
 	r := req.(*PasswordRequest)
 	username := strings.ToLower(r.Username())
 	if username == "" {
 		return nil, &util.JSONResponse{
 			Code: http.StatusUnauthorized,

@@ -77,19 +76,33 @@ func (t *LoginTypePassword) Login(ctx context.Context, req interface{}) (*Login,
 		}
 	}
 	// Squash username to all lowercase letters
-	_, err = t.GetAccountByPassword(ctx, strings.ToLower(localpart), r.Password)
-	if err != nil {
-		if err == sql.ErrNoRows {
-			_, err = t.GetAccountByPassword(ctx, localpart, r.Password)
-			if err == nil {
-				return &r.Login, nil
-			}
-		}
-		// Technically we could tell them if the user does not exist by checking if err == sql.ErrNoRows
-		// but that would leak the existence of the user.
-		return nil, &util.JSONResponse{
-			Code: http.StatusForbidden,
-			JSON: jsonerror.Forbidden("The username or password was incorrect or the account does not exist."),
-		}
-	}
+	res := &api.QueryAccountByPasswordResponse{}
+	err = t.GetAccountByPassword(ctx, &api.QueryAccountByPasswordRequest{Localpart: strings.ToLower(localpart), PlaintextPassword: r.Password}, res)
+	if err != nil {
+		return nil, &util.JSONResponse{
+			Code: http.StatusInternalServerError,
+			JSON: jsonerror.Unknown("unable to fetch account by password"),
+		}
+	}
+
+	if !res.Exists {
+		err = t.GetAccountByPassword(ctx, &api.QueryAccountByPasswordRequest{
+			Localpart:         localpart,
+			PlaintextPassword: r.Password,
+		}, res)
+		if err != nil {
+			return nil, &util.JSONResponse{
+				Code: http.StatusInternalServerError,
+				JSON: jsonerror.Unknown("unable to fetch account by password"),
+			}
+		}
+		// Technically we could tell them if the user does not exist by checking if err == sql.ErrNoRows
+		// but that would leak the existence of the user.
+		if !res.Exists {
+			return nil, &util.JSONResponse{
+				Code: http.StatusForbidden,
+				JSON: jsonerror.Forbidden("The username or password was incorrect or the account does not exist."),
+			}
+		}
+	}
 	return &r.Login, nil
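`GetAccountByPassword` is now a function type matching the user API signature, so password login can be backed by anything that can answer a `QueryAccountByPasswordRequest`. The snippet below is a hedged sketch of wiring that up, using only names that appear in this diff; it assumes placement inside the `clientapi/auth` package, and the helper name is illustrative only.

```go
import (
	"github.com/matrix-org/dendrite/setup/config"
	"github.com/matrix-org/dendrite/userapi/api"
)

// buildPasswordLogin shows how the callback is satisfied either by the real
// user API or by any function with the same shape (as the tests above do).
// The function name is an example, not part of this commit.
func buildPasswordLogin(accountAPI api.UserAccountAPI, cfg *config.ClientAPI) *LoginTypePassword {
	return &LoginTypePassword{
		// QueryAccountByPassword has the signature
		// func(ctx, *api.QueryAccountByPasswordRequest, *api.QueryAccountByPasswordResponse) error,
		// which is exactly the new GetAccountByPassword type above.
		GetAccountByPassword: accountAPI.QueryAccountByPassword,
		Config:               cfg,
	}
}
```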
@@ -110,9 +110,9 @@ type UserInteractive struct {
 	Sessions map[string][]string
 }

-func NewUserInteractive(accountDB AccountDatabase, cfg *config.ClientAPI) *UserInteractive {
+func NewUserInteractive(userAccountAPI api.UserAccountAPI, cfg *config.ClientAPI) *UserInteractive {
 	typePassword := &LoginTypePassword{
-		GetAccountByPassword: accountDB.GetAccountByPassword,
+		GetAccountByPassword: userAccountAPI.QueryAccountByPassword,
 		Config:               cfg,
 	}
 	return &UserInteractive{
@@ -25,15 +25,25 @@ var (
 )

 type fakeAccountDatabase struct {
-	AccountDatabase
+	api.UserAccountAPI
 }

-func (*fakeAccountDatabase) GetAccountByPassword(ctx context.Context, localpart, plaintextPassword string) (*api.Account, error) {
-	acc, ok := lookup[localpart+" "+plaintextPassword]
+func (d *fakeAccountDatabase) PerformPasswordUpdate(ctx context.Context, req *api.PerformPasswordUpdateRequest, res *api.PerformPasswordUpdateResponse) error {
+	return nil
+}
+
+func (d *fakeAccountDatabase) PerformAccountDeactivation(ctx context.Context, req *api.PerformAccountDeactivationRequest, res *api.PerformAccountDeactivationResponse) error {
+	return nil
+}
+
+func (d *fakeAccountDatabase) QueryAccountByPassword(ctx context.Context, req *api.QueryAccountByPasswordRequest, res *api.QueryAccountByPasswordResponse) error {
+	acc, ok := lookup[req.Localpart+" "+req.PlaintextPassword]
 	if !ok {
-		return nil, fmt.Errorf("unknown user/password")
+		return fmt.Errorf("unknown user/password")
 	}
-	return acc, nil
+	res.Account = acc
+	res.Exists = true
+	return nil
 }

 func setup() *UserInteractive {
@@ -27,17 +27,17 @@ import (
 	roomserverAPI "github.com/matrix-org/dendrite/roomserver/api"
 	"github.com/matrix-org/dendrite/setup/config"
 	"github.com/matrix-org/dendrite/setup/jetstream"
+	"github.com/matrix-org/dendrite/setup/process"
 	userapi "github.com/matrix-org/dendrite/userapi/api"
-	userdb "github.com/matrix-org/dendrite/userapi/storage"
 	"github.com/matrix-org/gomatrixserverlib"
 )

 // AddPublicRoutes sets up and registers HTTP handlers for the ClientAPI component.
 func AddPublicRoutes(
+	process *process.ProcessContext,
 	router *mux.Router,
 	synapseAdminRouter *mux.Router,
 	cfg *config.ClientAPI,
-	accountsDB userdb.Database,
 	federation *gomatrixserverlib.FederationClient,
 	rsAPI roomserverAPI.RoomserverInternalAPI,
 	eduInputAPI eduServerAPI.EDUServerInputAPI,

@@ -49,16 +49,16 @@ func AddPublicRoutes(
 	extRoomsProvider api.ExtraPublicRoomsProvider,
 	mscCfg *config.MSCs,
 ) {
-	js, _ := jetstream.Prepare(&cfg.Matrix.JetStream)
+	js, _ := jetstream.Prepare(process, &cfg.Matrix.JetStream)

 	syncProducer := &producers.SyncAPIProducer{
 		JetStream: js,
-		Topic:     cfg.Matrix.JetStream.TopicFor(jetstream.OutputClientData),
+		Topic:     cfg.Matrix.JetStream.Prefixed(jetstream.OutputClientData),
 	}

 	routing.Setup(
 		router, synapseAdminRouter, cfg, eduInputAPI, rsAPI, asAPI,
-		accountsDB, userAPI, federation,
+		userAPI, federation,
 		syncProducer, transactionsCache, fsAPI, keyAPI,
 		extRoomsProvider, mscCfg,
 	)
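Two related changes appear here and in the appservice hunks earlier: `jetstream.Prepare` now receives the process context, and producers and consumers ask the config for `Prefixed(...)` subject names instead of `TopicFor(...)`. The exact behaviour of `Prefixed` is not shown in this diff; the toy below only illustrates the general idea of deriving per-deployment subject names from a configured prefix, and every name in it is an assumption made for the example.

```go
package main

import "fmt"

// streamConfig mimics a config object that knows a per-deployment prefix.
// This is a toy model, not Dendrite's actual JetStream config type.
type streamConfig struct {
	TopicPrefix string
}

// Prefixed derives a concrete subject name from a logical stream name, so
// several deployments could share one NATS server without subject clashes.
func (c streamConfig) Prefixed(name string) string {
	return c.TopicPrefix + name
}

func main() {
	cfg := streamConfig{TopicPrefix: "Dendrite"}
	fmt.Println(cfg.Prefixed("OutputClientData")) // DendriteOutputClientData
	fmt.Println(cfg.Prefixed("OutputRoomEvent"))  // DendriteOutputRoomEvent
}
```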
@@ -31,7 +31,6 @@ import (
 	"github.com/matrix-org/dendrite/clientapi/jsonerror"
 	"github.com/matrix-org/dendrite/internal/eventutil"
 	"github.com/matrix-org/dendrite/setup/config"
-	userdb "github.com/matrix-org/dendrite/userapi/storage"
 	"github.com/matrix-org/gomatrixserverlib"
 	"github.com/matrix-org/util"
 	log "github.com/sirupsen/logrus"

@@ -138,7 +137,7 @@ type fledglingEvent struct {
 func CreateRoom(
 	req *http.Request, device *api.Device,
 	cfg *config.ClientAPI,
-	accountDB userdb.Database, rsAPI roomserverAPI.RoomserverInternalAPI,
+	profileAPI api.UserProfileAPI, rsAPI roomserverAPI.RoomserverInternalAPI,
 	asAPI appserviceAPI.AppServiceQueryAPI,
 ) util.JSONResponse {
 	var r createRoomRequest

@@ -156,7 +155,7 @@ func CreateRoom(
 			JSON: jsonerror.InvalidArgumentValue(err.Error()),
 		}
 	}
-	return createRoom(req.Context(), r, device, cfg, accountDB, rsAPI, asAPI, evTime)
+	return createRoom(req.Context(), r, device, cfg, profileAPI, rsAPI, asAPI, evTime)
 }

 // createRoom implements /createRoom

@@ -165,7 +164,7 @@ func createRoom(
 	ctx context.Context,
 	r createRoomRequest, device *api.Device,
 	cfg *config.ClientAPI,
-	accountDB userdb.Database, rsAPI roomserverAPI.RoomserverInternalAPI,
+	profileAPI api.UserProfileAPI, rsAPI roomserverAPI.RoomserverInternalAPI,
 	asAPI appserviceAPI.AppServiceQueryAPI,
 	evTime time.Time,
 ) util.JSONResponse {

@@ -201,7 +200,7 @@ func createRoom(
 		"roomVersion": roomVersion,
 	}).Info("Creating new room")

-	profile, err := appserviceAPI.RetrieveUserProfile(ctx, userID, asAPI, accountDB)
+	profile, err := appserviceAPI.RetrieveUserProfile(ctx, userID, asAPI, profileAPI)
 	if err != nil {
 		util.GetLogger(ctx).WithError(err).Error("appserviceAPI.RetrieveUserProfile failed")
 		return jsonerror.InternalServerError()

@@ -520,7 +519,7 @@ func createRoom(
 	for _, invitee := range r.Invite {
 		// Build the invite event.
 		inviteEvent, err := buildMembershipEvent(
-			ctx, invitee, "", accountDB, device, gomatrixserverlib.Invite,
+			ctx, invitee, "", profileAPI, device, gomatrixserverlib.Invite,
 			roomID, true, cfg, evTime, rsAPI, asAPI,
 		)
 		if err != nil {
@@ -15,7 +15,7 @@ import (
 func Deactivate(
 	req *http.Request,
 	userInteractiveAuth *auth.UserInteractive,
-	userAPI api.UserInternalAPI,
+	accountAPI api.UserAccountAPI,
 	deviceAPI *api.Device,
 ) util.JSONResponse {
 	ctx := req.Context()

@@ -40,7 +40,7 @@ func Deactivate(
 	}

 	var res api.PerformAccountDeactivationResponse
-	err = userAPI.PerformAccountDeactivation(ctx, &api.PerformAccountDeactivationRequest{
+	err = accountAPI.PerformAccountDeactivation(ctx, &api.PerformAccountDeactivationRequest{
 		Localpart: localpart,
 	}, &res)
 	if err != nil {
@@ -18,12 +18,10 @@ import (
 	"net/http"
 	"time"

-	"github.com/matrix-org/dendrite/clientapi/auth/authtypes"
 	"github.com/matrix-org/dendrite/clientapi/httputil"
 	"github.com/matrix-org/dendrite/clientapi/jsonerror"
 	roomserverAPI "github.com/matrix-org/dendrite/roomserver/api"
 	"github.com/matrix-org/dendrite/userapi/api"
-	userdb "github.com/matrix-org/dendrite/userapi/storage"
 	"github.com/matrix-org/gomatrixserverlib"
 	"github.com/matrix-org/util"
 )

@@ -32,7 +30,7 @@ func JoinRoomByIDOrAlias(
 	req *http.Request,
 	device *api.Device,
 	rsAPI roomserverAPI.RoomserverInternalAPI,
-	accountDB userdb.Database,
+	profileAPI api.UserProfileAPI,
 	roomIDOrAlias string,
 ) util.JSONResponse {
 	// Prepare to ask the roomserver to perform the room join.

@@ -60,19 +58,23 @@ func JoinRoomByIDOrAlias(
 	_ = httputil.UnmarshalJSONRequest(req, &joinReq.Content)

-	// Work out our localpart for the client profile request.
-	localpart, _, err := gomatrixserverlib.SplitID('@', device.UserID)
-	if err != nil {
-		util.GetLogger(req.Context()).WithError(err).Error("gomatrixserverlib.SplitID failed")
-	} else {
-		// Request our profile content to populate the request content with.
-		var profile *authtypes.Profile
-		profile, err = accountDB.GetProfileByLocalpart(req.Context(), localpart)
-		if err != nil {
-			util.GetLogger(req.Context()).WithError(err).Error("accountDB.GetProfileByLocalpart failed")
-		} else {
-			joinReq.Content["displayname"] = profile.DisplayName
-			joinReq.Content["avatar_url"] = profile.AvatarURL
-		}
+	// Request our profile content to populate the request content with.
+	res := &api.QueryProfileResponse{}
+	err := profileAPI.QueryProfile(req.Context(), &api.QueryProfileRequest{UserID: device.UserID}, res)
+	if err != nil || !res.UserExists {
+		if !res.UserExists {
+			util.GetLogger(req.Context()).Error("Unable to query user profile, no profile found.")
+			return util.JSONResponse{
+				Code: http.StatusInternalServerError,
+				JSON: jsonerror.Unknown("Unable to query user profile, no profile found."),
+			}
+		}
+
+		util.GetLogger(req.Context()).WithError(err).Error("UserProfileAPI.QueryProfile failed")
+	} else {
+		joinReq.Content["displayname"] = res.DisplayName
+		joinReq.Content["avatar_url"] = res.AvatarURL
 	}

 	// Ask the roomserver to perform the join.
@@ -24,7 +24,6 @@ import (
 	"github.com/matrix-org/dendrite/keyserver/api"
 	"github.com/matrix-org/dendrite/setup/config"
 	userapi "github.com/matrix-org/dendrite/userapi/api"
-	userdb "github.com/matrix-org/dendrite/userapi/storage"
 	"github.com/matrix-org/util"
 )

@@ -36,7 +35,7 @@ type crossSigningRequest struct {
 func UploadCrossSigningDeviceKeys(
 	req *http.Request, userInteractiveAuth *auth.UserInteractive,
 	keyserverAPI api.KeyInternalAPI, device *userapi.Device,
-	accountDB userdb.Database, cfg *config.ClientAPI,
+	accountAPI userapi.UserAccountAPI, cfg *config.ClientAPI,
 ) util.JSONResponse {
 	uploadReq := &crossSigningRequest{}
 	uploadRes := &api.PerformUploadDeviceKeysResponse{}

@@ -64,7 +63,7 @@ func UploadCrossSigningDeviceKeys(
 		}
 	}
 	typePassword := auth.LoginTypePassword{
-		GetAccountByPassword: accountDB.GetAccountByPassword,
+		GetAccountByPassword: accountAPI.QueryAccountByPassword,
 		Config:               cfg,
 	}
 	if _, authErr := typePassword.Login(req.Context(), &uploadReq.Auth.PasswordRequest); authErr != nil {
@@ -23,7 +23,6 @@ import (
 	"github.com/matrix-org/dendrite/clientapi/userutil"
 	"github.com/matrix-org/dendrite/setup/config"
 	userapi "github.com/matrix-org/dendrite/userapi/api"
-	userdb "github.com/matrix-org/dendrite/userapi/storage"
 	"github.com/matrix-org/gomatrixserverlib"
 	"github.com/matrix-org/util"
 )

@@ -54,7 +53,7 @@ func passwordLogin() flows {

 // Login implements GET and POST /login
 func Login(
-	req *http.Request, accountDB userdb.Database, userAPI userapi.UserInternalAPI,
+	req *http.Request, userAPI userapi.UserInternalAPI,
 	cfg *config.ClientAPI,
 ) util.JSONResponse {
 	if req.Method == http.MethodGet {

@@ -64,7 +63,7 @@ func Login(
 			JSON: passwordLogin(),
 		}
 	} else if req.Method == http.MethodPost {
-		login, cleanup, authErr := auth.LoginFromJSONReader(req.Context(), req.Body, accountDB, userAPI, cfg)
+		login, cleanup, authErr := auth.LoginFromJSONReader(req.Context(), req.Body, userAPI, userAPI, cfg)
 		if authErr != nil {
 			return *authErr
 		}
@@ -30,7 +30,6 @@ import (
 	roomserverAPI "github.com/matrix-org/dendrite/roomserver/api"
 	"github.com/matrix-org/dendrite/setup/config"
 	userapi "github.com/matrix-org/dendrite/userapi/api"
-	userdb "github.com/matrix-org/dendrite/userapi/storage"
 	"github.com/matrix-org/gomatrixserverlib"

 	"github.com/matrix-org/util"

@@ -39,7 +38,7 @@ import (
 var errMissingUserID = errors.New("'user_id' must be supplied")

 func SendBan(
-	req *http.Request, accountDB userdb.Database, device *userapi.Device,
+	req *http.Request, profileAPI userapi.UserProfileAPI, device *userapi.Device,
 	roomID string, cfg *config.ClientAPI,
 	rsAPI roomserverAPI.RoomserverInternalAPI, asAPI appserviceAPI.AppServiceQueryAPI,
 ) util.JSONResponse {

@@ -78,16 +77,16 @@ func SendBan(
 		}
 	}

-	return sendMembership(req.Context(), accountDB, device, roomID, "ban", body.Reason, cfg, body.UserID, evTime, roomVer, rsAPI, asAPI)
+	return sendMembership(req.Context(), profileAPI, device, roomID, "ban", body.Reason, cfg, body.UserID, evTime, roomVer, rsAPI, asAPI)
 }

-func sendMembership(ctx context.Context, accountDB userdb.Database, device *userapi.Device,
+func sendMembership(ctx context.Context, profileAPI userapi.UserProfileAPI, device *userapi.Device,
 	roomID, membership, reason string, cfg *config.ClientAPI, targetUserID string, evTime time.Time,
 	roomVer gomatrixserverlib.RoomVersion,
 	rsAPI roomserverAPI.RoomserverInternalAPI, asAPI appserviceAPI.AppServiceQueryAPI) util.JSONResponse {

 	event, err := buildMembershipEvent(
-		ctx, targetUserID, reason, accountDB, device, membership,
+		ctx, targetUserID, reason, profileAPI, device, membership,
 		roomID, false, cfg, evTime, rsAPI, asAPI,
 	)
 	if err == errMissingUserID {

@@ -125,7 +124,7 @@ func sendMembership(ctx context.Context, accountDB userdb.Database, device *user
 }

 func SendKick(
-	req *http.Request, accountDB userdb.Database, device *userapi.Device,
+	req *http.Request, profileAPI userapi.UserProfileAPI, device *userapi.Device,
 	roomID string, cfg *config.ClientAPI,
 	rsAPI roomserverAPI.RoomserverInternalAPI, asAPI appserviceAPI.AppServiceQueryAPI,
 ) util.JSONResponse {

@@ -161,11 +160,11 @@ func SendKick(
 		}
 	}
 	// TODO: should we be using SendLeave instead?
-	return sendMembership(req.Context(), accountDB, device, roomID, "leave", body.Reason, cfg, body.UserID, evTime, roomVer, rsAPI, asAPI)
+	return sendMembership(req.Context(), profileAPI, device, roomID, "leave", body.Reason, cfg, body.UserID, evTime, roomVer, rsAPI, asAPI)
 }

 func SendUnban(
-	req *http.Request, accountDB userdb.Database, device *userapi.Device,
+	req *http.Request, profileAPI userapi.UserProfileAPI, device *userapi.Device,
 	roomID string, cfg *config.ClientAPI,
 	rsAPI roomserverAPI.RoomserverInternalAPI, asAPI appserviceAPI.AppServiceQueryAPI,
 ) util.JSONResponse {

@@ -196,11 +195,11 @@ func SendUnban(
 		}
 	}
 	// TODO: should we be using SendLeave instead?
-	return sendMembership(req.Context(), accountDB, device, roomID, "leave", body.Reason, cfg, body.UserID, evTime, roomVer, rsAPI, asAPI)
+	return sendMembership(req.Context(), profileAPI, device, roomID, "leave", body.Reason, cfg, body.UserID, evTime, roomVer, rsAPI, asAPI)
 }

 func SendInvite(
-	req *http.Request, accountDB userdb.Database, device *userapi.Device,
+	req *http.Request, profileAPI userapi.UserProfileAPI, device *userapi.Device,
 	roomID string, cfg *config.ClientAPI,
 	rsAPI roomserverAPI.RoomserverInternalAPI, asAPI appserviceAPI.AppServiceQueryAPI,
 ) util.JSONResponse {

@@ -210,7 +209,7 @@ func SendInvite(
 	}

 	inviteStored, jsonErrResp := checkAndProcessThreepid(
-		req, device, body, cfg, rsAPI, accountDB, roomID, evTime,
+		req, device, body, cfg, rsAPI, profileAPI, roomID, evTime,
 	)
 	if jsonErrResp != nil {
 		return *jsonErrResp

@@ -227,14 +226,14 @@ func SendInvite(
 	}

 	// We already received the return value, so no need to check for an error here.
-	response, _ := sendInvite(req.Context(), accountDB, device, roomID, body.UserID, body.Reason, cfg, rsAPI, asAPI, evTime)
+	response, _ := sendInvite(req.Context(), profileAPI, device, roomID, body.UserID, body.Reason, cfg, rsAPI, asAPI, evTime)
 	return response
 }

 // sendInvite sends an invitation to a user. Returns a JSONResponse and an error
 func sendInvite(
 	ctx context.Context,
-	accountDB userdb.Database,
+	profileAPI userapi.UserProfileAPI,
 	device *userapi.Device,
 	roomID, userID, reason string,
 	cfg *config.ClientAPI,

@@ -242,7 +241,7 @@ func sendInvite(
 	asAPI appserviceAPI.AppServiceQueryAPI, evTime time.Time,
 ) (util.JSONResponse, error) {
 	event, err := buildMembershipEvent(
-		ctx, userID, reason, accountDB, device, "invite",
+		ctx, userID, reason, profileAPI, device, "invite",
 		roomID, false, cfg, evTime, rsAPI, asAPI,
 	)
 	if err == errMissingUserID {

@@ -286,13 +285,13 @@ func sendInvite(

 func buildMembershipEvent(
 	ctx context.Context,
-	targetUserID, reason string, accountDB userdb.Database,
+	targetUserID, reason string, profileAPI userapi.UserProfileAPI,
 	device *userapi.Device,
 	membership, roomID string, isDirect bool,
 	cfg *config.ClientAPI, evTime time.Time,
 	rsAPI roomserverAPI.RoomserverInternalAPI, asAPI appserviceAPI.AppServiceQueryAPI,
 ) (*gomatrixserverlib.HeaderedEvent, error) {
-	profile, err := loadProfile(ctx, targetUserID, cfg, accountDB, asAPI)
+	profile, err := loadProfile(ctx, targetUserID, cfg, profileAPI, asAPI)
 	if err != nil {
 		return nil, err
 	}

@@ -327,7 +326,7 @@ func loadProfile(
 	ctx context.Context,
|
||||
userID string,
|
||||
cfg *config.ClientAPI,
|
||||
accountDB userdb.Database,
|
||||
profileAPI userapi.UserProfileAPI,
|
||||
asAPI appserviceAPI.AppServiceQueryAPI,
|
||||
) (*authtypes.Profile, error) {
|
||||
_, serverName, err := gomatrixserverlib.SplitID('@', userID)
|
||||
|
@ -337,7 +336,7 @@ func loadProfile(
|
|||
|
||||
var profile *authtypes.Profile
|
||||
if serverName == cfg.Matrix.ServerName {
|
||||
profile, err = appserviceAPI.RetrieveUserProfile(ctx, userID, asAPI, accountDB)
|
||||
profile, err = appserviceAPI.RetrieveUserProfile(ctx, userID, asAPI, profileAPI)
|
||||
} else {
|
||||
profile = &authtypes.Profile{}
|
||||
}
|
||||
|
@ -381,13 +380,13 @@ func checkAndProcessThreepid(
|
|||
body *threepid.MembershipRequest,
|
||||
cfg *config.ClientAPI,
|
||||
rsAPI roomserverAPI.RoomserverInternalAPI,
|
||||
accountDB userdb.Database,
|
||||
profileAPI userapi.UserProfileAPI,
|
||||
roomID string,
|
||||
evTime time.Time,
|
||||
) (inviteStored bool, errRes *util.JSONResponse) {
|
||||
|
||||
inviteStored, err := threepid.CheckAndProcessInvite(
|
||||
req.Context(), device, body, cfg, rsAPI, accountDB,
|
||||
req.Context(), device, body, cfg, rsAPI, profileAPI,
|
||||
roomID, evTime,
|
||||
)
|
||||
if err == threepid.ErrMissingParameter {
|
||||
|
|
|
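All of the membership handlers above now take `userapi.UserProfileAPI` where they previously took a `userdb.Database`, so profile reads become request/response calls against the user API rather than direct SQL. A rough sketch of the lookup these handlers now depend on, using only the types visible in this diff (the package name and the `authtypes` import path are assumptions):

```go
package sketch

import (
	"context"

	"github.com/matrix-org/dendrite/clientapi/auth/authtypes"
	userapi "github.com/matrix-org/dendrite/userapi/api"
)

// queryProfile mirrors the loadProfile change: the profile comes back in a
// QueryProfileResponse instead of a database row.
func queryProfile(ctx context.Context, profileAPI userapi.UserProfileAPI, userID, localpart string) (*authtypes.Profile, error) {
	res := &userapi.QueryProfileResponse{}
	if err := profileAPI.QueryProfile(ctx, &userapi.QueryProfileRequest{UserID: userID}, res); err != nil {
		return nil, err
	}
	return &authtypes.Profile{
		Localpart:   localpart,
		DisplayName: res.DisplayName,
		AvatarURL:   res.AvatarURL,
	}, nil
}
```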
@ -9,7 +9,6 @@ import (
|
|||
"github.com/matrix-org/dendrite/clientapi/jsonerror"
|
||||
"github.com/matrix-org/dendrite/setup/config"
|
||||
"github.com/matrix-org/dendrite/userapi/api"
|
||||
userdb "github.com/matrix-org/dendrite/userapi/storage"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
"github.com/matrix-org/util"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
@ -30,7 +29,6 @@ type newPasswordAuth struct {
|
|||
func Password(
|
||||
req *http.Request,
|
||||
userAPI api.UserInternalAPI,
|
||||
accountDB userdb.Database,
|
||||
device *api.Device,
|
||||
cfg *config.ClientAPI,
|
||||
) util.JSONResponse {
|
||||
|
@ -74,7 +72,7 @@ func Password(
|
|||
|
||||
// Check if the existing password is correct.
|
||||
typePassword := auth.LoginTypePassword{
|
||||
GetAccountByPassword: accountDB.GetAccountByPassword,
|
||||
GetAccountByPassword: userAPI.QueryAccountByPassword,
|
||||
Config: cfg,
|
||||
}
|
||||
if _, authErr := typePassword.Login(req.Context(), &r.Auth.PasswordRequest); authErr != nil {
|
||||
|
|
|
@ -19,7 +19,6 @@ import (
|
|||
|
||||
roomserverAPI "github.com/matrix-org/dendrite/roomserver/api"
|
||||
"github.com/matrix-org/dendrite/userapi/api"
|
||||
userdb "github.com/matrix-org/dendrite/userapi/storage"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
"github.com/matrix-org/util"
|
||||
)
|
||||
|
@ -28,7 +27,6 @@ func PeekRoomByIDOrAlias(
|
|||
req *http.Request,
|
||||
device *api.Device,
|
||||
rsAPI roomserverAPI.RoomserverInternalAPI,
|
||||
accountDB userdb.Database,
|
||||
roomIDOrAlias string,
|
||||
) util.JSONResponse {
|
||||
// if this is a remote roomIDOrAlias, we have to ask the roomserver (or federation sender?) to
|
||||
|
@ -82,7 +80,6 @@ func UnpeekRoomByID(
|
|||
req *http.Request,
|
||||
device *api.Device,
|
||||
rsAPI roomserverAPI.RoomserverInternalAPI,
|
||||
accountDB userdb.Database,
|
||||
roomID string,
|
||||
) util.JSONResponse {
|
||||
unpeekReq := roomserverAPI.PerformUnpeekRequest{
|
||||
|
|
|
@ -27,7 +27,6 @@ import (
|
|||
"github.com/matrix-org/dendrite/roomserver/api"
|
||||
"github.com/matrix-org/dendrite/setup/config"
|
||||
userapi "github.com/matrix-org/dendrite/userapi/api"
|
||||
userdb "github.com/matrix-org/dendrite/userapi/storage"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
|
||||
"github.com/matrix-org/gomatrix"
|
||||
|
@ -36,12 +35,12 @@ import (
|
|||
|
||||
// GetProfile implements GET /profile/{userID}
|
||||
func GetProfile(
|
||||
req *http.Request, accountDB userdb.Database, cfg *config.ClientAPI,
|
||||
req *http.Request, profileAPI userapi.UserProfileAPI, cfg *config.ClientAPI,
|
||||
userID string,
|
||||
asAPI appserviceAPI.AppServiceQueryAPI,
|
||||
federation *gomatrixserverlib.FederationClient,
|
||||
) util.JSONResponse {
|
||||
profile, err := getProfile(req.Context(), accountDB, cfg, userID, asAPI, federation)
|
||||
profile, err := getProfile(req.Context(), profileAPI, cfg, userID, asAPI, federation)
|
||||
if err != nil {
|
||||
if err == eventutil.ErrProfileNoExists {
|
||||
return util.JSONResponse{
|
||||
|
@ -65,11 +64,11 @@ func GetProfile(
|
|||
|
||||
// GetAvatarURL implements GET /profile/{userID}/avatar_url
|
||||
func GetAvatarURL(
|
||||
req *http.Request, accountDB userdb.Database, cfg *config.ClientAPI,
|
||||
req *http.Request, profileAPI userapi.UserProfileAPI, cfg *config.ClientAPI,
|
||||
userID string, asAPI appserviceAPI.AppServiceQueryAPI,
|
||||
federation *gomatrixserverlib.FederationClient,
|
||||
) util.JSONResponse {
|
||||
profile, err := getProfile(req.Context(), accountDB, cfg, userID, asAPI, federation)
|
||||
profile, err := getProfile(req.Context(), profileAPI, cfg, userID, asAPI, federation)
|
||||
if err != nil {
|
||||
if err == eventutil.ErrProfileNoExists {
|
||||
return util.JSONResponse{
|
||||
|
@ -92,7 +91,7 @@ func GetAvatarURL(
|
|||
|
||||
// SetAvatarURL implements PUT /profile/{userID}/avatar_url
|
||||
func SetAvatarURL(
|
||||
req *http.Request, accountDB userdb.Database,
|
||||
req *http.Request, profileAPI userapi.UserProfileAPI,
|
||||
device *userapi.Device, userID string, cfg *config.ClientAPI, rsAPI api.RoomserverInternalAPI,
|
||||
) util.JSONResponse {
|
||||
if userID != device.UserID {
|
||||
|
@@ -127,22 +126,34 @@ func SetAvatarURL(
}
}

- oldProfile, err := accountDB.GetProfileByLocalpart(req.Context(), localpart)
+ res := &userapi.QueryProfileResponse{}
+ err = profileAPI.QueryProfile(req.Context(), &userapi.QueryProfileRequest{
+ UserID: userID,
+ }, res)
if err != nil {
- util.GetLogger(req.Context()).WithError(err).Error("accountDB.GetProfileByLocalpart failed")
+ util.GetLogger(req.Context()).WithError(err).Error("profileAPI.QueryProfile failed")
return jsonerror.InternalServerError()
}
+ oldProfile := &authtypes.Profile{
+ Localpart: localpart,
+ DisplayName: res.DisplayName,
+ AvatarURL: res.AvatarURL,
+ }

+ setRes := &userapi.PerformSetAvatarURLResponse{}
+ if err = profileAPI.SetAvatarURL(req.Context(), &userapi.PerformSetAvatarURLRequest{
+ Localpart: localpart,
+ AvatarURL: r.AvatarURL,
+ }, setRes); err != nil {
+ util.GetLogger(req.Context()).WithError(err).Error("profileAPI.SetAvatarURL failed")
+ return jsonerror.InternalServerError()
+ }

- if err = accountDB.SetAvatarURL(req.Context(), localpart, r.AvatarURL); err != nil {
- util.GetLogger(req.Context()).WithError(err).Error("accountDB.SetAvatarURL failed")
- return jsonerror.InternalServerError()
- }

- var res api.QueryRoomsForUserResponse
+ var roomsRes api.QueryRoomsForUserResponse
err = rsAPI.QueryRoomsForUser(req.Context(), &api.QueryRoomsForUserRequest{
UserID: device.UserID,
WantMembership: "join",
- }, &res)
+ }, &roomsRes)
if err != nil {
util.GetLogger(req.Context()).WithError(err).Error("QueryRoomsForUser failed")
return jsonerror.InternalServerError()
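The avatar update is now two internal-API round trips, a `QueryProfile` to capture the previous values followed by a `SetAvatarURL` perform call, where it used to be two direct database calls. Condensed into a helper (the helper name and error handling are illustrative; the request/response types are the ones shown in the hunk above):

```go
package sketch

import (
	"context"

	userapi "github.com/matrix-org/dendrite/userapi/api"
)

// updateAvatar reads the old profile and then writes the new avatar URL
// through the user API, as SetAvatarURL now does.
func updateAvatar(ctx context.Context, profileAPI userapi.UserProfileAPI, userID, localpart, avatarURL string) (*userapi.QueryProfileResponse, error) {
	old := &userapi.QueryProfileResponse{}
	if err := profileAPI.QueryProfile(ctx, &userapi.QueryProfileRequest{UserID: userID}, old); err != nil {
		return nil, err
	}
	setRes := &userapi.PerformSetAvatarURLResponse{}
	if err := profileAPI.SetAvatarURL(ctx, &userapi.PerformSetAvatarURLRequest{
		Localpart: localpart,
		AvatarURL: avatarURL,
	}, setRes); err != nil {
		return nil, err
	}
	return old, nil // the old values are still needed to build the membership updates
}
```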
@ -155,7 +166,7 @@ func SetAvatarURL(
|
|||
}
|
||||
|
||||
events, err := buildMembershipEvents(
|
||||
req.Context(), res.RoomIDs, newProfile, userID, cfg, evTime, rsAPI,
|
||||
req.Context(), roomsRes.RoomIDs, newProfile, userID, cfg, evTime, rsAPI,
|
||||
)
|
||||
switch e := err.(type) {
|
||||
case nil:
|
||||
|
@ -182,11 +193,11 @@ func SetAvatarURL(
|
|||
|
||||
// GetDisplayName implements GET /profile/{userID}/displayname
|
||||
func GetDisplayName(
|
||||
req *http.Request, accountDB userdb.Database, cfg *config.ClientAPI,
|
||||
req *http.Request, profileAPI userapi.UserProfileAPI, cfg *config.ClientAPI,
|
||||
userID string, asAPI appserviceAPI.AppServiceQueryAPI,
|
||||
federation *gomatrixserverlib.FederationClient,
|
||||
) util.JSONResponse {
|
||||
profile, err := getProfile(req.Context(), accountDB, cfg, userID, asAPI, federation)
|
||||
profile, err := getProfile(req.Context(), profileAPI, cfg, userID, asAPI, federation)
|
||||
if err != nil {
|
||||
if err == eventutil.ErrProfileNoExists {
|
||||
return util.JSONResponse{
|
||||
|
@ -209,7 +220,7 @@ func GetDisplayName(
|
|||
|
||||
// SetDisplayName implements PUT /profile/{userID}/displayname
|
||||
func SetDisplayName(
|
||||
req *http.Request, accountDB userdb.Database,
|
||||
req *http.Request, profileAPI userapi.UserProfileAPI,
|
||||
device *userapi.Device, userID string, cfg *config.ClientAPI, rsAPI api.RoomserverInternalAPI,
|
||||
) util.JSONResponse {
|
||||
if userID != device.UserID {
|
||||
|
@@ -244,14 +255,26 @@ func SetDisplayName(
}
}

- oldProfile, err := accountDB.GetProfileByLocalpart(req.Context(), localpart)
+ pRes := &userapi.QueryProfileResponse{}
+ err = profileAPI.QueryProfile(req.Context(), &userapi.QueryProfileRequest{
+ UserID: userID,
+ }, pRes)
if err != nil {
- util.GetLogger(req.Context()).WithError(err).Error("accountDB.GetProfileByLocalpart failed")
+ util.GetLogger(req.Context()).WithError(err).Error("profileAPI.QueryProfile failed")
return jsonerror.InternalServerError()
}
+ oldProfile := &authtypes.Profile{
+ Localpart: localpart,
+ DisplayName: pRes.DisplayName,
+ AvatarURL: pRes.AvatarURL,
+ }

- if err = accountDB.SetDisplayName(req.Context(), localpart, r.DisplayName); err != nil {
- util.GetLogger(req.Context()).WithError(err).Error("accountDB.SetDisplayName failed")
+ err = profileAPI.SetDisplayName(req.Context(), &userapi.PerformUpdateDisplayNameRequest{
+ Localpart: localpart,
+ DisplayName: r.DisplayName,
+ }, &struct{}{})
+ if err != nil {
+ util.GetLogger(req.Context()).WithError(err).Error("profileAPI.SetDisplayName failed")
return jsonerror.InternalServerError()
}

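Display-name updates follow the same pattern; note the `&struct{}{}` literal standing in for a response that carries no data. A minimal sketch, assuming the same `userapi` import as in the hunks above:

```go
package sketch

import (
	"context"

	userapi "github.com/matrix-org/dendrite/userapi/api"
)

// setDisplayName performs the rename through the user API, mirroring the
// SetDisplayName hunk above; there is no useful response payload.
func setDisplayName(ctx context.Context, profileAPI userapi.UserProfileAPI, localpart, displayName string) error {
	return profileAPI.SetDisplayName(ctx, &userapi.PerformUpdateDisplayNameRequest{
		Localpart:   localpart,
		DisplayName: displayName,
	}, &struct{}{})
}
```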
@ -302,7 +325,7 @@ func SetDisplayName(
|
|||
// Returns an error when something goes wrong or specifically
|
||||
// eventutil.ErrProfileNoExists when the profile doesn't exist.
|
||||
func getProfile(
|
||||
ctx context.Context, accountDB userdb.Database, cfg *config.ClientAPI,
|
||||
ctx context.Context, profileAPI userapi.UserProfileAPI, cfg *config.ClientAPI,
|
||||
userID string,
|
||||
asAPI appserviceAPI.AppServiceQueryAPI,
|
||||
federation *gomatrixserverlib.FederationClient,
|
||||
|
@ -331,7 +354,7 @@ func getProfile(
|
|||
}, nil
|
||||
}
|
||||
|
||||
profile, err := appserviceAPI.RetrieveUserProfile(ctx, userID, asAPI, accountDB)
|
||||
profile, err := appserviceAPI.RetrieveUserProfile(ctx, userID, asAPI, profileAPI)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
@ -44,7 +44,6 @@ import (
|
|||
"github.com/matrix-org/dendrite/clientapi/jsonerror"
|
||||
"github.com/matrix-org/dendrite/clientapi/userutil"
|
||||
userapi "github.com/matrix-org/dendrite/userapi/api"
|
||||
userdb "github.com/matrix-org/dendrite/userapi/storage"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -523,8 +522,7 @@ func validateApplicationService(
|
|||
// http://matrix.org/speculator/spec/HEAD/client_server/unstable.html#post-matrix-client-unstable-register
|
||||
func Register(
|
||||
req *http.Request,
|
||||
userAPI userapi.UserInternalAPI,
|
||||
accountDB userdb.Database,
|
||||
userAPI userapi.UserRegisterAPI,
|
||||
cfg *config.ClientAPI,
|
||||
) util.JSONResponse {
|
||||
var r registerRequest
|
||||
|
@@ -552,13 +550,12 @@ func Register(
}
// Auto generate a numeric username if r.Username is empty
if r.Username == "" {
- id, err := accountDB.GetNewNumericLocalpart(req.Context())
- if err != nil {
- util.GetLogger(req.Context()).WithError(err).Error("accountDB.GetNewNumericLocalpart failed")
+ res := &userapi.QueryNumericLocalpartResponse{}
+ if err := userAPI.QueryNumericLocalpart(req.Context(), res); err != nil {
+ util.GetLogger(req.Context()).WithError(err).Error("userAPI.QueryNumericLocalpart failed")
return jsonerror.InternalServerError()
}

- r.Username = strconv.FormatInt(id, 10)
+ r.Username = strconv.FormatInt(res.ID, 10)
}

// Is this an appservice registration? It will be if the access
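Auto-generated usernames now come from the user API as well: `QueryNumericLocalpart` returns the next free numeric ID, which the handler formats into the username. A small sketch of that step in isolation:

```go
package sketch

import (
	"context"
	"strconv"

	userapi "github.com/matrix-org/dendrite/userapi/api"
)

// nextNumericUsername asks the user API for a fresh numeric localpart, as
// Register now does when no username is supplied.
func nextNumericUsername(ctx context.Context, userAPI userapi.UserRegisterAPI) (string, error) {
	res := &userapi.QueryNumericLocalpartResponse{}
	if err := userAPI.QueryNumericLocalpart(ctx, res); err != nil {
		return "", err
	}
	return strconv.FormatInt(res.ID, 10), nil
}
```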
@ -606,7 +603,7 @@ func handleGuestRegistration(
|
|||
req *http.Request,
|
||||
r registerRequest,
|
||||
cfg *config.ClientAPI,
|
||||
userAPI userapi.UserInternalAPI,
|
||||
userAPI userapi.UserRegisterAPI,
|
||||
) util.JSONResponse {
|
||||
if cfg.RegistrationDisabled || cfg.GuestsDisabled {
|
||||
return util.JSONResponse{
|
||||
|
@ -671,7 +668,7 @@ func handleRegistrationFlow(
|
|||
r registerRequest,
|
||||
sessionID string,
|
||||
cfg *config.ClientAPI,
|
||||
userAPI userapi.UserInternalAPI,
|
||||
userAPI userapi.UserRegisterAPI,
|
||||
accessToken string,
|
||||
accessTokenErr error,
|
||||
) util.JSONResponse {
|
||||
|
@ -762,7 +759,7 @@ func handleApplicationServiceRegistration(
|
|||
req *http.Request,
|
||||
r registerRequest,
|
||||
cfg *config.ClientAPI,
|
||||
userAPI userapi.UserInternalAPI,
|
||||
userAPI userapi.UserRegisterAPI,
|
||||
) util.JSONResponse {
|
||||
// Check if we previously had issues extracting the access token from the
|
||||
// request.
|
||||
|
@ -805,7 +802,7 @@ func checkAndCompleteFlow(
|
|||
r registerRequest,
|
||||
sessionID string,
|
||||
cfg *config.ClientAPI,
|
||||
userAPI userapi.UserInternalAPI,
|
||||
userAPI userapi.UserRegisterAPI,
|
||||
) util.JSONResponse {
|
||||
if checkFlowCompleted(flow, cfg.Derived.Registration.Flows) {
|
||||
policyVersion := ""
|
||||
|
@ -836,7 +833,7 @@ func checkAndCompleteFlow(
|
|||
// not all
|
||||
func completeRegistration(
|
||||
ctx context.Context,
|
||||
userAPI userapi.UserInternalAPI,
|
||||
userAPI userapi.UserRegisterAPI,
|
||||
username, password, appserviceID, ipAddr, userAgent, sessionID, policyVersion string,
|
||||
inhibitLogin eventutil.WeakBoolean,
|
||||
displayName, deviceID *string,
|
||||
|
@ -1003,7 +1000,7 @@ type availableResponse struct {
|
|||
func RegisterAvailable(
|
||||
req *http.Request,
|
||||
cfg *config.ClientAPI,
|
||||
accountDB userdb.Database,
|
||||
registerAPI userapi.UserRegisterAPI,
|
||||
) util.JSONResponse {
|
||||
username := req.URL.Query().Get("username")
|
||||
|
||||
|
@@ -1025,14 +1022,18 @@ func RegisterAvailable(
}
}

- availability, availabilityErr := accountDB.CheckAccountAvailability(req.Context(), username)
- if availabilityErr != nil {
+ res := &userapi.QueryAccountAvailabilityResponse{}
+ err := registerAPI.QueryAccountAvailability(req.Context(), &userapi.QueryAccountAvailabilityRequest{
+ Localpart: username,
+ }, res)
+ if err != nil {
return util.JSONResponse{
Code: http.StatusInternalServerError,
- JSON: jsonerror.Unknown("failed to check availability: " + availabilityErr.Error()),
+ JSON: jsonerror.Unknown("failed to check availability:" + err.Error()),
}
}
- if !availability {

+ if !res.Available {
return util.JSONResponse{
Code: http.StatusBadRequest,
JSON: jsonerror.UserInUse("Desired User ID is already taken."),
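`RegisterAvailable` makes the same move: availability is a query against `userapi.UserRegisterAPI` instead of `accountDB.CheckAccountAvailability`. As a standalone helper (name illustrative):

```go
package sketch

import (
	"context"

	userapi "github.com/matrix-org/dendrite/userapi/api"
)

// usernameAvailable reports whether a localpart is still free, using the
// QueryAccountAvailability call shown in the hunk above.
func usernameAvailable(ctx context.Context, registerAPI userapi.UserRegisterAPI, username string) (bool, error) {
	res := &userapi.QueryAccountAvailabilityResponse{}
	if err := registerAPI.QueryAccountAvailability(ctx, &userapi.QueryAccountAvailabilityRequest{
		Localpart: username,
	}, res); err != nil {
		return false, err
	}
	return res.Available, nil
}
```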
@ -34,7 +34,6 @@ import (
|
|||
roomserverAPI "github.com/matrix-org/dendrite/roomserver/api"
|
||||
"github.com/matrix-org/dendrite/setup/config"
|
||||
userapi "github.com/matrix-org/dendrite/userapi/api"
|
||||
userdb "github.com/matrix-org/dendrite/userapi/storage"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
"github.com/matrix-org/util"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
@ -51,7 +50,6 @@ func Setup(
|
|||
eduAPI eduServerAPI.EDUServerInputAPI,
|
||||
rsAPI roomserverAPI.RoomserverInternalAPI,
|
||||
asAPI appserviceAPI.AppServiceQueryAPI,
|
||||
accountDB userdb.Database,
|
||||
userAPI userapi.UserInternalAPI,
|
||||
federation *gomatrixserverlib.FederationClient,
|
||||
syncProducer *producers.SyncAPIProducer,
|
||||
|
@ -62,7 +60,7 @@ func Setup(
|
|||
mscCfg *config.MSCs,
|
||||
) {
|
||||
rateLimits := httputil.NewRateLimits(&cfg.RateLimiting)
|
||||
userInteractiveAuth := auth.NewUserInteractive(accountDB, cfg)
|
||||
userInteractiveAuth := auth.NewUserInteractive(userAPI, cfg)
|
||||
|
||||
unstableFeatures := map[string]bool{
|
||||
"org.matrix.e2e_cross_signing": true,
|
||||
|
@ -124,7 +122,7 @@ func Setup(
|
|||
)
|
||||
if cfg.Matrix.ServerNotices.Enabled {
|
||||
logrus.Info("Enabling server notices at /_synapse/admin/v1/send_server_notice")
|
||||
serverNotificationSender, err = getSenderDevice(context.Background(), userAPI, accountDB, cfg)
|
||||
serverNotificationSender, err = getSenderDevice(context.Background(), userAPI, cfg)
|
||||
if err != nil {
|
||||
logrus.WithError(err).Fatal("unable to get account for sending sending server notices")
|
||||
}
|
||||
|
@ -142,7 +140,7 @@ func Setup(
|
|||
txnID := vars["txnID"]
|
||||
return SendServerNotice(
|
||||
req, &cfg.Matrix.ServerNotices,
|
||||
cfg, userAPI, rsAPI, accountDB, asAPI,
|
||||
cfg, userAPI, rsAPI, asAPI,
|
||||
device, serverNotificationSender,
|
||||
&txnID, transactionsCache,
|
||||
)
|
||||
|
@ -157,7 +155,7 @@ func Setup(
|
|||
}
|
||||
return SendServerNotice(
|
||||
req, &cfg.Matrix.ServerNotices,
|
||||
cfg, userAPI, rsAPI, accountDB, asAPI,
|
||||
cfg, userAPI, rsAPI, asAPI,
|
||||
device, serverNotificationSender,
|
||||
nil, transactionsCache,
|
||||
)
|
||||
|
@ -192,9 +190,8 @@ func Setup(
|
|||
consentRequiredCheck := httputil.WithConsentCheck(cfg.Matrix.UserConsentOptions, userAPI)
|
||||
|
||||
v3mux.Handle("/createRoom",
|
||||
|
||||
httputil.MakeAuthAPI("createRoom", userAPI, func(req *http.Request, device *userapi.Device) util.JSONResponse {
|
||||
return CreateRoom(req, device, cfg, accountDB, rsAPI, asAPI)
|
||||
return CreateRoom(req, device, cfg, userAPI, rsAPI, asAPI)
|
||||
}, consentRequiredCheck),
|
||||
).Methods(http.MethodPost, http.MethodOptions)
|
||||
v3mux.Handle("/join/{roomIDOrAlias}",
|
||||
|
@ -207,7 +204,7 @@ func Setup(
|
|||
return util.ErrorResponse(err)
|
||||
}
|
||||
return JoinRoomByIDOrAlias(
|
||||
req, device, rsAPI, accountDB, vars["roomIDOrAlias"],
|
||||
req, device, rsAPI, userAPI, vars["roomIDOrAlias"],
|
||||
)
|
||||
}),
|
||||
).Methods(http.MethodPost, http.MethodOptions)
|
||||
|
@ -223,7 +220,7 @@ func Setup(
|
|||
return util.ErrorResponse(err)
|
||||
}
|
||||
return PeekRoomByIDOrAlias(
|
||||
req, device, rsAPI, accountDB, vars["roomIDOrAlias"],
|
||||
req, device, rsAPI, vars["roomIDOrAlias"],
|
||||
)
|
||||
}, consentRequiredCheck),
|
||||
).Methods(http.MethodPost, http.MethodOptions)
|
||||
|
@ -243,7 +240,7 @@ func Setup(
|
|||
return util.ErrorResponse(err)
|
||||
}
|
||||
return JoinRoomByIDOrAlias(
|
||||
req, device, rsAPI, accountDB, vars["roomID"],
|
||||
req, device, rsAPI, userAPI, vars["roomID"],
|
||||
)
|
||||
}),
|
||||
).Methods(http.MethodPost, http.MethodOptions)
|
||||
|
@ -268,7 +265,7 @@ func Setup(
|
|||
return util.ErrorResponse(err)
|
||||
}
|
||||
return UnpeekRoomByID(
|
||||
req, device, rsAPI, accountDB, vars["roomID"],
|
||||
req, device, rsAPI, vars["roomID"],
|
||||
)
|
||||
}, consentRequiredCheck),
|
||||
).Methods(http.MethodPost, http.MethodOptions)
|
||||
|
@ -278,7 +275,7 @@ func Setup(
|
|||
if err != nil {
|
||||
return util.ErrorResponse(err)
|
||||
}
|
||||
return SendBan(req, accountDB, device, vars["roomID"], cfg, rsAPI, asAPI)
|
||||
return SendBan(req, userAPI, device, vars["roomID"], cfg, rsAPI, asAPI)
|
||||
}, consentRequiredCheck),
|
||||
).Methods(http.MethodPost, http.MethodOptions)
|
||||
v3mux.Handle("/rooms/{roomID}/invite",
|
||||
|
@ -290,7 +287,7 @@ func Setup(
|
|||
if err != nil {
|
||||
return util.ErrorResponse(err)
|
||||
}
|
||||
return SendInvite(req, accountDB, device, vars["roomID"], cfg, rsAPI, asAPI)
|
||||
return SendInvite(req, userAPI, device, vars["roomID"], cfg, rsAPI, asAPI)
|
||||
}, consentRequiredCheck),
|
||||
).Methods(http.MethodPost, http.MethodOptions)
|
||||
v3mux.Handle("/rooms/{roomID}/kick",
|
||||
|
@ -299,7 +296,7 @@ func Setup(
|
|||
if err != nil {
|
||||
return util.ErrorResponse(err)
|
||||
}
|
||||
return SendKick(req, accountDB, device, vars["roomID"], cfg, rsAPI, asAPI)
|
||||
return SendKick(req, userAPI, device, vars["roomID"], cfg, rsAPI, asAPI)
|
||||
}, consentRequiredCheck),
|
||||
).Methods(http.MethodPost, http.MethodOptions)
|
||||
v3mux.Handle("/rooms/{roomID}/unban",
|
||||
|
@ -308,7 +305,7 @@ func Setup(
|
|||
if err != nil {
|
||||
return util.ErrorResponse(err)
|
||||
}
|
||||
return SendUnban(req, accountDB, device, vars["roomID"], cfg, rsAPI, asAPI)
|
||||
return SendUnban(req, userAPI, device, vars["roomID"], cfg, rsAPI, asAPI)
|
||||
}, consentRequiredCheck),
|
||||
).Methods(http.MethodPost, http.MethodOptions)
|
||||
v3mux.Handle("/rooms/{roomID}/send/{eventType}",
|
||||
|
@ -404,14 +401,14 @@ func Setup(
|
|||
if r := rateLimits.Limit(req); r != nil {
|
||||
return *r
|
||||
}
|
||||
return Register(req, userAPI, accountDB, cfg)
|
||||
return Register(req, userAPI, cfg)
|
||||
})).Methods(http.MethodPost, http.MethodOptions)
|
||||
|
||||
v3mux.Handle("/register/available", httputil.MakeExternalAPI("registerAvailable", func(req *http.Request) util.JSONResponse {
|
||||
if r := rateLimits.Limit(req); r != nil {
|
||||
return *r
|
||||
}
|
||||
return RegisterAvailable(req, cfg, accountDB)
|
||||
return RegisterAvailable(req, cfg, userAPI)
|
||||
})).Methods(http.MethodGet, http.MethodOptions)
|
||||
|
||||
v3mux.Handle("/directory/room/{roomAlias}",
|
||||
|
@ -489,7 +486,7 @@ func Setup(
|
|||
if err != nil {
|
||||
return util.ErrorResponse(err)
|
||||
}
|
||||
return SendTyping(req, device, vars["roomID"], vars["userID"], accountDB, eduAPI, rsAPI)
|
||||
return SendTyping(req, device, vars["roomID"], vars["userID"], eduAPI, rsAPI)
|
||||
}),
|
||||
).Methods(http.MethodPut, http.MethodOptions)
|
||||
v3mux.Handle("/rooms/{roomID}/redact/{eventID}",
|
||||
|
@ -550,7 +547,7 @@ func Setup(
|
|||
if r := rateLimits.Limit(req); r != nil {
|
||||
return *r
|
||||
}
|
||||
return Password(req, userAPI, accountDB, device, cfg)
|
||||
return Password(req, userAPI, device, cfg)
|
||||
}),
|
||||
).Methods(http.MethodPost, http.MethodOptions)
|
||||
|
||||
|
@ -570,7 +567,7 @@ func Setup(
|
|||
if r := rateLimits.Limit(req); r != nil {
|
||||
return *r
|
||||
}
|
||||
return Login(req, accountDB, userAPI, cfg)
|
||||
return Login(req, userAPI, cfg)
|
||||
}),
|
||||
).Methods(http.MethodGet, http.MethodPost, http.MethodOptions)
|
||||
|
||||
|
@ -725,7 +722,7 @@ func Setup(
|
|||
if err != nil {
|
||||
return util.ErrorResponse(err)
|
||||
}
|
||||
return GetProfile(req, accountDB, cfg, vars["userID"], asAPI, federation)
|
||||
return GetProfile(req, userAPI, cfg, vars["userID"], asAPI, federation)
|
||||
}),
|
||||
).Methods(http.MethodGet, http.MethodOptions)
|
||||
|
||||
|
@ -735,7 +732,7 @@ func Setup(
|
|||
if err != nil {
|
||||
return util.ErrorResponse(err)
|
||||
}
|
||||
return GetAvatarURL(req, accountDB, cfg, vars["userID"], asAPI, federation)
|
||||
return GetAvatarURL(req, userAPI, cfg, vars["userID"], asAPI, federation)
|
||||
}),
|
||||
).Methods(http.MethodGet, http.MethodOptions)
|
||||
|
||||
|
@ -748,7 +745,7 @@ func Setup(
|
|||
if err != nil {
|
||||
return util.ErrorResponse(err)
|
||||
}
|
||||
return SetAvatarURL(req, accountDB, device, vars["userID"], cfg, rsAPI)
|
||||
return SetAvatarURL(req, userAPI, device, vars["userID"], cfg, rsAPI)
|
||||
}, consentRequiredCheck),
|
||||
).Methods(http.MethodPut, http.MethodOptions)
|
||||
// Browsers use the OPTIONS HTTP method to check if the CORS policy allows
|
||||
|
@ -760,7 +757,7 @@ func Setup(
|
|||
if err != nil {
|
||||
return util.ErrorResponse(err)
|
||||
}
|
||||
return GetDisplayName(req, accountDB, cfg, vars["userID"], asAPI, federation)
|
||||
return GetDisplayName(req, userAPI, cfg, vars["userID"], asAPI, federation)
|
||||
}),
|
||||
).Methods(http.MethodGet, http.MethodOptions)
|
||||
|
||||
|
@ -773,7 +770,7 @@ func Setup(
|
|||
if err != nil {
|
||||
return util.ErrorResponse(err)
|
||||
}
|
||||
return SetDisplayName(req, accountDB, device, vars["userID"], cfg, rsAPI)
|
||||
return SetDisplayName(req, userAPI, device, vars["userID"], cfg, rsAPI)
|
||||
}, consentRequiredCheck),
|
||||
).Methods(http.MethodPut, http.MethodOptions)
|
||||
// Browsers use the OPTIONS HTTP method to check if the CORS policy allows
|
||||
|
@ -781,25 +778,25 @@ func Setup(
|
|||
|
||||
v3mux.Handle("/account/3pid",
|
||||
httputil.MakeAuthAPI("account_3pid", userAPI, func(req *http.Request, device *userapi.Device) util.JSONResponse {
|
||||
return GetAssociated3PIDs(req, accountDB, device)
|
||||
return GetAssociated3PIDs(req, userAPI, device)
|
||||
}, consentRequiredCheck),
|
||||
).Methods(http.MethodGet, http.MethodOptions)
|
||||
|
||||
v3mux.Handle("/account/3pid",
|
||||
httputil.MakeAuthAPI("account_3pid", userAPI, func(req *http.Request, device *userapi.Device) util.JSONResponse {
|
||||
return CheckAndSave3PIDAssociation(req, accountDB, device, cfg)
|
||||
return CheckAndSave3PIDAssociation(req, userAPI, device, cfg)
|
||||
}, consentRequiredCheck),
|
||||
).Methods(http.MethodPost, http.MethodOptions)
|
||||
|
||||
unstableMux.Handle("/account/3pid/delete",
|
||||
httputil.MakeAuthAPI("account_3pid", userAPI, func(req *http.Request, device *userapi.Device) util.JSONResponse {
|
||||
return Forget3PID(req, accountDB)
|
||||
return Forget3PID(req, userAPI)
|
||||
}, consentRequiredCheck),
|
||||
).Methods(http.MethodPost, http.MethodOptions)
|
||||
|
||||
v3mux.Handle("/{path:(?:account/3pid|register)}/email/requestToken",
|
||||
httputil.MakeExternalAPI("account_3pid_request_token", func(req *http.Request) util.JSONResponse {
|
||||
return RequestEmailToken(req, accountDB, cfg)
|
||||
return RequestEmailToken(req, userAPI, cfg)
|
||||
}),
|
||||
).Methods(http.MethodPost, http.MethodOptions)
|
||||
|
||||
|
@ -1274,7 +1271,7 @@ func Setup(
|
|||
// Cross-signing device keys
|
||||
|
||||
postDeviceSigningKeys := httputil.MakeAuthAPI("post_device_signing_keys", userAPI, func(req *http.Request, device *userapi.Device) util.JSONResponse {
|
||||
return UploadCrossSigningDeviceKeys(req, userInteractiveAuth, keyAPI, device, accountDB, cfg)
|
||||
return UploadCrossSigningDeviceKeys(req, userInteractiveAuth, keyAPI, device, userAPI, cfg)
|
||||
}, consentRequiredCheck)
|
||||
|
||||
postDeviceSigningSignatures := httputil.MakeAuthAPI("post_device_signing_signatures", userAPI, func(req *http.Request, device *userapi.Device) util.JSONResponse {
|
||||
|
|
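The routing changes above follow one template: each handler closure captures `userAPI` (and no longer an accounts DB), and routes that need it get the `consentRequiredCheck` option appended to `MakeAuthAPI`. A sketch of a single registration as it might look inside `clientapi/routing`; the import paths, the metric name, and the simplified variable extraction are assumptions:

```go
package routing

import (
	"net/http"

	"github.com/gorilla/mux"
	appserviceAPI "github.com/matrix-org/dendrite/appservice/api"
	"github.com/matrix-org/dendrite/internal/httputil"
	roomserverAPI "github.com/matrix-org/dendrite/roomserver/api"
	"github.com/matrix-org/dendrite/setup/config"
	userapi "github.com/matrix-org/dendrite/userapi/api"
	"github.com/matrix-org/util"
)

// registerBanRoute shows the registration shape used throughout Setup.
func registerBanRoute(
	v3mux *mux.Router, userAPI userapi.UserInternalAPI, cfg *config.ClientAPI,
	rsAPI roomserverAPI.RoomserverInternalAPI, asAPI appserviceAPI.AppServiceQueryAPI,
) {
	consentRequiredCheck := httputil.WithConsentCheck(cfg.Matrix.UserConsentOptions, userAPI)
	v3mux.Handle("/rooms/{roomID}/ban",
		httputil.MakeAuthAPI("membership", userAPI, func(req *http.Request, device *userapi.Device) util.JSONResponse {
			roomID := mux.Vars(req)["roomID"] // the real handler URL-decodes these vars first
			return SendBan(req, userAPI, device, roomID, cfg, rsAPI, asAPI)
		}, consentRequiredCheck),
	).Methods(http.MethodPost, http.MethodOptions)
}
```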
|
@ -20,7 +20,6 @@ import (
|
|||
"github.com/matrix-org/dendrite/eduserver/api"
|
||||
roomserverAPI "github.com/matrix-org/dendrite/roomserver/api"
|
||||
userapi "github.com/matrix-org/dendrite/userapi/api"
|
||||
userdb "github.com/matrix-org/dendrite/userapi/storage"
|
||||
"github.com/matrix-org/util"
|
||||
)
|
||||
|
||||
|
@ -33,7 +32,7 @@ type typingContentJSON struct {
|
|||
// sends the typing events to client API typingProducer
|
||||
func SendTyping(
|
||||
req *http.Request, device *userapi.Device, roomID string,
|
||||
userID string, accountDB userdb.Database,
|
||||
userID string,
|
||||
eduAPI api.EDUServerInputAPI,
|
||||
rsAPI roomserverAPI.RoomserverInternalAPI,
|
||||
) util.JSONResponse {
|
||||
|
|
|
@ -22,7 +22,6 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/matrix-org/dendrite/roomserver/version"
|
||||
userdb "github.com/matrix-org/dendrite/userapi/storage"
|
||||
"github.com/matrix-org/gomatrix"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
"github.com/matrix-org/gomatrixserverlib/tokens"
|
||||
|
@ -59,7 +58,6 @@ func SendServerNotice(
|
|||
cfgClient *config.ClientAPI,
|
||||
userAPI userapi.UserInternalAPI,
|
||||
rsAPI api.RoomserverInternalAPI,
|
||||
accountsDB userdb.Database,
|
||||
asAPI appserviceAPI.AppServiceQueryAPI,
|
||||
device *userapi.Device,
|
||||
senderDevice *userapi.Device,
|
||||
|
@ -86,7 +84,7 @@ func SendServerNotice(
|
|||
if resErr != nil {
|
||||
return *resErr
|
||||
}
|
||||
res, _ := sendServerNotice(ctx, r, rsAPI, cfgNotices, cfgClient, senderDevice, accountsDB, asAPI, userAPI, txnID, device, txnCache)
|
||||
res, _ := sendServerNotice(ctx, r, rsAPI, cfgNotices, cfgClient, senderDevice, asAPI, userAPI, txnID, device, txnCache)
|
||||
return res
|
||||
}
|
||||
|
||||
|
@ -97,7 +95,6 @@ func sendServerNotice(
|
|||
cfgNotices *config.ServerNotices,
|
||||
cfgClient *config.ClientAPI,
|
||||
senderDevice *userapi.Device,
|
||||
accountsDB userdb.Database,
|
||||
asAPI appserviceAPI.AppServiceQueryAPI,
|
||||
userAPI userapi.UserInternalAPI,
|
||||
txnID *string,
|
||||
|
@ -155,7 +152,7 @@ func sendServerNotice(
|
|||
PowerLevelContentOverride: pl,
|
||||
}
|
||||
|
||||
roomRes := createRoom(ctx, crReq, senderDevice, cfgClient, accountsDB, rsAPI, asAPI, time.Now())
|
||||
roomRes := createRoom(ctx, crReq, senderDevice, cfgClient, userAPI, rsAPI, asAPI, time.Now())
|
||||
|
||||
switch data := roomRes.JSON.(type) {
|
||||
case createRoomResponse:
|
||||
|
@ -190,7 +187,7 @@ func sendServerNotice(
|
|||
}
|
||||
// re-invite the user
|
||||
if res.Membership != gomatrixserverlib.Join {
|
||||
res, err := sendInvite(ctx, accountsDB, senderDevice, roomID, serverNoticeRequest.UserID, "Server notice room", cfgClient, rsAPI, asAPI, time.Now())
|
||||
res, err := sendInvite(ctx, userAPI, senderDevice, roomID, serverNoticeRequest.UserID, "Server notice room", cfgClient, rsAPI, asAPI, time.Now())
|
||||
if err != nil {
|
||||
return res, nil
|
||||
}
|
||||
|
@ -274,7 +271,6 @@ func (r sendServerNoticeRequest) valid() (ok bool) {
|
|||
func getSenderDevice(
|
||||
ctx context.Context,
|
||||
userAPI userapi.UserInternalAPI,
|
||||
accountDB userdb.Database,
|
||||
cfg *config.ClientAPI,
|
||||
) (*userapi.Device, error) {
|
||||
var accRes userapi.PerformAccountCreationResponse
|
||||
|
@ -289,8 +285,12 @@ func getSenderDevice(
|
|||
}
|
||||
|
||||
// set the avatarurl for the user
|
||||
if err = accountDB.SetAvatarURL(ctx, cfg.Matrix.ServerNotices.LocalPart, cfg.Matrix.ServerNotices.AvatarURL); err != nil {
|
||||
util.GetLogger(ctx).WithError(err).Error("accountDB.SetAvatarURL failed")
|
||||
res := &userapi.PerformSetAvatarURLResponse{}
|
||||
if err = userAPI.SetAvatarURL(ctx, &userapi.PerformSetAvatarURLRequest{
|
||||
Localpart: cfg.Matrix.ServerNotices.LocalPart,
|
||||
AvatarURL: cfg.Matrix.ServerNotices.AvatarURL,
|
||||
}, res); err != nil {
|
||||
util.GetLogger(ctx).WithError(err).Error("userAPI.SetAvatarURL failed")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
|
|
@ -40,7 +40,7 @@ type threePIDsResponse struct {
|
|||
// RequestEmailToken implements:
|
||||
// POST /account/3pid/email/requestToken
|
||||
// POST /register/email/requestToken
|
||||
func RequestEmailToken(req *http.Request, accountDB userdb.Database, cfg *config.ClientAPI) util.JSONResponse {
|
||||
func RequestEmailToken(req *http.Request, threePIDAPI api.UserThreePIDAPI, cfg *config.ClientAPI) util.JSONResponse {
|
||||
var body threepid.EmailAssociationRequest
|
||||
if reqErr := httputil.UnmarshalJSONRequest(req, &body); reqErr != nil {
|
||||
return *reqErr
|
||||
|
@ -50,13 +50,18 @@ func RequestEmailToken(req *http.Request, accountDB userdb.Database, cfg *config
|
|||
var err error
|
||||
|
||||
// Check if the 3PID is already in use locally
|
||||
localpart, err := accountDB.GetLocalpartForThreePID(req.Context(), body.Email, "email")
|
||||
res := &api.QueryLocalpartForThreePIDResponse{}
|
||||
err = threePIDAPI.QueryLocalpartForThreePID(req.Context(), &api.QueryLocalpartForThreePIDRequest{
|
||||
ThreePID: body.Email,
|
||||
Medium: "email",
|
||||
}, res)
|
||||
|
||||
if err != nil {
|
||||
util.GetLogger(req.Context()).WithError(err).Error("accountDB.GetLocalpartForThreePID failed")
|
||||
util.GetLogger(req.Context()).WithError(err).Error("threePIDAPI.QueryLocalpartForThreePID failed")
|
||||
return jsonerror.InternalServerError()
|
||||
}
|
||||
|
||||
if len(localpart) > 0 {
|
||||
if len(res.Localpart) > 0 {
|
||||
return util.JSONResponse{
|
||||
Code: http.StatusBadRequest,
|
||||
JSON: jsonerror.MatrixError{
|
||||
|
@ -85,7 +90,7 @@ func RequestEmailToken(req *http.Request, accountDB userdb.Database, cfg *config
|
|||
|
||||
// CheckAndSave3PIDAssociation implements POST /account/3pid
|
||||
func CheckAndSave3PIDAssociation(
|
||||
req *http.Request, accountDB userdb.Database, device *api.Device,
|
||||
req *http.Request, threePIDAPI api.UserThreePIDAPI, device *api.Device,
|
||||
cfg *config.ClientAPI,
|
||||
) util.JSONResponse {
|
||||
var body threepid.EmailAssociationCheckRequest
|
||||
|
@ -136,8 +141,12 @@ func CheckAndSave3PIDAssociation(
|
|||
return jsonerror.InternalServerError()
|
||||
}
|
||||
|
||||
if err = accountDB.SaveThreePIDAssociation(req.Context(), address, localpart, medium); err != nil {
|
||||
util.GetLogger(req.Context()).WithError(err).Error("accountsDB.SaveThreePIDAssociation failed")
|
||||
if err = threePIDAPI.PerformSaveThreePIDAssociation(req.Context(), &api.PerformSaveThreePIDAssociationRequest{
|
||||
ThreePID: address,
|
||||
Localpart: localpart,
|
||||
Medium: medium,
|
||||
}, &struct{}{}); err != nil {
|
||||
util.GetLogger(req.Context()).WithError(err).Error("threePIDAPI.PerformSaveThreePIDAssociation failed")
|
||||
return jsonerror.InternalServerError()
|
||||
}
|
||||
|
||||
|
@ -149,7 +158,7 @@ func CheckAndSave3PIDAssociation(
|
|||
|
||||
// GetAssociated3PIDs implements GET /account/3pid
|
||||
func GetAssociated3PIDs(
|
||||
req *http.Request, accountDB userdb.Database, device *api.Device,
|
||||
req *http.Request, threepidAPI api.UserThreePIDAPI, device *api.Device,
|
||||
) util.JSONResponse {
|
||||
localpart, _, err := gomatrixserverlib.SplitID('@', device.UserID)
|
||||
if err != nil {
|
||||
|
@ -157,27 +166,30 @@ func GetAssociated3PIDs(
|
|||
return jsonerror.InternalServerError()
|
||||
}
|
||||
|
||||
threepids, err := accountDB.GetThreePIDsForLocalpart(req.Context(), localpart)
|
||||
res := &api.QueryThreePIDsForLocalpartResponse{}
|
||||
err = threepidAPI.QueryThreePIDsForLocalpart(req.Context(), &api.QueryThreePIDsForLocalpartRequest{
|
||||
Localpart: localpart,
|
||||
}, res)
|
||||
if err != nil {
|
||||
util.GetLogger(req.Context()).WithError(err).Error("accountDB.GetThreePIDsForLocalpart failed")
|
||||
util.GetLogger(req.Context()).WithError(err).Error("threepidAPI.QueryThreePIDsForLocalpart failed")
|
||||
return jsonerror.InternalServerError()
|
||||
}
|
||||
|
||||
return util.JSONResponse{
|
||||
Code: http.StatusOK,
|
||||
JSON: threePIDsResponse{threepids},
|
||||
JSON: threePIDsResponse{res.ThreePIDs},
|
||||
}
|
||||
}
|
||||
|
||||
// Forget3PID implements POST /account/3pid/delete
|
||||
func Forget3PID(req *http.Request, accountDB userdb.Database) util.JSONResponse {
|
||||
func Forget3PID(req *http.Request, threepidAPI api.UserThreePIDAPI) util.JSONResponse {
|
||||
var body authtypes.ThreePID
|
||||
if reqErr := httputil.UnmarshalJSONRequest(req, &body); reqErr != nil {
|
||||
return *reqErr
|
||||
}
|
||||
|
||||
if err := accountDB.RemoveThreePIDAssociation(req.Context(), body.Address, body.Medium); err != nil {
|
||||
util.GetLogger(req.Context()).WithError(err).Error("accountDB.RemoveThreePIDAssociation failed")
|
||||
if err := threepidAPI.PerformForgetThreePID(req.Context(), &api.PerformForgetThreePIDRequest{}, &struct{}{}); err != nil {
|
||||
util.GetLogger(req.Context()).WithError(err).Error("threepidAPI.PerformForgetThreePID failed")
|
||||
return jsonerror.InternalServerError()
|
||||
}
|
||||
|
||||
|
|
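The 3PID endpoints swap the accounts DB for `api.UserThreePIDAPI`. The first step of `RequestEmailToken`, checking whether the address is already bound to a local account, reduces to this (helper name illustrative; `api` is the userapi import shown above):

```go
package sketch

import (
	"context"

	"github.com/matrix-org/dendrite/userapi/api"
)

// localpartForEmail returns the local account bound to an email address, if
// any, via the QueryLocalpartForThreePID call used above.
func localpartForEmail(ctx context.Context, threePIDAPI api.UserThreePIDAPI, email string) (string, error) {
	res := &api.QueryLocalpartForThreePIDResponse{}
	if err := threePIDAPI.QueryLocalpartForThreePID(ctx, &api.QueryLocalpartForThreePIDRequest{
		ThreePID: email,
		Medium:   "email",
	}, res); err != nil {
		return "", err
	}
	return res.Localpart, nil // empty string means the address is unclaimed
}
```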
|
@ -29,7 +29,6 @@ import (
|
|||
"github.com/matrix-org/dendrite/roomserver/api"
|
||||
"github.com/matrix-org/dendrite/setup/config"
|
||||
userapi "github.com/matrix-org/dendrite/userapi/api"
|
||||
userdb "github.com/matrix-org/dendrite/userapi/storage"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
)
|
||||
|
||||
|
@ -87,7 +86,7 @@ var (
|
|||
func CheckAndProcessInvite(
|
||||
ctx context.Context,
|
||||
device *userapi.Device, body *MembershipRequest, cfg *config.ClientAPI,
|
||||
rsAPI api.RoomserverInternalAPI, db userdb.Database,
|
||||
rsAPI api.RoomserverInternalAPI, db userapi.UserProfileAPI,
|
||||
roomID string,
|
||||
evTime time.Time,
|
||||
) (inviteStoredOnIDServer bool, err error) {
|
||||
|
@ -137,7 +136,7 @@ func CheckAndProcessInvite(
|
|||
// Returns an error if a check or a request failed.
|
||||
func queryIDServer(
|
||||
ctx context.Context,
|
||||
db userdb.Database, cfg *config.ClientAPI, device *userapi.Device,
|
||||
db userapi.UserProfileAPI, cfg *config.ClientAPI, device *userapi.Device,
|
||||
body *MembershipRequest, roomID string,
|
||||
) (lookupRes *idServerLookupResponse, storeInviteRes *idServerStoreInviteResponse, err error) {
|
||||
if err = isTrusted(body.IDServer, cfg); err != nil {
|
||||
|
@ -206,7 +205,7 @@ func queryIDServerLookup(ctx context.Context, body *MembershipRequest) (*idServe
|
|||
// Returns an error if the request failed to send or if the response couldn't be parsed.
|
||||
func queryIDServerStoreInvite(
|
||||
ctx context.Context,
|
||||
db userdb.Database, cfg *config.ClientAPI, device *userapi.Device,
|
||||
db userapi.UserProfileAPI, cfg *config.ClientAPI, device *userapi.Device,
|
||||
body *MembershipRequest, roomID string,
|
||||
) (*idServerStoreInviteResponse, error) {
|
||||
// Retrieve the sender's profile to get their display name
|
||||
|
@ -217,10 +216,17 @@ func queryIDServerStoreInvite(
|
|||
|
||||
var profile *authtypes.Profile
|
||||
if serverName == cfg.Matrix.ServerName {
|
||||
profile, err = db.GetProfileByLocalpart(ctx, localpart)
|
||||
res := &userapi.QueryProfileResponse{}
|
||||
err = db.QueryProfile(ctx, &userapi.QueryProfileRequest{UserID: device.UserID}, res)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
profile = &authtypes.Profile{
|
||||
Localpart: localpart,
|
||||
DisplayName: res.DisplayName,
|
||||
AvatarURL: res.AvatarURL,
|
||||
}
|
||||
|
||||
} else {
|
||||
profile = &authtypes.Profile{}
|
||||
}
|
||||
|
|
|
@ -24,12 +24,11 @@ import (
|
|||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/matrix-org/dendrite/setup"
|
||||
"github.com/matrix-org/dendrite/setup/base"
|
||||
"github.com/matrix-org/dendrite/userapi/api"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/term"
|
||||
|
||||
"github.com/matrix-org/dendrite/setup"
|
||||
"github.com/matrix-org/dendrite/userapi/api"
|
||||
)
|
||||
|
||||
const usage = `Usage: %s
|
||||
|
@ -43,7 +42,7 @@ Example:
|
|||
# use password from file
|
||||
%s --config dendrite.yaml -username alice -passwordfile my.pass
|
||||
# ask user to provide password
|
||||
%s --config dendrite.yaml -username alice -ask-pass
|
||||
%s --config dendrite.yaml -username alice
|
||||
# read password from stdin
|
||||
%s --config dendrite.yaml -username alice -passwordstdin < my.pass
|
||||
cat my.pass | %s --config dendrite.yaml -username alice -passwordstdin
|
||||
|
@ -56,10 +55,10 @@ Arguments:
|
|||
|
||||
var (
|
||||
username = flag.String("username", "", "The username of the account to register (specify the localpart only, e.g. 'alice' for '@alice:domain.com')")
|
||||
password = flag.String("password", "", "The password to associate with the account (optional, account will be password-less if not specified)")
|
||||
password = flag.String("password", "", "The password to associate with the account")
|
||||
pwdFile = flag.String("passwordfile", "", "The file to use for the password (e.g. for automated account creation)")
|
||||
pwdStdin = flag.Bool("passwordstdin", false, "Reads the password from stdin")
|
||||
askPass = flag.Bool("ask-pass", false, "Ask for the password to use")
|
||||
pwdLess = flag.Bool("passwordless", false, "Create a passwordless account, e.g. if only an accesstoken is required")
|
||||
isAdmin = flag.Bool("admin", false, "Create an admin account")
|
||||
resetPassword = flag.Bool("reset-password", false, "Resets the password for the given username")
|
||||
validUsernameRegex = regexp.MustCompile(`^[0-9a-z_\-=./]+$`)
|
||||
|
@ -78,22 +77,44 @@ func main() {
|
|||
os.Exit(1)
|
||||
}
|
||||
|
||||
if *pwdLess && *resetPassword {
|
||||
logrus.Fatalf("Can not reset to an empty password, unable to login afterwards.")
|
||||
}
|
||||
|
||||
if !validUsernameRegex.MatchString(*username) {
|
||||
logrus.Warn("Username can only contain characters a-z, 0-9, or '_-./='")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
pass := getPassword(password, pwdFile, pwdStdin, askPass, os.Stdin)
|
||||
if len(fmt.Sprintf("@%s:%s", *username, cfg.Global.ServerName)) > 255 {
|
||||
logrus.Fatalf("Username can not be longer than 255 characters: %s", fmt.Sprintf("@%s:%s", *username, cfg.Global.ServerName))
|
||||
}
|
||||
|
||||
b := base.NewBaseDendrite(cfg, "create-account")
|
||||
var pass string
|
||||
var err error
|
||||
if !*pwdLess {
|
||||
pass, err = getPassword(*password, *pwdFile, *pwdStdin, os.Stdin)
|
||||
if err != nil {
|
||||
logrus.Fatalln(err)
|
||||
}
|
||||
}
|
||||
|
||||
b := base.NewBaseDendrite(cfg, "Monolith")
|
||||
accountDB := b.CreateAccountsDB()
|
||||
|
||||
accType := api.AccountTypeUser
|
||||
if *isAdmin {
|
||||
accType = api.AccountTypeAdmin
|
||||
}
|
||||
var err error
|
||||
|
||||
available, err := accountDB.CheckAccountAvailability(context.Background(), *username)
|
||||
if err != nil {
|
||||
logrus.Fatalln("Unable check username existence.")
|
||||
}
|
||||
if *resetPassword {
|
||||
if available {
|
||||
logrus.Fatalln("Username could not be found.")
|
||||
}
|
||||
err = accountDB.SetPassword(context.Background(), *username, pass)
|
||||
if err != nil {
|
||||
logrus.Fatalf("Failed to update password for user %s: %s", *username, err.Error())
|
||||
|
@ -104,6 +125,9 @@ func main() {
|
|||
logrus.Infof("Updated password for user %s and invalidated all logins\n", *username)
|
||||
return
|
||||
}
|
||||
if !available {
|
||||
logrus.Fatalln("Username is already in use.")
|
||||
}
|
||||
|
||||
policyVersion := ""
|
||||
if cfg.Global.UserConsentOptions.Enabled {
|
||||
|
@ -118,53 +142,44 @@ func main() {
|
|||
logrus.Infoln("Created account", *username)
|
||||
}
|
||||
|
||||
func getPassword(password, pwdFile *string, pwdStdin, askPass *bool, r io.Reader) string {
|
||||
// no password option set, use empty password
|
||||
if password == nil && pwdFile == nil && pwdStdin == nil && askPass == nil {
|
||||
return ""
|
||||
}
|
||||
// password defined as parameter
|
||||
if password != nil && *password != "" {
|
||||
return *password
|
||||
}
|
||||
|
||||
func getPassword(password, pwdFile string, pwdStdin bool, r io.Reader) (string, error) {
|
||||
// read password from file
|
||||
if pwdFile != nil && *pwdFile != "" {
|
||||
pw, err := ioutil.ReadFile(*pwdFile)
|
||||
if pwdFile != "" {
|
||||
pw, err := ioutil.ReadFile(pwdFile)
|
||||
if err != nil {
|
||||
logrus.Fatalln("Unable to read password from file:", err)
|
||||
return "", fmt.Errorf("Unable to read password from file: %v", err)
|
||||
}
|
||||
return strings.TrimSpace(string(pw))
|
||||
return strings.TrimSpace(string(pw)), nil
|
||||
}
|
||||
|
||||
// read password from stdin
|
||||
if pwdStdin != nil && *pwdStdin {
|
||||
if pwdStdin {
|
||||
data, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
logrus.Fatalln("Unable to read password from stdin:", err)
|
||||
return "", fmt.Errorf("Unable to read password from stdin: %v", err)
|
||||
}
|
||||
return strings.TrimSpace(string(data))
|
||||
return strings.TrimSpace(string(data)), nil
|
||||
}
|
||||
|
||||
// ask the user to provide the password
|
||||
if *askPass {
|
||||
// If no parameter was set, ask the user to provide the password
|
||||
if password == "" {
|
||||
fmt.Print("Enter Password: ")
|
||||
bytePassword, err := term.ReadPassword(int(os.Stdin.Fd()))
|
||||
if err != nil {
|
||||
logrus.Fatalln("Unable to read password:", err)
|
||||
return "", fmt.Errorf("Unable to read password: %v", err)
|
||||
}
|
||||
fmt.Println()
|
||||
fmt.Print("Confirm Password: ")
|
||||
bytePassword2, err := term.ReadPassword(int(os.Stdin.Fd()))
|
||||
if err != nil {
|
||||
logrus.Fatalln("Unable to read password:", err)
|
||||
return "", fmt.Errorf("Unable to read password: %v", err)
|
||||
}
|
||||
fmt.Println()
|
||||
if strings.TrimSpace(string(bytePassword)) != strings.TrimSpace(string(bytePassword2)) {
|
||||
logrus.Fatalln("Entered passwords don't match")
|
||||
return "", fmt.Errorf("Entered passwords don't match")
|
||||
}
|
||||
return strings.TrimSpace(string(bytePassword))
|
||||
return strings.TrimSpace(string(bytePassword)), nil
|
||||
}
|
||||
|
||||
return ""
|
||||
return password, nil
|
||||
}
|
||||
|
|
|
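`getPassword` drops the pointer flags and the `logrus.Fatalln` calls in favour of plain values and a returned error, which also makes the table test below straightforward. A short usage sketch from the caller's side (function name illustrative, same `main` package as `getPassword`):

```go
import (
	"fmt"
	"os"
)

// resolvePassword decides where the account password comes from and lets the
// caller handle any failure, matching the new getPassword signature.
func resolvePassword(passwordFlag, passwordFile string, passwordStdin bool) (string, error) {
	pass, err := getPassword(passwordFlag, passwordFile, passwordStdin, os.Stdin)
	if err != nil {
		return "", fmt.Errorf("could not determine password: %w", err)
	}
	return pass, nil
}
```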
@ -8,45 +8,48 @@ import (
|
|||
|
||||
func Test_getPassword(t *testing.T) {
|
||||
type args struct {
|
||||
password *string
|
||||
pwdFile *string
|
||||
pwdStdin *bool
|
||||
askPass *bool
|
||||
password string
|
||||
pwdFile string
|
||||
pwdStdin bool
|
||||
reader io.Reader
|
||||
}
|
||||
|
||||
pass := "mySecretPass"
|
||||
passwordFile := "testdata/my.pass"
|
||||
passwordStdin := true
|
||||
reader := &bytes.Buffer{}
|
||||
_, err := reader.WriteString(pass)
|
||||
if err != nil {
|
||||
t.Errorf("unable to write to buffer: %+v", err)
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want string
|
||||
name string
|
||||
args args
|
||||
want string
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "no password defined",
|
||||
args: args{},
|
||||
want: "",
|
||||
},
|
||||
{
|
||||
name: "password defined",
|
||||
args: args{password: &pass},
|
||||
args: args{
|
||||
password: pass,
|
||||
},
|
||||
want: pass,
|
||||
},
|
||||
{
|
||||
name: "pwdFile defined",
|
||||
args: args{pwdFile: &passwordFile},
|
||||
args: args{
|
||||
pwdFile: passwordFile,
|
||||
},
|
||||
want: pass,
|
||||
},
|
||||
{
|
||||
name: "pwdFile does not exist",
|
||||
args: args{pwdFile: "iDontExist"},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "read pass from stdin defined",
|
||||
args: args{
|
||||
pwdStdin: &passwordStdin,
|
||||
pwdStdin: true,
|
||||
reader: reader,
|
||||
},
|
||||
want: pass,
|
||||
|
@ -54,7 +57,11 @@ func Test_getPassword(t *testing.T) {
|
|||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := getPassword(tt.args.password, tt.args.pwdFile, tt.args.pwdStdin, tt.args.askPass, tt.args.reader); got != tt.want {
|
||||
got, err := getPassword(tt.args.password, tt.args.pwdFile, tt.args.pwdStdin, tt.args.reader)
|
||||
if !tt.wantErr && err != nil {
|
||||
t.Errorf("expected no error, but got %v", err)
|
||||
}
|
||||
if got != tt.want {
|
||||
t.Errorf("getPassword() = '%v', want '%v'", got, tt.want)
|
||||
}
|
||||
})
|
||||
|
|
|
@ -22,7 +22,6 @@ import (
|
|||
)
|
||||
|
||||
func ClientAPI(base *basepkg.BaseDendrite, cfg *config.Dendrite) {
|
||||
accountDB := base.CreateAccountsDB()
|
||||
federation := base.CreateFederationClient()
|
||||
|
||||
asQuery := base.AppserviceHTTPClient()
|
||||
|
@ -33,9 +32,9 @@ func ClientAPI(base *basepkg.BaseDendrite, cfg *config.Dendrite) {
|
|||
keyAPI := base.KeyServerHTTPClient()
|
||||
|
||||
clientapi.AddPublicRoutes(
|
||||
base.PublicClientAPIMux, base.SynapseAdminMux, &base.Cfg.ClientAPI, accountDB, federation,
|
||||
rsAPI, eduInputAPI, asQuery, transactions.New(), fsAPI, userAPI, keyAPI, nil,
|
||||
&cfg.MSCs,
|
||||
base.ProcessContext, base.PublicClientAPIMux, base.SynapseAdminMux, &base.Cfg.ClientAPI,
|
||||
federation, rsAPI, eduInputAPI, asQuery, transactions.New(), fsAPI, userAPI,
|
||||
keyAPI, nil, &cfg.MSCs,
|
||||
)
|
||||
|
||||
base.SetupAndServeHTTP(
|
||||
|
|
|
@@ -12,6 +12,10 @@ No, although a good portion of the Matrix specification has been implemented. Mo
No, not at present. There will be in the future when Dendrite reaches version 1.0.

+ ### Can I use Dendrite with an existing Synapse database?
+
+ No, Dendrite has a very different database schema to Synapse and the two are not interchangeable.
+
### Should I run a monolith or a polylith deployment?

Monolith deployments are always preferred where possible, and at this time, are far better tested than polylith deployments are. The only reason to consider a polylith deployment is if you wish to run different Dendrite components on separate physical machines.
@@ -33,7 +37,7 @@ It should do, although we are aware of some minor issues:

### Does Dendrite support push notifications?

- No, not yet. This is a planned feature.
+ Yes, we have experimental support for push notifications. Configure them in the usual way in your Matrix client.

### Does Dendrite support application services/bridges?

@@ -42,15 +42,15 @@ func NewInternalAPI(
) api.EDUServerInputAPI {
cfg := &base.Cfg.EDUServer

- js, _ := jetstream.Prepare(&cfg.Matrix.JetStream)
+ js, _ := jetstream.Prepare(base.ProcessContext, &cfg.Matrix.JetStream)

return &input.EDUServerInputAPI{
Cache: eduCache,
UserAPI: userAPI,
JetStream: js,
- OutputTypingEventTopic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputTypingEvent),
- OutputSendToDeviceEventTopic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputSendToDeviceEvent),
- OutputReceiptEventTopic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputReceiptEvent),
+ OutputTypingEventTopic: cfg.Matrix.JetStream.Prefixed(jetstream.OutputTypingEvent),
+ OutputSendToDeviceEventTopic: cfg.Matrix.JetStream.Prefixed(jetstream.OutputSendToDeviceEvent),
+ OutputReceiptEventTopic: cfg.Matrix.JetStream.Prefixed(jetstream.OutputReceiptEvent),
ServerName: cfg.Matrix.ServerName,
}
}
|
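Topic names are now built with `JetStream.Prefixed` instead of `TopicFor`, and `jetstream.Prepare` takes the process context; the same substitution appears in the federation API consumers below. Assuming `Prefixed` simply applies the configured topic prefix and that the jetstream helper lives under `setup/jetstream` (the import path is not shown in this diff), the setup reduces to:

```go
package sketch

import (
	"github.com/matrix-org/dendrite/setup/base"
	"github.com/matrix-org/dendrite/setup/config"
	"github.com/matrix-org/dendrite/setup/jetstream"
)

// eduTopics derives the EDU output topic names the new code uses; the second
// return value of Prepare is discarded here, as it is in the diff above.
func eduTopics(b *base.BaseDendrite, cfg *config.EDUServer) (typing, sendToDevice, receipt string) {
	js, _ := jetstream.Prepare(b.ProcessContext, &cfg.Matrix.JetStream)
	_ = js // handed to the input API and consumers in the real code
	typing = cfg.Matrix.JetStream.Prefixed(jetstream.OutputTypingEvent)
	sendToDevice = cfg.Matrix.JetStream.Prefixed(jetstream.OutputSendToDeviceEvent)
	receipt = cfg.Matrix.JetStream.Prefixed(jetstream.OutputReceiptEvent)
	return
}
```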
@ -58,9 +58,9 @@ func NewOutputEDUConsumer(
|
|||
db: store,
|
||||
ServerName: cfg.Matrix.ServerName,
|
||||
durable: cfg.Matrix.JetStream.Durable("FederationAPIEDUServerConsumer"),
|
||||
typingTopic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputTypingEvent),
|
||||
sendToDeviceTopic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputSendToDeviceEvent),
|
||||
receiptTopic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputReceiptEvent),
|
||||
typingTopic: cfg.Matrix.JetStream.Prefixed(jetstream.OutputTypingEvent),
|
||||
sendToDeviceTopic: cfg.Matrix.JetStream.Prefixed(jetstream.OutputSendToDeviceEvent),
|
||||
receiptTopic: cfg.Matrix.JetStream.Prefixed(jetstream.OutputReceiptEvent),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -55,8 +55,8 @@ func NewKeyChangeConsumer(
|
|||
return &KeyChangeConsumer{
|
||||
ctx: process.Context(),
|
||||
jetstream: js,
|
||||
durable: cfg.Matrix.JetStream.TopicFor("FederationAPIKeyChangeConsumer"),
|
||||
topic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputKeyChangeEvent),
|
||||
durable: cfg.Matrix.JetStream.Prefixed("FederationAPIKeyChangeConsumer"),
|
||||
topic: cfg.Matrix.JetStream.Prefixed(jetstream.OutputKeyChangeEvent),
|
||||
queues: queues,
|
||||
db: store,
|
||||
serverName: cfg.Matrix.ServerName,
|
||||
|
|
|
@ -61,7 +61,7 @@ func NewOutputRoomEventConsumer(
|
|||
queues: queues,
|
||||
rsAPI: rsAPI,
|
||||
durable: cfg.Matrix.JetStream.Durable("FederationAPIRoomServerConsumer"),
|
||||
topic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputRoomEvent),
|
||||
topic: cfg.Matrix.JetStream.Prefixed(jetstream.OutputRoomEvent),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -92,7 +92,7 @@ func NewInternalAPI(
|
|||
FailuresUntilBlacklist: cfg.FederationMaxRetries,
|
||||
}
|
||||
|
||||
js, _ := jetstream.Prepare(&cfg.Matrix.JetStream)
|
||||
js, _ := jetstream.Prepare(base.ProcessContext, &cfg.Matrix.JetStream)
|
||||
|
||||
queues := queue.NewOutgoingQueues(
|
||||
federationDB, base.ProcessContext,
|
||||
|
|
|
@ -30,7 +30,6 @@ import (
|
|||
// Database stores information needed by the federation sender
|
||||
type Database struct {
|
||||
shared.Database
|
||||
sqlutil.PartitionOffsetStatements
|
||||
db *sql.DB
|
||||
writer sqlutil.Writer
|
||||
}
|
||||
|
@ -104,8 +103,5 @@ func NewDatabase(dbProperties *config.DatabaseOptions, cache caching.FederationC
|
|||
NotaryServerKeysMetadata: notaryMetadata,
|
||||
ServerSigningKeys: serverSigningKeys,
|
||||
}
|
||||
if err = d.PartitionOffsetStatements.Prepare(d.db, d.writer, "federationsender"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &d, nil
|
||||
}
|
||||
|
|
|
@ -29,7 +29,6 @@ import (
|
|||
// Database stores information needed by the federation sender
|
||||
type Database struct {
|
||||
shared.Database
|
||||
sqlutil.PartitionOffsetStatements
|
||||
db *sql.DB
|
||||
writer sqlutil.Writer
|
||||
}
|
||||
|
@ -103,8 +102,5 @@ func NewDatabase(dbProperties *config.DatabaseOptions, cache caching.FederationC
|
|||
NotaryServerKeysMetadata: notaryKeysMetadata,
|
||||
ServerSigningKeys: serverSigningKeys,
|
||||
}
|
||||
if err = d.PartitionOffsetStatements.Prepare(d.db, d.writer, "federationsender"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &d, nil
|
||||
}
|
||||
|
|
2
go.mod
|
@ -39,7 +39,7 @@ require (
|
|||
github.com/matrix-org/go-sqlite3-js v0.0.0-20210709140738-b0d1ba599a6d
|
||||
github.com/matrix-org/gomatrix v0.0.0-20210324163249-be2af5ef2e16
|
||||
github.com/matrix-org/gomatrixserverlib v0.0.0-20220317164600-0980b7f341e0
|
||||
github.com/matrix-org/pinecone v0.0.0-20220308124038-cfde1f8054c5
|
||||
github.com/matrix-org/pinecone v0.0.0-20220323142759-6fb077377278
|
||||
github.com/matrix-org/util v0.0.0-20200807132607-55161520e1d4
|
||||
github.com/mattn/go-sqlite3 v1.14.10
|
||||
github.com/morikuni/aec v1.0.0 // indirect
|
||||
|
|
4
go.sum
|
@ -943,8 +943,8 @@ github.com/matrix-org/gomatrix v0.0.0-20210324163249-be2af5ef2e16 h1:ZtO5uywdd5d
|
|||
github.com/matrix-org/gomatrix v0.0.0-20210324163249-be2af5ef2e16/go.mod h1:/gBX06Kw0exX1HrwmoBibFA98yBk/jxKpGVeyQbff+s=
|
||||
github.com/matrix-org/gomatrixserverlib v0.0.0-20220317164600-0980b7f341e0 h1:IINbE/0jSYGb7M31StazufyIQdYWSivRlhuns3JYPOM=
|
||||
github.com/matrix-org/gomatrixserverlib v0.0.0-20220317164600-0980b7f341e0/go.mod h1:+WF5InseAMgi1fTnU46JH39IDpEvLep0fDzx9LDf2Bo=
|
||||
github.com/matrix-org/pinecone v0.0.0-20220308124038-cfde1f8054c5 h1:7viLTiLAA2MtGKY+uf14j6TjfKvvGLAMj/qdm70jJuQ=
|
||||
github.com/matrix-org/pinecone v0.0.0-20220308124038-cfde1f8054c5/go.mod h1:r6dsL+ylE0yXe/7zh8y/Bdh6aBYI1r+u4yZni9A4iyk=
|
||||
github.com/matrix-org/pinecone v0.0.0-20220323142759-6fb077377278 h1:lRrvMMv7x1FIVW1mcBdU89lvbgAXKz6RyYR0VQTAr3E=
|
||||
github.com/matrix-org/pinecone v0.0.0-20220323142759-6fb077377278/go.mod h1:r6dsL+ylE0yXe/7zh8y/Bdh6aBYI1r+u4yZni9A4iyk=
|
||||
github.com/matrix-org/util v0.0.0-20190711121626-527ce5ddefc7/go.mod h1:vVQlW/emklohkZnOPwD3LrZUBqdfsbiyO3p1lNV8F6U=
|
||||
github.com/matrix-org/util v0.0.0-20200807132607-55161520e1d4 h1:eCEHXWDv9Rm335MSuB49mFUK44bwZPFSDde3ORE3syk=
|
||||
github.com/matrix-org/util v0.0.0-20200807132607-55161520e1d4/go.mod h1:vVQlW/emklohkZnOPwD3LrZUBqdfsbiyO3p1lNV8F6U=
|
||||
|
|
|
@ -21,4 +21,6 @@ const (
|
|||
PublicMediaPathPrefix = "/_matrix/media/"
|
||||
PublicWellKnownPrefix = "/.well-known/matrix/"
|
||||
InternalPathPrefix = "/api/"
|
||||
DendriteAdminPathPrefix = "/_dendrite/"
|
||||
SynapseAdminPathPrefix = "/_synapse/"
|
||||
)
|
||||
|
|
|
@ -1,133 +0,0 @@
|
|||
// Copyright 2020 The Matrix.org Foundation C.I.C.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package sqlutil
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// A PartitionOffset is the offset into a partition of the input log.
|
||||
type PartitionOffset struct {
|
||||
// The ID of the partition.
|
||||
Partition int32
|
||||
// The offset into the partition.
|
||||
Offset int64
|
||||
}
|
||||
|
||||
const partitionOffsetsSchema = `
|
||||
-- The offsets that the server has processed up to.
|
||||
CREATE TABLE IF NOT EXISTS ${prefix}_partition_offsets (
|
||||
-- The name of the topic.
|
||||
topic TEXT NOT NULL,
|
||||
-- The 32-bit partition ID
|
||||
partition INTEGER NOT NULL,
|
||||
-- The 64-bit offset.
|
||||
partition_offset BIGINT NOT NULL,
|
||||
UNIQUE (topic, partition)
|
||||
);
|
||||
`
|
||||
|
||||
const selectPartitionOffsetsSQL = "" +
|
||||
"SELECT partition, partition_offset FROM ${prefix}_partition_offsets WHERE topic = $1"
|
||||
|
||||
const upsertPartitionOffsetsSQL = "" +
|
||||
"INSERT INTO ${prefix}_partition_offsets (topic, partition, partition_offset) VALUES ($1, $2, $3)" +
|
||||
" ON CONFLICT (topic, partition)" +
|
||||
" DO UPDATE SET partition_offset = $3"
|
||||
|
||||
// PartitionOffsetStatements represents a set of statements that can be run on a partition_offsets table.
|
||||
type PartitionOffsetStatements struct {
|
||||
db *sql.DB
|
||||
writer Writer
|
||||
selectPartitionOffsetsStmt *sql.Stmt
|
||||
upsertPartitionOffsetStmt *sql.Stmt
|
||||
}
|
||||
|
||||
// Prepare converts the raw SQL statements into prepared statements.
|
||||
// Takes a prefix to prepend to the table name used to store the partition offsets.
|
||||
// This allows multiple components to share the same database schema.
|
||||
func (s *PartitionOffsetStatements) Prepare(db *sql.DB, writer Writer, prefix string) (err error) {
|
||||
s.db = db
|
||||
s.writer = writer
|
||||
_, err = db.Exec(strings.Replace(partitionOffsetsSchema, "${prefix}", prefix, -1))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if s.selectPartitionOffsetsStmt, err = db.Prepare(
|
||||
strings.Replace(selectPartitionOffsetsSQL, "${prefix}", prefix, -1),
|
||||
); err != nil {
|
||||
return
|
||||
}
|
||||
if s.upsertPartitionOffsetStmt, err = db.Prepare(
|
||||
strings.Replace(upsertPartitionOffsetsSQL, "${prefix}", prefix, -1),
|
||||
); err != nil {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// PartitionOffsets implements PartitionStorer
|
||||
func (s *PartitionOffsetStatements) PartitionOffsets(
|
||||
ctx context.Context, topic string,
|
||||
) ([]PartitionOffset, error) {
|
||||
return s.selectPartitionOffsets(ctx, topic)
|
||||
}
|
||||
|
||||
// SetPartitionOffset implements PartitionStorer
|
||||
func (s *PartitionOffsetStatements) SetPartitionOffset(
|
||||
ctx context.Context, topic string, partition int32, offset int64,
|
||||
) error {
|
||||
return s.upsertPartitionOffset(ctx, topic, partition, offset)
|
||||
}
|
||||
|
||||
// selectPartitionOffsets returns all the partition offsets for the given topic.
|
||||
func (s *PartitionOffsetStatements) selectPartitionOffsets(
|
||||
ctx context.Context, topic string,
|
||||
) (results []PartitionOffset, err error) {
|
||||
rows, err := s.selectPartitionOffsetsStmt.QueryContext(ctx, topic)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer checkNamedErr(rows.Close, &err)
|
||||
for rows.Next() {
|
||||
var offset PartitionOffset
|
||||
if err = rows.Scan(&offset.Partition, &offset.Offset); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
results = append(results, offset)
|
||||
}
|
||||
err = rows.Err()
|
||||
return results, err
|
||||
}
|
||||
|
||||
// checkNamedErr calls fn and overwrite err if it was nil and fn returned non-nil
|
||||
func checkNamedErr(fn func() error, err *error) {
|
||||
if e := fn(); e != nil && *err == nil {
|
||||
*err = e
|
||||
}
|
||||
}
|
||||
|
||||
// UpsertPartitionOffset updates or inserts the partition offset for the given topic.
|
||||
func (s *PartitionOffsetStatements) upsertPartitionOffset(
|
||||
ctx context.Context, topic string, partition int32, offset int64,
|
||||
) error {
|
||||
return s.writer.Do(s.db, nil, func(txn *sql.Tx) error {
|
||||
stmt := TxStmt(txn, s.upsertPartitionOffsetStmt)
|
||||
_, err := stmt.ExecContext(ctx, topic, partition, offset)
|
||||
return err
|
||||
})
|
||||
}
|
|
@@ -16,8 +16,8 @@ var build string
 
 const (
 	VersionMajor = 0
-	VersionMinor = 6
-	VersionPatch = 5
+	VersionMinor = 7
+	VersionPatch = 0
 	VersionTag   = "" // example: "rc1"
 )

@ -223,6 +223,7 @@ func (a *KeyInternalAPI) QueryDeviceMessages(ctx context.Context, req *api.Query
|
|||
res.StreamID = maxStreamID
|
||||
}
|
||||
|
||||
// nolint:gocyclo
|
||||
func (a *KeyInternalAPI) QueryKeys(ctx context.Context, req *api.QueryKeysRequest, res *api.QueryKeysResponse) {
|
||||
res.DeviceKeys = make(map[string]map[string]json.RawMessage)
|
||||
res.MasterKeys = make(map[string]gomatrixserverlib.CrossSigningKey)
|
||||
|
|
|
@@ -39,14 +39,14 @@ func AddInternalRoutes(router *mux.Router, intAPI api.KeyInternalAPI) {
 func NewInternalAPI(
 	base *base.BaseDendrite, cfg *config.KeyServer, fedClient fedsenderapi.FederationClient,
 ) api.KeyInternalAPI {
-	js, _ := jetstream.Prepare(&cfg.Matrix.JetStream)
+	js, _ := jetstream.Prepare(base.ProcessContext, &cfg.Matrix.JetStream)
 
 	db, err := storage.NewDatabase(&cfg.Database)
 	if err != nil {
 		logrus.WithError(err).Panicf("failed to connect to key server database")
 	}
 	keyChangeProducer := &producers.KeyChange{
-		Topic:     string(cfg.Matrix.JetStream.TopicFor(jetstream.OutputKeyChangeEvent)),
+		Topic:     string(cfg.Matrix.JetStream.Prefixed(jetstream.OutputKeyChangeEvent)),
 		JetStream: js,
 		DB:        db,
 	}

|
@ -70,8 +70,5 @@ func NewDatabase(dbProperties *config.DatabaseOptions) (*shared.Database, error)
|
|||
CrossSigningKeysTable: csk,
|
||||
CrossSigningSigsTable: css,
|
||||
}
|
||||
if err = d.PartitionOffsetStatements.Prepare(db, d.Writer, "keyserver"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return d, nil
|
||||
}
|
||||
|
|
|
@ -36,7 +36,6 @@ type Database struct {
|
|||
StaleDeviceListsTable tables.StaleDeviceLists
|
||||
CrossSigningKeysTable tables.CrossSigningKeys
|
||||
CrossSigningSigsTable tables.CrossSigningSigs
|
||||
sqlutil.PartitionOffsetStatements
|
||||
}
|
||||
|
||||
func (d *Database) ExistingOneTimeKeys(ctx context.Context, userID, deviceID string, keyIDsWithAlgorithms []string) (map[string]json.RawMessage, error) {
|
||||
|
|
|
@ -69,8 +69,5 @@ func NewDatabase(dbProperties *config.DatabaseOptions) (*shared.Database, error)
|
|||
CrossSigningKeysTable: csk,
|
||||
CrossSigningSigsTable: css,
|
||||
}
|
||||
if err = d.PartitionOffsetStatements.Prepare(db, d.Writer, "keyserver"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return d, nil
|
||||
}
|
||||
|
|
|
@@ -722,8 +722,8 @@ func (r *downloadRequest) fetchRemoteFile(
 
 	// create request for remote file
 	resp, err := client.CreateMediaDownloadRequest(ctx, r.MediaMetadata.Origin, string(r.MediaMetadata.MediaID))
-	if err != nil || resp.StatusCode != http.StatusOK {
-		if resp.StatusCode == http.StatusNotFound {
+	if err != nil || (resp != nil && resp.StatusCode != http.StatusOK) {
+		if resp != nil && resp.StatusCode == http.StatusNotFound {
 			return "", false, fmt.Errorf("File with media ID %q does not exist on %s", r.MediaMetadata.MediaID, r.MediaMetadata.Origin)
 		}
 		return "", false, fmt.Errorf("file with media ID %q could not be downloaded from %s", r.MediaMetadata.MediaID, r.MediaMetadata.Origin)
|
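The hunk above guards against dereferencing a nil *http.Response: the federation client can return an error together with a nil response, so checking resp.StatusCode unconditionally could panic. A minimal, self-contained sketch of the same guard pattern; the doFetch helper and URL are hypothetical stand-ins, not Dendrite code:

package main

import (
	"errors"
	"fmt"
	"net/http"
)

// doFetch is a hypothetical helper that, like the federation media client,
// may return a nil response alongside a non-nil error.
func doFetch(url string) (*http.Response, error) {
	resp, err := http.Get(url)
	if err != nil {
		return nil, err // resp is nil on transport errors
	}
	return resp, nil
}

func fetchRemote(url string) error {
	resp, err := doFetch(url)
	// Check resp != nil before touching StatusCode, otherwise a transport
	// error (nil resp) would cause a nil pointer dereference.
	if err != nil || (resp != nil && resp.StatusCode != http.StatusOK) {
		if resp != nil && resp.StatusCode == http.StatusNotFound {
			return errors.New("remote file does not exist")
		}
		return fmt.Errorf("remote file could not be downloaded: %v", err)
	}
	defer resp.Body.Close()
	return nil
}

func main() {
	_ = fetchRemote("https://example.org/_matrix/media/r0/download/example.org/abc")
}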
@ -102,4 +102,3 @@ func (a AliasEvent) Valid() bool {
|
|||
}
|
||||
return a.Alias == "" || validateAliasRegex.MatchString(a.Alias)
|
||||
}
|
||||
|
||||
|
|
|
@ -22,29 +22,29 @@ func TestAliasEvent_Valid(t *testing.T) {
|
|||
{
|
||||
name: "empty alias, invalid alt aliases",
|
||||
fields: fields{
|
||||
Alias: "",
|
||||
AltAliases: []string{ "%not:valid.local"},
|
||||
Alias: "",
|
||||
AltAliases: []string{"%not:valid.local"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "valid alias, invalid alt aliases",
|
||||
fields: fields{
|
||||
Alias: "#valid:test.local",
|
||||
AltAliases: []string{ "%not:valid.local"},
|
||||
Alias: "#valid:test.local",
|
||||
AltAliases: []string{"%not:valid.local"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "empty alias, invalid alt aliases",
|
||||
fields: fields{
|
||||
Alias: "",
|
||||
AltAliases: []string{ "%not:valid.local"},
|
||||
Alias: "",
|
||||
AltAliases: []string{"%not:valid.local"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid alias",
|
||||
fields: fields{
|
||||
Alias: "%not:valid.local",
|
||||
AltAliases: []string{ },
|
||||
Alias: "%not:valid.local",
|
||||
AltAliases: []string{},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
|
|
@ -173,12 +173,15 @@ func (r *RoomserverInternalAPI) RemoveRoomAlias(
|
|||
}
|
||||
|
||||
if creatorID != request.UserID {
|
||||
plEvent, err := r.DB.GetStateEvent(ctx, roomID, gomatrixserverlib.MRoomPowerLevels, "")
|
||||
var plEvent *gomatrixserverlib.HeaderedEvent
|
||||
var pls *gomatrixserverlib.PowerLevelContent
|
||||
|
||||
plEvent, err = r.DB.GetStateEvent(ctx, roomID, gomatrixserverlib.MRoomPowerLevels, "")
|
||||
if err != nil {
|
||||
return fmt.Errorf("r.DB.GetStateEvent: %w", err)
|
||||
}
|
||||
|
||||
pls, err := plEvent.PowerLevels()
|
||||
pls, err = plEvent.PowerLevels()
|
||||
if err != nil {
|
||||
return fmt.Errorf("plEvent.PowerLevels: %w", err)
|
||||
}
|
||||
|
@ -223,7 +226,7 @@ func (r *RoomserverInternalAPI) RemoveRoomAlias(
|
|||
}
|
||||
|
||||
stateRes := &api.QueryLatestEventsAndStateResponse{}
|
||||
if err := helpers.QueryLatestEventsAndState(ctx, r.DB, &api.QueryLatestEventsAndStateRequest{RoomID: roomID, StateToFetch: eventsNeeded.Tuples()}, stateRes); err != nil {
|
||||
if err = helpers.QueryLatestEventsAndState(ctx, r.DB, &api.QueryLatestEventsAndStateRequest{RoomID: roomID, StateToFetch: eventsNeeded.Tuples()}, stateRes); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
|
|
@ -90,6 +90,7 @@ func (r *RoomserverInternalAPI) SetFederationAPI(fsAPI fsAPI.FederationInternalA
|
|||
r.KeyRing = keyRing
|
||||
|
||||
r.Inputer = &input.Inputer{
|
||||
Cfg: r.Cfg,
|
||||
ProcessContext: r.ProcessContext,
|
||||
DB: r.DB,
|
||||
InputRoomEventTopic: r.InputRoomEventTopic,
|
||||
|
|
|
@ -19,6 +19,7 @@ import (
|
|||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
|
@ -29,6 +30,7 @@ import (
|
|||
"github.com/matrix-org/dendrite/roomserver/api"
|
||||
"github.com/matrix-org/dendrite/roomserver/internal/query"
|
||||
"github.com/matrix-org/dendrite/roomserver/storage"
|
||||
"github.com/matrix-org/dendrite/setup/config"
|
||||
"github.com/matrix-org/dendrite/setup/jetstream"
|
||||
"github.com/matrix-org/dendrite/setup/process"
|
||||
"github.com/matrix-org/gomatrixserverlib"
|
||||
|
@ -45,7 +47,35 @@ var keyContentFields = map[string]string{
|
|||
"m.room.member": "membership",
|
||||
}
|
||||
|
||||
// Inputer is responsible for consuming from the roomserver input
|
||||
// streams and processing the events. All input events are queued
|
||||
// into a single NATS stream and the order is preserved strictly.
|
||||
// The `room_id` message header will contain the room ID which will
|
||||
// be used to assign the pending event to a per-room worker.
|
||||
//
|
||||
// The input API maintains an ephemeral headers-only consumer. It
|
||||
// will speed through the stream working out which room IDs are
|
||||
// pending and create durable consumers for them. The durable
|
||||
// consumer will then be used for each room worker goroutine to
|
||||
// fetch events one by one and process them. Each room having a
|
||||
// durable consumer of its own means there is no head-of-line
|
||||
// blocking between rooms. Filtering ensures that each durable
|
||||
// consumer only receives events for the room it is interested in.
|
||||
//
|
||||
// The ephemeral consumer closely tracks the newest events. The
|
||||
// per-room durable consumers will only progress through the stream
|
||||
// as events are processed.
|
||||
//
|
||||
// A BC * -> positions of each consumer (* = ephemeral)
|
||||
// ⌄ ⌄⌄ ⌄
|
||||
// ABAABCAABCAA -> newest (letter = subject for each message)
|
||||
//
|
||||
// In this example, A is still processing an event but has two
|
||||
// pending events to process afterwards. Both B and C are caught
|
||||
// up, so they will do nothing until a new event comes in for B
|
||||
// or C.
|
||||
type Inputer struct {
|
||||
Cfg *config.RoomServer
|
||||
ProcessContext *process.ProcessContext
|
||||
DB storage.Database
|
||||
NATSClient *nats.Conn
|
||||
|
@ -57,147 +87,275 @@ type Inputer struct {
|
|||
ACLs *acls.ServerACLs
|
||||
InputRoomEventTopic string
|
||||
OutputRoomEventTopic string
|
||||
workers sync.Map // room ID -> *phony.Inbox
|
||||
workers sync.Map // room ID -> *worker
|
||||
|
||||
Queryer *query.Queryer
|
||||
}
|
||||
|
||||
func (r *Inputer) workerForRoom(roomID string) *phony.Inbox {
|
||||
inbox, _ := r.workers.LoadOrStore(roomID, &phony.Inbox{})
|
||||
return inbox.(*phony.Inbox)
|
||||
type worker struct {
|
||||
phony.Inbox
|
||||
sync.Mutex
|
||||
r *Inputer
|
||||
roomID string
|
||||
subscription *nats.Subscription
|
||||
}
|
||||
|
||||
// eventsInProgress is an in-memory map to keep a track of which events we have
|
||||
// queued up for processing. If we get a redelivery from NATS and we still have
|
||||
// the queued up item then we won't do anything with the redelivered message. If
|
||||
// we've restarted Dendrite and now this map is empty then it means that we will
|
||||
// reload pending work from NATS.
|
||||
var eventsInProgress sync.Map
|
||||
func (r *Inputer) startWorkerForRoom(roomID string) {
|
||||
v, loaded := r.workers.LoadOrStore(roomID, &worker{
|
||||
r: r,
|
||||
roomID: roomID,
|
||||
})
|
||||
w := v.(*worker)
|
||||
w.Lock()
|
||||
defer w.Unlock()
|
||||
if !loaded || w.subscription == nil {
|
||||
consumer := r.Cfg.Matrix.JetStream.Prefixed("RoomInput" + jetstream.Tokenise(w.roomID))
|
||||
subject := r.Cfg.Matrix.JetStream.Prefixed(jetstream.InputRoomEventSubj(w.roomID))
|
||||
|
||||
// onMessage is called when a new event arrives in the roomserver input stream.
|
||||
// Create the consumer. We do this as a specific step rather than
|
||||
// letting PullSubscribe create it for us because we need the consumer
|
||||
// to outlive the subscription. If we do it this way, we can Bind in the
|
||||
// next step, and when we Unsubscribe, the consumer continues to live. If
|
||||
// we leave PullSubscribe to create the durable consumer, Unsubscribe will
|
||||
// delete it because it thinks it "owns" it, which in turn breaks the
|
||||
// interest-based retention storage policy.
|
||||
// If the durable consumer already exists, this is effectively a no-op.
|
||||
// Another interesting tid-bit here: the ACK policy is set to "all" so that
|
||||
// if we acknowledge a message, we also acknowledge everything that comes
|
||||
// before it. This is necessary because otherwise our consumer will never
|
||||
// acknowledge things we filtered out for other subjects and therefore they
|
||||
// will linger around forever.
|
||||
if _, err := w.r.JetStream.AddConsumer(
|
||||
r.Cfg.Matrix.JetStream.Prefixed(jetstream.InputRoomEvent),
|
||||
&nats.ConsumerConfig{
|
||||
Durable: consumer,
|
||||
AckPolicy: nats.AckAllPolicy,
|
||||
DeliverPolicy: nats.DeliverAllPolicy,
|
||||
FilterSubject: subject,
|
||||
AckWait: MaximumMissingProcessingTime + (time.Second * 10),
|
||||
},
|
||||
); err != nil {
|
||||
logrus.WithError(err).Errorf("Failed to create consumer for room %q", w.roomID)
|
||||
return
|
||||
}
|
||||
|
||||
// Bind to our durable consumer. We want to receive all messages waiting
|
||||
// for this subject and we want to manually acknowledge them, so that we
|
||||
// can ensure they are only cleaned up when we are done processing them.
|
||||
sub, err := w.r.JetStream.PullSubscribe(
|
||||
subject, consumer,
|
||||
nats.ManualAck(),
|
||||
nats.DeliverAll(),
|
||||
nats.AckWait(MaximumMissingProcessingTime+(time.Second*10)),
|
||||
nats.Bind(r.InputRoomEventTopic, consumer),
|
||||
)
|
||||
if err != nil {
|
||||
logrus.WithError(err).Errorf("Failed to subscribe to stream for room %q", w.roomID)
|
||||
return
|
||||
}
|
||||
|
||||
// Go and start pulling messages off the queue.
|
||||
w.subscription = sub
|
||||
w.Act(nil, w._next)
|
||||
}
|
||||
}
|
||||
|
||||
// Start creates an ephemeral non-durable consumer on the roomserver
|
||||
// input topic. It is configured to deliver us headers only because we
|
||||
// don't actually care about the contents of the message at this point,
|
||||
// we only care about the `room_id` field. Once a message arrives, we
|
||||
// will look to see if we have a worker for that room which has its
|
||||
// own consumer. If we don't, we'll start one.
|
||||
func (r *Inputer) Start() error {
|
||||
_, err := r.JetStream.Subscribe(
|
||||
r.InputRoomEventTopic,
|
||||
// We specifically don't use jetstream.WithJetStreamMessage here because we
|
||||
// queue the task off to a room-specific queue and the ACK needs to be sent
|
||||
// later, possibly with an error response to the inputter if synchronous.
|
||||
func(msg *nats.Msg) {
|
||||
roomID := msg.Header.Get("room_id")
|
||||
var inputRoomEvent api.InputRoomEvent
|
||||
if err := json.Unmarshal(msg.Data, &inputRoomEvent); err != nil {
|
||||
_ = msg.Term()
|
||||
return
|
||||
}
|
||||
|
||||
_ = msg.InProgress()
|
||||
index := roomID + "\000" + inputRoomEvent.Event.EventID()
|
||||
if _, ok := eventsInProgress.LoadOrStore(index, struct{}{}); ok {
|
||||
// We're already waiting to deal with this event, so there's no
|
||||
// point in queuing it up again. We've notified NATS that we're
|
||||
// working on the message still, so that will have deferred the
|
||||
// redelivery by a bit.
|
||||
return
|
||||
}
|
||||
|
||||
roomserverInputBackpressure.With(prometheus.Labels{"room_id": roomID}).Inc()
|
||||
r.workerForRoom(roomID).Act(nil, func() {
|
||||
_ = msg.InProgress() // resets the acknowledgement wait timer
|
||||
defer eventsInProgress.Delete(index)
|
||||
defer roomserverInputBackpressure.With(prometheus.Labels{"room_id": roomID}).Dec()
|
||||
var errString string
|
||||
if err := r.processRoomEvent(r.ProcessContext.Context(), &inputRoomEvent); err != nil {
|
||||
if !errors.Is(err, context.DeadlineExceeded) && !errors.Is(err, context.Canceled) {
|
||||
sentry.CaptureException(err)
|
||||
}
|
||||
logrus.WithError(err).WithFields(logrus.Fields{
|
||||
"room_id": roomID,
|
||||
"event_id": inputRoomEvent.Event.EventID(),
|
||||
"type": inputRoomEvent.Event.Type(),
|
||||
}).Warn("Roomserver failed to process async event")
|
||||
_ = msg.Term()
|
||||
errString = err.Error()
|
||||
} else {
|
||||
_ = msg.Ack()
|
||||
}
|
||||
if replyTo := msg.Header.Get("sync"); replyTo != "" {
|
||||
if err := r.NATSClient.Publish(replyTo, []byte(errString)); err != nil {
|
||||
logrus.WithError(err).WithFields(logrus.Fields{
|
||||
"room_id": roomID,
|
||||
"event_id": inputRoomEvent.Event.EventID(),
|
||||
"type": inputRoomEvent.Event.Type(),
|
||||
}).Warn("Roomserver failed to respond for sync event")
|
||||
}
|
||||
}
|
||||
})
|
||||
"", // This is blank because we specified it in BindStream.
|
||||
func(m *nats.Msg) {
|
||||
roomID := m.Header.Get(jetstream.RoomID)
|
||||
r.startWorkerForRoom(roomID)
|
||||
_ = m.Ack()
|
||||
},
|
||||
// NATS wants to acknowledge automatically by default when the message is
|
||||
// read from the stream, but we want to override that behaviour by making
|
||||
// sure that we only acknowledge when we're happy we've done everything we
|
||||
// can. This ensures we retry things when it makes sense to do so.
|
||||
nats.ManualAck(),
|
||||
// Use a durable named consumer.
|
||||
r.Durable,
|
||||
// If we've missed things in the stream, e.g. we restarted, then replay
|
||||
// all of the queued messages that were waiting for us.
|
||||
nats.HeadersOnly(),
|
||||
nats.DeliverAll(),
|
||||
// Ensure that NATS doesn't try to resend us something that wasn't done
|
||||
// within the period of time that we might still be processing it.
|
||||
nats.AckWait(MaximumMissingProcessingTime+(time.Second*10)),
|
||||
// It is recommended to disable this for pull consumers as per the docs:
|
||||
// https://docs.nats.io/nats-concepts/jetstream/consumers#note-about-push-and-pull-consumers
|
||||
nats.MaxAckPending(-1),
|
||||
nats.AckAll(),
|
||||
nats.BindStream(r.InputRoomEventTopic),
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
// _next is called by the worker for the room. It must only be called
|
||||
// by the actor embedded into the worker.
|
||||
func (w *worker) _next() {
|
||||
// Look up what the next event is that's waiting to be processed.
|
||||
ctx, cancel := context.WithTimeout(w.r.ProcessContext.Context(), time.Minute)
|
||||
defer cancel()
|
||||
msgs, err := w.subscription.Fetch(1, nats.Context(ctx))
|
||||
switch err {
|
||||
case nil:
|
||||
// Make sure that once we're done here, we queue up another call
|
||||
// to _next in the inbox.
|
||||
defer w.Act(nil, w._next)
|
||||
|
||||
// If no error was reported, but we didn't get exactly one message,
|
||||
// then skip over this and try again on the next iteration.
|
||||
if len(msgs) != 1 {
|
||||
return
|
||||
}
|
||||
|
||||
case context.DeadlineExceeded:
|
||||
// The context exceeded, so we've been waiting for more than a
|
||||
// minute for activity in this room. At this point we will shut
|
||||
// down the subscriber to free up resources. It'll get started
|
||||
// again if new activity happens.
|
||||
if err = w.subscription.Unsubscribe(); err != nil {
|
||||
logrus.WithError(err).Errorf("Failed to unsubscribe to stream for room %q", w.roomID)
|
||||
}
|
||||
w.Lock()
|
||||
w.subscription = nil
|
||||
w.Unlock()
|
||||
return
|
||||
|
||||
default:
|
||||
// Something went wrong while trying to fetch the next event
|
||||
// from the queue. In which case, we'll shut down the subscriber
|
||||
// and wait to be notified about new room activity again. Maybe
|
||||
// the problem will be corrected by then.
|
||||
logrus.WithError(err).Errorf("Failed to get next stream message for room %q", w.roomID)
|
||||
if err = w.subscription.Unsubscribe(); err != nil {
|
||||
logrus.WithError(err).Errorf("Failed to unsubscribe to stream for room %q", w.roomID)
|
||||
}
|
||||
w.Lock()
|
||||
w.subscription = nil
|
||||
w.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
// Try to unmarshal the input room event. If the JSON unmarshalling
|
||||
// fails then we'll terminate the message — this notifies NATS that
|
||||
// we are done with the message and never want to see it again.
|
||||
msg := msgs[0]
|
||||
var inputRoomEvent api.InputRoomEvent
|
||||
if err = json.Unmarshal(msg.Data, &inputRoomEvent); err != nil {
|
||||
_ = msg.Term()
|
||||
return
|
||||
}
|
||||
|
||||
roomserverInputBackpressure.With(prometheus.Labels{"room_id": w.roomID}).Inc()
|
||||
defer roomserverInputBackpressure.With(prometheus.Labels{"room_id": w.roomID}).Dec()
|
||||
|
||||
// Process the room event. If something goes wrong then we'll tell
|
||||
// NATS to terminate the message. We'll store the error result as
|
||||
// a string, because we might want to return that to the caller if
|
||||
// it was a synchronous request.
|
||||
var errString string
|
||||
if err = w.r.processRoomEvent(w.r.ProcessContext.Context(), &inputRoomEvent); err != nil {
|
||||
if !errors.Is(err, context.DeadlineExceeded) && !errors.Is(err, context.Canceled) {
|
||||
sentry.CaptureException(err)
|
||||
}
|
||||
logrus.WithError(err).WithFields(logrus.Fields{
|
||||
"room_id": w.roomID,
|
||||
"event_id": inputRoomEvent.Event.EventID(),
|
||||
"type": inputRoomEvent.Event.Type(),
|
||||
}).Warn("Roomserver failed to process async event")
|
||||
_ = msg.Term()
|
||||
errString = err.Error()
|
||||
} else {
|
||||
_ = msg.Ack()
|
||||
}
|
||||
|
||||
// If it was a synchronous input request then the "sync" field
|
||||
// will be present in the message. That means that someone is
|
||||
// waiting for a response. The temporary inbox name is present in
|
||||
// that field, so send back the error string (if any). If there
|
||||
// was no error then we'll return a blank message, which means
|
||||
// that everything was OK.
|
||||
if replyTo := msg.Header.Get("sync"); replyTo != "" {
|
||||
if err = w.r.NATSClient.Publish(replyTo, []byte(errString)); err != nil {
|
||||
logrus.WithError(err).WithFields(logrus.Fields{
|
||||
"room_id": w.roomID,
|
||||
"event_id": inputRoomEvent.Event.EventID(),
|
||||
"type": inputRoomEvent.Event.Type(),
|
||||
}).Warn("Roomserver failed to respond for sync event")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// queueInputRoomEvents queues events into the roomserver input
|
||||
// stream in NATS.
|
||||
func (r *Inputer) queueInputRoomEvents(
|
||||
ctx context.Context,
|
||||
request *api.InputRoomEventsRequest,
|
||||
) (replySub *nats.Subscription, err error) {
|
||||
// If the request is synchronous then we need to create a
|
||||
// temporary inbox to wait for responses on, and then create
|
||||
// a subscription to it. If it's asynchronous then we won't
|
||||
// bother, so these values will remain empty.
|
||||
var replyTo string
|
||||
if !request.Asynchronous {
|
||||
replyTo = nats.NewInbox()
|
||||
replySub, err = r.NATSClient.SubscribeSync(replyTo)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("r.NATSClient.SubscribeSync: %w", err)
|
||||
}
|
||||
if replySub == nil {
|
||||
// This shouldn't ever happen, but it doesn't hurt to check
|
||||
// because we can potentially avoid a nil pointer panic later
|
||||
// if it did for some reason.
|
||||
return nil, fmt.Errorf("expected a subscription to the temporary inbox")
|
||||
}
|
||||
}
|
||||
|
||||
// For each event, marshal the input room event and then
|
||||
// send it into the input queue.
|
||||
for _, e := range request.InputRoomEvents {
|
||||
roomID := e.Event.RoomID()
|
||||
subj := r.Cfg.Matrix.JetStream.Prefixed(jetstream.InputRoomEventSubj(roomID))
|
||||
msg := &nats.Msg{
|
||||
Subject: subj,
|
||||
Header: nats.Header{},
|
||||
}
|
||||
msg.Header.Set("room_id", roomID)
|
||||
if replyTo != "" {
|
||||
msg.Header.Set("sync", replyTo)
|
||||
}
|
||||
msg.Data, err = json.Marshal(e)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("json.Marshal: %w", err)
|
||||
}
|
||||
if _, err = r.JetStream.PublishMsg(msg, nats.Context(ctx)); err != nil {
|
||||
logrus.WithError(err).WithFields(logrus.Fields{
|
||||
"room_id": roomID,
|
||||
"event_id": e.Event.EventID(),
|
||||
"subj": subj,
|
||||
}).Error("Roomserver failed to queue async event")
|
||||
return nil, fmt.Errorf("r.JetStream.PublishMsg: %w", err)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// InputRoomEvents implements api.RoomserverInternalAPI
|
||||
func (r *Inputer) InputRoomEvents(
|
||||
ctx context.Context,
|
||||
request *api.InputRoomEventsRequest,
|
||||
response *api.InputRoomEventsResponse,
|
||||
) {
|
||||
var replyTo string
|
||||
var replySub *nats.Subscription
|
||||
if !request.Asynchronous {
|
||||
var err error
|
||||
replyTo = nats.NewInbox()
|
||||
replySub, err = r.NATSClient.SubscribeSync(replyTo)
|
||||
if err != nil {
|
||||
response.ErrMsg = err.Error()
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
var err error
|
||||
for _, e := range request.InputRoomEvents {
|
||||
msg := &nats.Msg{
|
||||
Subject: r.InputRoomEventTopic,
|
||||
Header: nats.Header{},
|
||||
Reply: replyTo,
|
||||
}
|
||||
roomID := e.Event.RoomID()
|
||||
msg.Header.Set("room_id", roomID)
|
||||
if replyTo != "" {
|
||||
msg.Header.Set("sync", replyTo)
|
||||
}
|
||||
msg.Data, err = json.Marshal(e)
|
||||
if err != nil {
|
||||
response.ErrMsg = err.Error()
|
||||
return
|
||||
}
|
||||
if _, err = r.JetStream.PublishMsg(msg); err != nil {
|
||||
logrus.WithError(err).WithFields(logrus.Fields{
|
||||
"room_id": roomID,
|
||||
"event_id": e.Event.EventID(),
|
||||
}).Error("Roomserver failed to queue async event")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if request.Asynchronous || replySub == nil {
|
||||
// Queue up the event into the roomserver.
|
||||
replySub, err := r.queueInputRoomEvents(ctx, request)
|
||||
if err != nil {
|
||||
response.ErrMsg = err.Error()
|
||||
return
|
||||
}
|
||||
|
||||
// If we aren't waiting for synchronous responses then we can
|
||||
// give up here, there is nothing further to do.
|
||||
if replySub == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Otherwise, we'll want to sit and wait for the responses
|
||||
// from the roomserver. There will be one response for every
|
||||
// input we submitted. The last error value we receive will
|
||||
// be the one returned as the error string.
|
||||
defer replySub.Drain() // nolint:errcheck
|
||||
for i := 0; i < len(request.InputRoomEvents); i++ {
|
||||
msg, err := replySub.NextMsgWithContext(ctx)
|
||||
|
@ -207,7 +365,6 @@ func (r *Inputer) InputRoomEvents(
|
|||
}
|
||||
if len(msg.Data) > 0 {
|
||||
response.ErrMsg = string(msg.Data)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
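The Inputer rework above gives each room its own durable JetStream pull consumer, created explicitly so that it outlives the subscription, filtered to the room's subject, and acknowledged with the "ack all" policy. A rough, self-contained sketch of that shape; the stream, subject and consumer names are illustrative and the stream is assumed to already exist:

package main

import (
	"fmt"
	"time"

	"github.com/nats-io/nats.go"
)

func main() {
	nc, err := nats.Connect(nats.DefaultURL)
	if err != nil {
		panic(err)
	}
	defer nc.Close()
	js, _ := nc.JetStream()

	// Illustrative names: one stream for all rooms, one filtered durable
	// consumer per room so rooms cannot head-of-line block each other.
	stream, roomSubject, durable := "InputRoomEvent", "InputRoomEvent.room1", "RoomInputroom1"

	// Create the durable consumer explicitly so that Unsubscribe on the
	// bound subscription does not delete it. AckAll means acking one
	// message also acks everything delivered before it on this consumer.
	if _, err := js.AddConsumer(stream, &nats.ConsumerConfig{
		Durable:       durable,
		AckPolicy:     nats.AckAllPolicy,
		DeliverPolicy: nats.DeliverAllPolicy,
		FilterSubject: roomSubject,
	}); err != nil {
		panic(err)
	}

	// Bind to the existing consumer rather than letting PullSubscribe own it.
	sub, err := js.PullSubscribe(roomSubject, durable, nats.ManualAck(), nats.Bind(stream, durable))
	if err != nil {
		panic(err)
	}

	// Pull one message at a time, acknowledging only once processed.
	msgs, err := sub.Fetch(1, nats.MaxWait(5*time.Second))
	if err == nil && len(msgs) == 1 {
		fmt.Println("processing", msgs[0].Subject)
		_ = msgs[0].Ack()
	}
}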
|
@ -212,12 +212,34 @@ func (r *Leaver) performFederatedRejectInvite(
|
|||
ServerNames: []gomatrixserverlib.ServerName{domain},
|
||||
}
|
||||
leaveRes := fsAPI.PerformLeaveResponse{}
|
||||
if err := r.FSAPI.PerformLeave(ctx, &leaveReq, &leaveRes); err != nil {
|
||||
if err = r.FSAPI.PerformLeave(ctx, &leaveReq, &leaveRes); err != nil {
|
||||
// failures in PerformLeave should NEVER stop us from telling other components like the
|
||||
// sync API that the invite was withdrawn. Otherwise we can end up with stuck invites.
|
||||
util.GetLogger(ctx).WithError(err).Errorf("failed to PerformLeave, still retiring invite event")
|
||||
}
|
||||
|
||||
info, err := r.DB.RoomInfo(ctx, req.RoomID)
|
||||
if err != nil {
|
||||
util.GetLogger(ctx).WithError(err).Errorf("failed to get RoomInfo, still retiring invite event")
|
||||
}
|
||||
|
||||
updater, err := r.DB.MembershipUpdater(ctx, req.RoomID, req.UserID, true, info.RoomVersion)
|
||||
if err != nil {
|
||||
util.GetLogger(ctx).WithError(err).Errorf("failed to get MembershipUpdater, still retiring invite event")
|
||||
}
|
||||
if updater != nil {
|
||||
if _, err = updater.SetToLeave(req.UserID, eventID); err != nil {
|
||||
util.GetLogger(ctx).WithError(err).Errorf("failed to set membership to leave, still retiring invite event")
|
||||
if err = updater.Rollback(); err != nil {
|
||||
util.GetLogger(ctx).WithError(err).Errorf("failed to rollback membership leave, still retiring invite event")
|
||||
}
|
||||
} else {
|
||||
if err = updater.Commit(); err != nil {
|
||||
util.GetLogger(ctx).WithError(err).Errorf("failed to commit membership update, still retiring invite event")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Withdraw the invite, so that the sync API etc are
|
||||
// notified that we rejected it.
|
||||
return []api.OutputEvent{
|
||||
|
|
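The rejected-invite path above retires the invite even when PerformLeave fails, then either commits or rolls back the membership updater depending on whether SetToLeave succeeded. A condensed sketch of that commit-or-rollback shape; the Updater interface here is a simplified stand-in for Dendrite's MembershipUpdater, not the real type or signatures:

package main

import (
	"errors"
	"log"
)

// Updater is a stand-in for a transactional membership updater: it must
// end in exactly one Commit or Rollback.
type Updater interface {
	SetToLeave(userID, eventID string) error
	Commit() error
	Rollback() error
}

// retireInvite mirrors the shape used above: apply the leave membership,
// roll back on failure, commit on success, never leave the transaction open.
func retireInvite(u Updater, userID, eventID string) {
	if err := u.SetToLeave(userID, eventID); err != nil {
		log.Printf("failed to set membership to leave: %v", err)
		if err := u.Rollback(); err != nil {
			log.Printf("failed to rollback: %v", err)
		}
		return
	}
	if err := u.Commit(); err != nil {
		log.Printf("failed to commit membership update: %v", err)
	}
}

// fakeUpdater is a toy implementation used only to exercise the sketch.
type fakeUpdater struct{ fail bool }

func (f *fakeUpdater) SetToLeave(string, string) error {
	if f.fail {
		return errors.New("boom")
	}
	return nil
}
func (f *fakeUpdater) Commit() error   { log.Println("committed"); return nil }
func (f *fakeUpdater) Rollback() error { log.Println("rolled back"); return nil }

func main() {
	retireInvite(&fakeUpdater{}, "@alice:example.org", "$event1")
	retireInvite(&fakeUpdater{fail: true}, "@alice:example.org", "$event1")
}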
|
@@ -50,12 +50,12 @@ func NewInternalAPI(
 		logrus.WithError(err).Panicf("failed to connect to room server db")
 	}
 
-	js, nc := jetstream.Prepare(&cfg.Matrix.JetStream)
+	js, nc := jetstream.Prepare(base.ProcessContext, &cfg.Matrix.JetStream)
 
 	return internal.NewRoomserverAPI(
 		base.ProcessContext, cfg, roomserverDB, js, nc,
-		cfg.Matrix.JetStream.TopicFor(jetstream.InputRoomEvent),
-		cfg.Matrix.JetStream.TopicFor(jetstream.OutputRoomEvent),
+		cfg.Matrix.JetStream.Prefixed(jetstream.InputRoomEvent),
+		cfg.Matrix.JetStream.Prefixed(jetstream.OutputRoomEvent),
 		base.Caches, perspectiveServerNames,
 	)
 }

|
@ -151,7 +151,7 @@ func (s *eventStateKeyStatements) BulkSelectEventStateKey(
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer selectPrep.Close()
|
||||
defer internal.CloseAndLogIfError(ctx, selectPrep, "selectPrep.close() failed")
|
||||
stmt := sqlutil.TxStmt(txn, selectPrep)
|
||||
rows, err := stmt.QueryContext(ctx, iEventStateKeyNIDs...)
|
||||
if err != nil {
|
||||
|
|
|
@ -128,7 +128,7 @@ func (s *eventTypeStatements) BulkSelectEventTypeNID(
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer selectPrep.Close()
|
||||
defer internal.CloseAndLogIfError(ctx, selectPrep, "selectPrep.close() failed")
|
||||
stmt := sqlutil.TxStmt(txn, selectPrep)
|
||||
///////////////
|
||||
|
||||
|
|
|
@ -567,7 +567,7 @@ func (s *eventStatements) SelectMaxEventDepth(ctx context.Context, txn *sql.Tx,
|
|||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer sqlPrep.Close()
|
||||
defer internal.CloseAndLogIfError(ctx, sqlPrep, "sqlPrep.close() failed")
|
||||
err = sqlutil.TxStmt(txn, sqlPrep).QueryRowContext(ctx, iEventIDs...).Scan(&result)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("sqlutil.TxStmt.QueryRowContext: %w", err)
|
||||
|
@ -583,7 +583,7 @@ func (s *eventStatements) SelectRoomNIDsForEventNIDs(
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer sqlPrep.Close()
|
||||
defer internal.CloseAndLogIfError(ctx, sqlPrep, "sqlPrep.close() failed")
|
||||
sqlStmt := sqlutil.TxStmt(txn, sqlPrep)
|
||||
iEventNIDs := make([]interface{}, len(eventNIDs))
|
||||
for i, v := range eventNIDs {
|
||||
|
|
|
@ -75,6 +75,7 @@ type BaseDendrite struct {
|
|||
PublicMediaAPIMux *mux.Router
|
||||
PublicWellKnownAPIMux *mux.Router
|
||||
InternalAPIMux *mux.Router
|
||||
DendriteAdminMux *mux.Router
|
||||
SynapseAdminMux *mux.Router
|
||||
UseHTTPAPIs bool
|
||||
apiHttpClient *http.Client
|
||||
|
@ -207,7 +208,8 @@ func NewBaseDendrite(cfg *config.Dendrite, componentName string, options ...Base
|
|||
PublicMediaAPIMux: mux.NewRouter().SkipClean(true).PathPrefix(httputil.PublicMediaPathPrefix).Subrouter().UseEncodedPath(),
|
||||
PublicWellKnownAPIMux: mux.NewRouter().SkipClean(true).PathPrefix(httputil.PublicWellKnownPrefix).Subrouter().UseEncodedPath(),
|
||||
InternalAPIMux: mux.NewRouter().SkipClean(true).PathPrefix(httputil.InternalPathPrefix).Subrouter().UseEncodedPath(),
|
||||
SynapseAdminMux: mux.NewRouter().SkipClean(true).PathPrefix("/_synapse/").Subrouter().UseEncodedPath(),
|
||||
DendriteAdminMux: mux.NewRouter().SkipClean(true).PathPrefix(httputil.DendriteAdminPathPrefix).Subrouter().UseEncodedPath(),
|
||||
SynapseAdminMux: mux.NewRouter().SkipClean(true).PathPrefix(httputil.SynapseAdminPathPrefix).Subrouter().UseEncodedPath(),
|
||||
apiHttpClient: &apiClient,
|
||||
}
|
||||
}
|
||||
|
@@ -376,6 +378,17 @@ func (b *BaseDendrite) SetupAndServeHTTP(
 		internalRouter.Handle("/metrics", httputil.WrapHandlerInBasicAuth(promhttp.Handler(), b.Cfg.Global.Metrics.BasicAuth))
 	}
 
+	b.DendriteAdminMux.HandleFunc("/monitor/up", func(w http.ResponseWriter, r *http.Request) {
+		w.WriteHeader(200)
+	})
+	b.DendriteAdminMux.HandleFunc("/monitor/health", func(w http.ResponseWriter, r *http.Request) {
+		if b.ProcessContext.IsDegraded() {
+			w.WriteHeader(503)
+			return
+		}
+		w.WriteHeader(200)
+	})
+
 	var clientHandler http.Handler
 	clientHandler = b.PublicClientAPIMux
 	if b.Cfg.Global.Sentry.Enabled {

|
@ -392,12 +405,13 @@ func (b *BaseDendrite) SetupAndServeHTTP(
|
|||
})
|
||||
federationHandler = sentryHandler.Handle(b.PublicFederationAPIMux)
|
||||
}
|
||||
internalRouter.PathPrefix(httputil.DendriteAdminPathPrefix).Handler(b.DendriteAdminMux)
|
||||
externalRouter.PathPrefix(httputil.PublicClientPathPrefix).Handler(clientHandler)
|
||||
if !b.Cfg.Global.DisableFederation {
|
||||
externalRouter.PathPrefix(httputil.PublicKeyPathPrefix).Handler(b.PublicKeyAPIMux)
|
||||
externalRouter.PathPrefix(httputil.PublicFederationPathPrefix).Handler(federationHandler)
|
||||
}
|
||||
externalRouter.PathPrefix("/_synapse/").Handler(b.SynapseAdminMux)
|
||||
externalRouter.PathPrefix(httputil.SynapseAdminPathPrefix).Handler(b.SynapseAdminMux)
|
||||
externalRouter.PathPrefix(httputil.PublicMediaPathPrefix).Handler(b.PublicMediaAPIMux)
|
||||
externalRouter.PathPrefix(httputil.PublicWellKnownPrefix).Handler(b.PublicWellKnownAPIMux)
|
||||
|
||||
|
|
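With the /monitor endpoints registered on the Dendrite admin mux above, an external supervisor can distinguish "up" from "healthy": /monitor/health returns 503 once the process reports itself degraded. A small probe sketch; the base URL is an assumption about where the admin routes are served in a given deployment:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Assumed local listener; adjust to wherever Dendrite exposes its
	// /_dendrite/ admin routes in your deployment.
	base := "http://localhost:8008"

	for _, path := range []string{"/_dendrite/monitor/up", "/_dendrite/monitor/health"} {
		resp, err := http.Get(base + path)
		if err != nil {
			fmt.Println(path, "unreachable:", err)
			continue
		}
		resp.Body.Close()
		switch resp.StatusCode {
		case http.StatusOK:
			fmt.Println(path, "OK")
		case http.StatusServiceUnavailable:
			// health returns 503 when ProcessContext.IsDegraded() is true
			fmt.Println(path, "degraded")
		default:
			fmt.Println(path, "unexpected status", resp.StatusCode)
		}
	}
}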
|
@ -209,13 +209,14 @@ func setupRegexps(asAPI *AppServiceAPI, derived *Derived) (err error) {
|
|||
for _, appservice := range derived.ApplicationServices {
|
||||
// The sender_localpart can be considered an exclusive regex for a single user, so let's do that
|
||||
// to simplify the code
|
||||
var senderUserIDSlice = []string{fmt.Sprintf("@%s:%s", appservice.SenderLocalpart, asAPI.Matrix.ServerName)}
|
||||
usersSlice, found := appservice.NamespaceMap["users"]
|
||||
users, found := appservice.NamespaceMap["users"]
|
||||
if !found {
|
||||
usersSlice = []ApplicationServiceNamespace{}
|
||||
appservice.NamespaceMap["users"] = usersSlice
|
||||
users = []ApplicationServiceNamespace{}
|
||||
}
|
||||
appendExclusiveNamespaceRegexs(&senderUserIDSlice, usersSlice)
|
||||
appservice.NamespaceMap["users"] = append(users, ApplicationServiceNamespace{
|
||||
Exclusive: true,
|
||||
Regex: regexp.QuoteMeta(fmt.Sprintf("@%s:%s", appservice.SenderLocalpart, asAPI.Matrix.ServerName)),
|
||||
})
|
||||
|
||||
for key, namespaceSlice := range appservice.NamespaceMap {
|
||||
switch key {
|
||||
|
|
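The setupRegexps change above folds the appservice's sender_localpart user ID into the "users" namespace as an explicit exclusive entry, escaping it with regexp.QuoteMeta so it is matched literally. A hedged sketch of what such an entry looks like; the localpart, server name and namespace struct are examples, not the real Dendrite types:

package main

import (
	"fmt"
	"regexp"
)

// namespace is a stand-in for Dendrite's ApplicationServiceNamespace.
type namespace struct {
	Exclusive bool
	Regex     string
}

func main() {
	senderLocalpart, serverName := "_ircbridge", "example.org" // example values
	userID := fmt.Sprintf("@%s:%s", senderLocalpart, serverName)

	ns := namespace{
		Exclusive: true,
		// QuoteMeta escapes regex metacharacters so the sender user ID is
		// matched literally when checking namespace ownership.
		Regex: regexp.QuoteMeta(userID),
	}

	matched, _ := regexp.MatchString(ns.Regex, userID)
	fmt.Println(ns.Regex, "matches own user ID:", matched)
}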
|
@@ -19,12 +19,12 @@ type JetStream struct {
 	InMemory bool `yaml:"in_memory"`
 }
 
-func (c *JetStream) TopicFor(name string) string {
+func (c *JetStream) Prefixed(name string) string {
 	return fmt.Sprintf("%s%s", c.TopicPrefix, name)
 }
 
 func (c *JetStream) Durable(name string) string {
-	return c.TopicFor(name)
+	return c.Prefixed(name)
 }
 
 func (c *JetStream) Defaults(generate bool) {

|
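Renaming TopicFor to Prefixed makes it clearer that the helper only prepends the configured topic prefix to a stream or durable name. A quick sketch of what that composition yields; the prefix value is just an example, not a default:

package main

import (
	"fmt"

	"github.com/matrix-org/dendrite/setup/config"
)

func main() {
	// Example prefix only; in a real deployment this comes from the
	// JetStream topic prefix in the global configuration.
	js := config.JetStream{TopicPrefix: "Dendrite"}

	fmt.Println(js.Prefixed("OutputRoomEvent"))          // "DendriteOutputRoomEvent"
	fmt.Println(js.Durable("SyncAPIRoomServerConsumer")) // "DendriteSyncAPIRoomServerConsumer"
}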
|
@ -1,11 +1,15 @@
|
|||
package jetstream
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/getsentry/sentry-go"
|
||||
"github.com/matrix-org/dendrite/setup/config"
|
||||
"github.com/matrix-org/dendrite/setup/process"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
natsserver "github.com/nats-io/nats-server/v2/server"
|
||||
|
@ -15,10 +19,10 @@ import (
|
|||
var natsServer *natsserver.Server
|
||||
var natsServerMutex sync.Mutex
|
||||
|
||||
func Prepare(cfg *config.JetStream) (natsclient.JetStreamContext, *natsclient.Conn) {
|
||||
func Prepare(process *process.ProcessContext, cfg *config.JetStream) (natsclient.JetStreamContext, *natsclient.Conn) {
|
||||
// check if we need an in-process NATS Server
|
||||
if len(cfg.Addresses) != 0 {
|
||||
return setupNATS(cfg, nil)
|
||||
return setupNATS(process, cfg, nil)
|
||||
}
|
||||
natsServerMutex.Lock()
|
||||
if natsServer == nil {
|
||||
|
@ -35,7 +39,16 @@ func Prepare(cfg *config.JetStream) (natsclient.JetStreamContext, *natsclient.Co
|
|||
panic(err)
|
||||
}
|
||||
natsServer.ConfigureLogger()
|
||||
go natsServer.Start()
|
||||
go func() {
|
||||
process.ComponentStarted()
|
||||
natsServer.Start()
|
||||
}()
|
||||
go func() {
|
||||
<-process.WaitForShutdown()
|
||||
natsServer.Shutdown()
|
||||
natsServer.WaitForShutdown()
|
||||
process.ComponentFinished()
|
||||
}()
|
||||
}
|
||||
natsServerMutex.Unlock()
|
||||
if !natsServer.ReadyForConnections(time.Second * 10) {
|
||||
|
@ -45,10 +58,10 @@ func Prepare(cfg *config.JetStream) (natsclient.JetStreamContext, *natsclient.Co
|
|||
if err != nil {
|
||||
logrus.Fatalln("Failed to create NATS client")
|
||||
}
|
||||
return setupNATS(cfg, nc)
|
||||
return setupNATS(process, cfg, nc)
|
||||
}
|
||||
|
||||
func setupNATS(cfg *config.JetStream, nc *natsclient.Conn) (natsclient.JetStreamContext, *natsclient.Conn) {
|
||||
func setupNATS(process *process.ProcessContext, cfg *config.JetStream, nc *natsclient.Conn) (natsclient.JetStreamContext, *natsclient.Conn) {
|
||||
if nc == nil {
|
||||
var err error
|
||||
nc, err = natsclient.Connect(strings.Join(cfg.Addresses, ","))
|
||||
|
@ -65,14 +78,35 @@ func setupNATS(cfg *config.JetStream, nc *natsclient.Conn) (natsclient.JetStream
|
|||
}
|
||||
|
||||
for _, stream := range streams { // streams are defined in streams.go
|
||||
name := cfg.TopicFor(stream.Name)
|
||||
name := cfg.Prefixed(stream.Name)
|
||||
info, err := s.StreamInfo(name)
|
||||
if err != nil && err != natsclient.ErrStreamNotFound {
|
||||
logrus.WithError(err).Fatal("Unable to get stream info")
|
||||
}
|
||||
subjects := stream.Subjects
|
||||
if len(subjects) == 0 {
|
||||
// By default we want each stream to listen for the subjects
|
||||
// that are either an exact match for the stream name, or where
|
||||
// the first part of the subject is the stream name. ">" is a
|
||||
// wildcard in NATS for one or more subject tokens. In the case
|
||||
// that the stream is called "Foo", this will match any message
|
||||
// with the subject "Foo", "Foo.Bar" or "Foo.Bar.Baz" etc.
|
||||
subjects = []string{name, name + ".>"}
|
||||
}
|
||||
if info != nil {
|
||||
switch {
|
||||
case !reflect.DeepEqual(info.Config.Subjects, subjects):
|
||||
fallthrough
|
||||
case info.Config.Retention != stream.Retention:
|
||||
fallthrough
|
||||
case info.Config.Storage != stream.Storage:
|
||||
if err = s.DeleteStream(name); err != nil {
|
||||
logrus.WithError(err).Fatal("Unable to delete stream")
|
||||
}
|
||||
info = nil
|
||||
}
|
||||
}
|
||||
if info == nil {
|
||||
stream.Subjects = []string{name}
|
||||
|
||||
// If we're trying to keep everything in memory (e.g. unit tests)
|
||||
// then overwrite the storage policy.
|
||||
if cfg.InMemory {
|
||||
|
@ -83,8 +117,42 @@ func setupNATS(cfg *config.JetStream, nc *natsclient.Conn) (natsclient.JetStream
|
|||
// array, otherwise we end up with namespaces on namespaces.
|
||||
namespaced := *stream
|
||||
namespaced.Name = name
|
||||
namespaced.Subjects = subjects
|
||||
if _, err = s.AddStream(&namespaced); err != nil {
|
||||
logrus.WithError(err).WithField("stream", name).Fatal("Unable to add stream")
|
||||
logger := logrus.WithError(err).WithFields(logrus.Fields{
|
||||
"stream": namespaced.Name,
|
||||
"subjects": namespaced.Subjects,
|
||||
})
|
||||
|
||||
// If the stream was supposed to be in-memory to begin with
|
||||
// then an error here is fatal so we'll give up.
|
||||
if namespaced.Storage == natsclient.MemoryStorage {
|
||||
logger.WithError(err).Fatal("Unable to add in-memory stream")
|
||||
}
|
||||
|
||||
// The stream was supposed to be on disk. Let's try starting
|
||||
// Dendrite with the stream in-memory instead. That'll mean that
|
||||
// we can't recover anything that was queued on the disk but we
|
||||
// will still be able to start and run hopefully in the meantime.
|
||||
logger.WithError(err).Error("Unable to add stream")
|
||||
sentry.CaptureException(fmt.Errorf("Unable to add stream %q: %w", namespaced.Name, err))
|
||||
|
||||
namespaced.Storage = natsclient.MemoryStorage
|
||||
if _, err = s.AddStream(&namespaced); err != nil {
|
||||
// We tried to add the stream in-memory instead but something
|
||||
// went wrong. That's an unrecoverable situation so we will
|
||||
// give up at this point.
|
||||
logger.WithError(err).Fatal("Unable to add in-memory stream")
|
||||
}
|
||||
|
||||
if stream.Storage != namespaced.Storage {
|
||||
// We've managed to add the stream in memory. What's on the
|
||||
// disk will be left alone, but our ability to recover from a
|
||||
// future crash will be limited. Yell about it.
|
||||
sentry.CaptureException(fmt.Errorf("Stream %q is running in-memory; this may be due to data corruption in the JetStream storage directory, investigate as soon as possible", namespaced.Name))
|
||||
logrus.Warn("Stream is running in-memory; this may be due to data corruption in the JetStream storage directory, investigate as soon as possible")
|
||||
process.Degraded()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
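The stream setup above defaults each stream's subjects to the stream name plus a ".>" wildcard, so a stream called "Foo" also receives "Foo.Bar", "Foo.Bar.Baz" and so on. A tiny sketch of that defaulting rule in isolation, written as a free-standing helper rather than Dendrite's actual code:

package main

import "fmt"

// defaultSubjects mirrors the defaulting described above: listen on the
// exact stream name and on any subject nested under it (">" matches one
// or more subject tokens in NATS).
func defaultSubjects(name string, configured []string) []string {
	if len(configured) != 0 {
		return configured
	}
	return []string{name, name + ".>"}
}

func main() {
	fmt.Println(defaultSubjects("InputRoomEvent", nil)) // [InputRoomEvent InputRoomEvent.>]
	fmt.Println(defaultSubjects("OutputRoomEvent", []string{"OutputRoomEvent"}))
}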
|
@@ -1,6 +1,8 @@
 package jetstream
 
 import (
+	"fmt"
+	"regexp"
 	"time"
 
 	"github.com/nats-io/nats.go"
@@ -24,10 +26,20 @@ var (
 	OutputReadUpdate = "OutputReadUpdate"
 )
 
+var safeCharacters = regexp.MustCompile("[^A-Za-z0-9$]+")
+
+func Tokenise(str string) string {
+	return safeCharacters.ReplaceAllString(str, "_")
+}
+
+func InputRoomEventSubj(roomID string) string {
+	return fmt.Sprintf("%s.%s", InputRoomEvent, Tokenise(roomID))
+}
+
 var streams = []*nats.StreamConfig{
 	{
 		Name:      InputRoomEvent,
-		Retention: nats.WorkQueuePolicy,
+		Retention: nats.InterestPolicy,
 		Storage:   nats.FileStorage,
 	},
 	{

|
|
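Tokenise and InputRoomEventSubj give every room its own NATS subject under the InputRoomEvent stream by replacing anything outside [A-Za-z0-9$] with underscores. A short illustration; the room ID is made up:

package main

import (
	"fmt"
	"regexp"
)

var safeCharacters = regexp.MustCompile("[^A-Za-z0-9$]+")

// tokenise mirrors jetstream.Tokenise from the change above.
func tokenise(str string) string {
	return safeCharacters.ReplaceAllString(str, "_")
}

func main() {
	roomID := "!someRoom:example.org" // hypothetical room ID
	fmt.Println("InputRoomEvent." + tokenise(roomID))
	// Output: InputRoomEvent._someRoom_example_org
}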
|
@ -57,7 +57,7 @@ type Monolith struct {
|
|||
// AddAllPublicRoutes attaches all public paths to the given router
|
||||
func (m *Monolith) AddAllPublicRoutes(process *process.ProcessContext, csMux, ssMux, keyMux, wkMux, mediaMux, synapseMux *mux.Router) {
|
||||
clientapi.AddPublicRoutes(
|
||||
csMux, synapseMux, &m.Config.ClientAPI, m.AccountDB,
|
||||
process, csMux, synapseMux, &m.Config.ClientAPI,
|
||||
m.FedClient, m.RoomserverAPI,
|
||||
m.EDUInternalAPI, m.AppserviceAPI, transactions.New(),
|
||||
m.FederationAPI, m.UserAPI, m.KeyAPI,
|
||||
|
|
|
@ -283,11 +283,7 @@ func (w *walker) walk() util.JSONResponse {
|
|||
if !roomExists {
|
||||
// attempt to query this room over federation, as either we've never heard of it before
|
||||
// or we've left it and hence are not authorised (but info may be exposed regardless)
|
||||
fedRes, err := w.federatedRoomInfo(rv.roomID, rv.vias)
|
||||
if err != nil {
|
||||
util.GetLogger(w.ctx).WithError(err).WithField("room_id", rv.roomID).Errorf("failed to query federated spaces")
|
||||
continue
|
||||
}
|
||||
fedRes := w.federatedRoomInfo(rv.roomID, rv.vias)
|
||||
if fedRes != nil {
|
||||
discoveredChildEvents = fedRes.Room.ChildrenState
|
||||
discoveredRooms = append(discoveredRooms, fedRes.Room)
|
||||
|
@ -420,15 +416,15 @@ func (w *walker) publicRoomsChunk(roomID string) *gomatrixserverlib.PublicRoom {
|
|||
|
||||
// federatedRoomInfo returns more of the spaces graph from another server. Returns nil if this was
|
||||
// unsuccessful.
|
||||
func (w *walker) federatedRoomInfo(roomID string, vias []string) (*gomatrixserverlib.MSC2946SpacesResponse, error) {
|
||||
func (w *walker) federatedRoomInfo(roomID string, vias []string) *gomatrixserverlib.MSC2946SpacesResponse {
|
||||
// only do federated requests for client requests
|
||||
if w.caller == nil {
|
||||
return nil, nil
|
||||
return nil
|
||||
}
|
||||
resp, ok := w.cache.GetSpaceSummary(roomID)
|
||||
if ok {
|
||||
util.GetLogger(w.ctx).Debugf("Returning cached response for %s", roomID)
|
||||
return &resp, nil
|
||||
return &resp
|
||||
}
|
||||
util.GetLogger(w.ctx).Debugf("Querying %s via %+v", roomID, vias)
|
||||
ctx := context.Background()
|
||||
|
@ -455,9 +451,9 @@ func (w *walker) federatedRoomInfo(roomID string, vias []string) (*gomatrixserve
|
|||
}
|
||||
w.cache.StoreSpaceSummary(roomID, res)
|
||||
|
||||
return &res, nil
|
||||
return &res
|
||||
}
|
||||
return nil, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *walker) roomExists(roomID string) bool {
|
||||
|
@ -717,23 +713,6 @@ func stripped(ev *gomatrixserverlib.Event) *gomatrixserverlib.MSC2946StrippedEve
|
|||
}
|
||||
}
|
||||
|
||||
func eventKey(event *gomatrixserverlib.MSC2946StrippedEvent) string {
|
||||
return event.RoomID + "|" + event.Type + "|" + event.StateKey
|
||||
}
|
||||
|
||||
func spaceTargetStripped(event *gomatrixserverlib.MSC2946StrippedEvent) string {
|
||||
if event.StateKey == "" {
|
||||
return "" // no-op
|
||||
}
|
||||
switch event.Type {
|
||||
case ConstSpaceParentEventType:
|
||||
return event.StateKey
|
||||
case ConstSpaceChildEventType:
|
||||
return event.StateKey
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func parseInt(intstr string, defaultVal int) int {
|
||||
i, err := strconv.ParseInt(intstr, 10, 32)
|
||||
if err != nil {
|
||||
|
|
|
@@ -2,13 +2,19 @@ package process
 
 import (
 	"context"
+	"fmt"
 	"sync"
+
+	"github.com/getsentry/sentry-go"
+	"github.com/sirupsen/logrus"
+	"go.uber.org/atomic"
 )
 
 type ProcessContext struct {
 	wg       *sync.WaitGroup    // used to wait for components to shutdown
 	ctx      context.Context    // cancelled when Stop is called
 	shutdown context.CancelFunc // shut down Dendrite
+	degraded atomic.Bool
 }
 
 func NewProcessContext() *ProcessContext {
@@ -43,3 +49,14 @@ func (b *ProcessContext) WaitForShutdown() <-chan struct{} {
 func (b *ProcessContext) WaitForComponentsToFinish() {
 	b.wg.Wait()
 }
+
+func (b *ProcessContext) Degraded() {
+	if b.degraded.CAS(false, true) {
+		logrus.Warn("Dendrite is running in a degraded state")
+		sentry.CaptureException(fmt.Errorf("Process is running in a degraded state"))
+	}
+}
+
+func (b *ProcessContext) IsDegraded() bool {
+	return b.degraded.Load()
+}
|
|
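Degraded uses a compare-and-swap so the warning and Sentry report fire only on the first transition into the degraded state; IsDegraded is what the /monitor/health handler checks. A minimal sketch of the same once-only flag, using go.uber.org/atomic as the change does:

package main

import (
	"fmt"

	"go.uber.org/atomic"
)

type process struct {
	degraded atomic.Bool
}

// degrade flips the flag exactly once; repeated calls are no-ops so the
// operator is not spammed with duplicate warnings.
func (p *process) degrade() {
	if p.degraded.CAS(false, true) {
		fmt.Println("running in a degraded state")
	}
}

func main() {
	p := &process{}
	p.degrade() // prints the warning
	p.degrade() // silent: already degraded
	fmt.Println("degraded:", p.degraded.Load())
}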
|
@ -61,7 +61,7 @@ func NewOutputClientDataConsumer(
|
|||
return &OutputClientDataConsumer{
|
||||
ctx: process.Context(),
|
||||
jetstream: js,
|
||||
topic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputClientData),
|
||||
topic: cfg.Matrix.JetStream.Prefixed(jetstream.OutputClientData),
|
||||
durable: cfg.Matrix.JetStream.Durable("SyncAPIClientAPIConsumer"),
|
||||
db: store,
|
||||
notifier: notifier,
|
||||
|
|
|
@ -62,7 +62,7 @@ func NewOutputReceiptEventConsumer(
|
|||
return &OutputReceiptEventConsumer{
|
||||
ctx: process.Context(),
|
||||
jetstream: js,
|
||||
topic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputReceiptEvent),
|
||||
topic: cfg.Matrix.JetStream.Prefixed(jetstream.OutputReceiptEvent),
|
||||
durable: cfg.Matrix.JetStream.Durable("SyncAPIEDUServerReceiptConsumer"),
|
||||
db: store,
|
||||
notifier: notifier,
|
||||
|
|
|
@ -57,7 +57,7 @@ func NewOutputSendToDeviceEventConsumer(
|
|||
return &OutputSendToDeviceEventConsumer{
|
||||
ctx: process.Context(),
|
||||
jetstream: js,
|
||||
topic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputSendToDeviceEvent),
|
||||
topic: cfg.Matrix.JetStream.Prefixed(jetstream.OutputSendToDeviceEvent),
|
||||
durable: cfg.Matrix.JetStream.Durable("SyncAPIEDUServerSendToDeviceConsumer"),
|
||||
db: store,
|
||||
serverName: cfg.Matrix.ServerName,
|
||||
|
|
|
@ -56,7 +56,7 @@ func NewOutputTypingEventConsumer(
|
|||
return &OutputTypingEventConsumer{
|
||||
ctx: process.Context(),
|
||||
jetstream: js,
|
||||
topic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputTypingEvent),
|
||||
topic: cfg.Matrix.JetStream.Prefixed(jetstream.OutputTypingEvent),
|
||||
durable: cfg.Matrix.JetStream.Durable("SyncAPIEDUServerTypingConsumer"),
|
||||
eduCache: eduCache,
|
||||
notifier: notifier,
|
||||
|
|
|
@ -65,7 +65,7 @@ func NewOutputRoomEventConsumer(
|
|||
ctx: process.Context(),
|
||||
cfg: cfg,
|
||||
jetstream: js,
|
||||
topic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputRoomEvent),
|
||||
topic: cfg.Matrix.JetStream.Prefixed(jetstream.OutputRoomEvent),
|
||||
durable: cfg.Matrix.JetStream.Durable("SyncAPIRoomServerConsumer"),
|
||||
db: store,
|
||||
notifier: notifier,
|
||||
|
|
|
@ -56,7 +56,7 @@ func NewOutputNotificationDataConsumer(
|
|||
ctx: process.Context(),
|
||||
jetstream: js,
|
||||
durable: cfg.Matrix.JetStream.Durable("SyncAPINotificationDataConsumer"),
|
||||
topic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputNotificationData),
|
||||
topic: cfg.Matrix.JetStream.Prefixed(jetstream.OutputNotificationData),
|
||||
db: store,
|
||||
notifier: notifier,
|
||||
stream: stream,
|
||||
|
|
|
@@ -41,7 +41,6 @@ type messagesReq struct {
roomID string
from *types.TopologyToken
to *types.TopologyToken
- fromStream *types.StreamingToken
device *userapi.Device
wasToProvided bool
backwardOrdering bool

@@ -50,7 +49,7 @@ type messagesReq struct {

type messagesResp struct {
Start string `json:"start"`
- StartStream string `json:"start_stream,omitempty"` // NOTSPEC: so clients can hit /messages then immediately /sync with a latest sync token
+ StartStream string `json:"start_stream,omitempty"` // NOTSPEC: used by Cerulean, so clients can hit /messages then immediately /sync with a latest sync token
End string `json:"end"`
Chunk []gomatrixserverlib.ClientEvent `json:"chunk"`
State []gomatrixserverlib.ClientEvent `json:"state"`

@@ -93,6 +92,7 @@ func OnIncomingMessagesRequest(
// Pagination tokens.
var fromStream *types.StreamingToken
fromQuery := req.URL.Query().Get("from")
+ toQuery := req.URL.Query().Get("to")
emptyFromSupplied := fromQuery == ""
if emptyFromSupplied {
// NOTSPEC: We will pretend they used the latest sync token if no ?from= was provided.

@@ -101,18 +101,6 @@ func OnIncomingMessagesRequest(
fromQuery = currPos.String()
}

- from, err := types.NewTopologyTokenFromString(fromQuery)
- if err != nil {
- fs, err2 := types.NewStreamTokenFromString(fromQuery)
- fromStream = &fs
- if err2 != nil {
- return util.JSONResponse{
- Code: http.StatusBadRequest,
- JSON: jsonerror.InvalidArgumentValue("Invalid from parameter: " + err2.Error()),
- }
- }
- }
-
// Direction to return events from.
dir := req.URL.Query().Get("dir")
if dir != "b" && dir != "f" {

@@ -125,16 +113,43 @@ func OnIncomingMessagesRequest(
// to have one of the two accepted values (so dir == "f" <=> !backwardOrdering).
backwardOrdering := (dir == "b")

+ from, err := types.NewTopologyTokenFromString(fromQuery)
+ if err != nil {
+ var streamToken types.StreamingToken
+ if streamToken, err = types.NewStreamTokenFromString(fromQuery); err != nil {
+ return util.JSONResponse{
+ Code: http.StatusBadRequest,
+ JSON: jsonerror.InvalidArgumentValue("Invalid from parameter: " + err.Error()),
+ }
+ } else {
+ fromStream = &streamToken
+ from, err = db.StreamToTopologicalPosition(req.Context(), roomID, streamToken.PDUPosition, backwardOrdering)
+ if err != nil {
+ logrus.WithError(err).Errorf("Failed to get topological position for streaming token %v", streamToken)
+ return jsonerror.InternalServerError()
+ }
+ }
+ }
+
// Pagination tokens. To is optional, and its default value depends on the
// direction ("b" or "f").
var to types.TopologyToken
wasToProvided := true
- if s := req.URL.Query().Get("to"); len(s) > 0 {
- to, err = types.NewTopologyTokenFromString(s)
+ if len(toQuery) > 0 {
+ to, err = types.NewTopologyTokenFromString(toQuery)
if err != nil {
- return util.JSONResponse{
- Code: http.StatusBadRequest,
- JSON: jsonerror.InvalidArgumentValue("Invalid to parameter: " + err.Error()),
+ var streamToken types.StreamingToken
+ if streamToken, err = types.NewStreamTokenFromString(toQuery); err != nil {
+ return util.JSONResponse{
+ Code: http.StatusBadRequest,
+ JSON: jsonerror.InvalidArgumentValue("Invalid to parameter: " + err.Error()),
+ }
+ } else {
+ to, err = db.StreamToTopologicalPosition(req.Context(), roomID, streamToken.PDUPosition, !backwardOrdering)
+ if err != nil {
+ logrus.WithError(err).Errorf("Failed to get topological position for streaming token %v", streamToken)
+ return jsonerror.InternalServerError()
+ }
+ }
+ }
} else {

@@ -168,7 +183,6 @@ func OnIncomingMessagesRequest(
roomID: roomID,
from: &from,
to: &to,
- fromStream: fromStream,
wasToProvided: wasToProvided,
filter: filter,
backwardOrdering: backwardOrdering,

@@ -215,7 +229,7 @@ func OnIncomingMessagesRequest(
End: end.String(),
State: state,
}
- if emptyFromSupplied {
+ if fromStream != nil {
res.StartStream = fromStream.String()
}

@@ -251,17 +265,9 @@ func (r *messagesReq) retrieveEvents() (
eventFilter := r.filter

// Retrieve the events from the local database.
- var streamEvents []types.StreamEvent
- if r.fromStream != nil {
- toStream := r.to.StreamToken()
- streamEvents, err = r.db.GetEventsInStreamingRange(
- r.ctx, r.fromStream, &toStream, r.roomID, eventFilter, r.backwardOrdering,
- )
- } else {
- streamEvents, err = r.db.GetEventsInTopologicalRange(
- r.ctx, r.from, r.to, r.roomID, eventFilter.Limit, r.backwardOrdering,
- )
- }
+ streamEvents, err := r.db.GetEventsInTopologicalRange(
+ r.ctx, r.from, r.to, r.roomID, eventFilter.Limit, r.backwardOrdering,
+ )
if err != nil {
err = fmt.Errorf("GetEventsInRange: %w", err)
return
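The reworked /messages parsing above accepts either kind of pagination token: it tries a topological token first and, failing that, parses a stream token and converts it with StreamToTopologicalPosition. A minimal sketch of that parsing order, using simplified stand-in token types and a stubbed converter in place of the real database call:

package main

import (
	"errors"
	"fmt"
	"strconv"
	"strings"
)

// TopologyToken and StreamingToken are simplified stand-ins for the sync API
// token types; the real ones carry more fields and stricter formats.
type TopologyToken struct{ Depth, PDUPosition int64 }
type StreamingToken struct{ PDUPosition int64 }

// parseTopologyToken accepts a made-up "t<depth>-<position>" form.
func parseTopologyToken(s string) (TopologyToken, error) {
	if !strings.HasPrefix(s, "t") {
		return TopologyToken{}, errors.New("not a topology token")
	}
	parts := strings.SplitN(s[1:], "-", 2)
	if len(parts) != 2 {
		return TopologyToken{}, errors.New("not a topology token")
	}
	depth, err1 := strconv.ParseInt(parts[0], 10, 64)
	pos, err2 := strconv.ParseInt(parts[1], 10, 64)
	if err1 != nil || err2 != nil {
		return TopologyToken{}, errors.New("not a topology token")
	}
	return TopologyToken{Depth: depth, PDUPosition: pos}, nil
}

// parseStreamToken accepts a made-up "s<position>" form.
func parseStreamToken(s string) (StreamingToken, error) {
	if !strings.HasPrefix(s, "s") {
		return StreamingToken{}, errors.New("not a stream token")
	}
	pos, err := strconv.ParseInt(s[1:], 10, 64)
	if err != nil {
		return StreamingToken{}, errors.New("not a stream token")
	}
	return StreamingToken{PDUPosition: pos}, nil
}

// streamToTopological stands in for the StreamToTopologicalPosition database
// lookup; here it just copies the position across for illustration.
func streamToTopological(st StreamingToken, backwards bool) TopologyToken {
	return TopologyToken{Depth: st.PDUPosition, PDUPosition: st.PDUPosition}
}

// resolveFrom mirrors the parsing order used above: topological token first,
// then a stream token that gets converted to a topological position.
func resolveFrom(q string, backwards bool) (TopologyToken, error) {
	if t, err := parseTopologyToken(q); err == nil {
		return t, nil
	}
	st, err := parseStreamToken(q)
	if err != nil {
		return TopologyToken{}, fmt.Errorf("invalid from parameter: %w", err)
	}
	return streamToTopological(st, backwards), nil
}

func main() {
	fmt.Println(resolveFrom("t42-100", true))
	fmt.Println(resolveFrom("s99", true))
	fmt.Println(resolveFrom("bogus", true))
}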
@@ -103,8 +103,6 @@ type Database interface {
// DeletePeek deletes all peeks for a given room by a given user
// Returns an error if there was a problem communicating with the database.
DeletePeeks(ctx context.Context, RoomID, UserID string) (types.StreamPosition, error)
- // GetEventsInStreamingRange retrieves all of the events on a given ordering using the given extremities and limit.
- GetEventsInStreamingRange(ctx context.Context, from, to *types.StreamingToken, roomID string, eventFilter *gomatrixserverlib.RoomEventFilter, backwardOrdering bool) (events []types.StreamEvent, err error)
// GetEventsInTopologicalRange retrieves all of the events on a given ordering using the given extremities and limit.
GetEventsInTopologicalRange(ctx context.Context, from, to *types.TopologyToken, roomID string, limit int, backwardOrdering bool) (events []types.StreamEvent, err error)
// EventPositionInTopology returns the depth and stream position of the given event.

@@ -149,4 +147,6 @@ type Database interface {
SelectContextEvent(ctx context.Context, roomID, eventID string) (int, gomatrixserverlib.HeaderedEvent, error)
SelectContextBeforeEvent(ctx context.Context, id int, roomID string, filter *gomatrixserverlib.RoomEventFilter) ([]*gomatrixserverlib.HeaderedEvent, error)
SelectContextAfterEvent(ctx context.Context, id int, roomID string, filter *gomatrixserverlib.RoomEventFilter) (int, []*gomatrixserverlib.HeaderedEvent, error)
+
+ StreamToTopologicalPosition(ctx context.Context, roomID string, streamPos types.StreamPosition, backwardOrdering bool) (types.TopologyToken, error)
}
@@ -472,7 +472,7 @@ func (s *outputRoomEventsStatements) SelectContextBeforeEvent(
if err != nil {
return
}
- defer rows.Close()
+ defer internal.CloseAndLogIfError(ctx, rows, "rows.close() failed")

for rows.Next() {
var (

@@ -504,7 +504,7 @@ func (s *outputRoomEventsStatements) SelectContextAfterEvent(
if err != nil {
return
}
- defer rows.Close()
+ defer internal.CloseAndLogIfError(ctx, rows, "rows.close() failed")

for rows.Next() {
var (
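Both context queries above now defer internal.CloseAndLogIfError instead of a bare rows.Close(), so a failure to close the rows is at least logged rather than silently dropped. A rough sketch of what a helper of that shape typically does (the signature below is an assumption for illustration, not copied from Dendrite's internal package):

package main

import (
	"context"
	"io"
	"log"
	"strings"
)

// closeAndLogIfError closes the closer and logs any error together with the
// supplied message instead of discarding it. Hypothetical stand-in for a
// CloseAndLogIfError-style helper.
func closeAndLogIfError(ctx context.Context, closer io.Closer, msg string) {
	if closer == nil {
		return
	}
	if err := closer.Close(); err != nil {
		log.Printf("%s: %v", msg, err)
	}
}

type failingCloser struct{}

func (failingCloser) Close() error { return io.ErrClosedPipe }

func main() {
	// Typically used as: defer closeAndLogIfError(ctx, rows, "rows.close() failed")
	closeAndLogIfError(context.Background(), failingCloser{}, "rows.close() failed")
	closeAndLogIfError(context.Background(), io.NopCloser(strings.NewReader("")), "never logged")
}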
@@ -51,7 +51,7 @@ const selectEventIDsInRangeASCSQL = "" +
"SELECT event_id FROM syncapi_output_room_events_topology" +
" WHERE room_id = $1 AND (" +
"(topological_position > $2 AND topological_position < $3) OR" +
- "(topological_position = $4 AND stream_position <= $5)" +
+ "(topological_position = $4 AND stream_position >= $5)" +
") ORDER BY topological_position ASC, stream_position ASC LIMIT $6"

const selectEventIDsInRangeDESCSQL = "" +

@@ -76,13 +76,21 @@ const selectMaxPositionInTopologySQL = "" +
const deleteTopologyForRoomSQL = "" +
"DELETE FROM syncapi_output_room_events_topology WHERE room_id = $1"

+ const selectStreamToTopologicalPositionAscSQL = "" +
+ "SELECT topological_position FROM syncapi_output_room_events_topology WHERE room_id = $1 AND stream_position >= $2 ORDER BY topological_position ASC LIMIT 1;"
+
+ const selectStreamToTopologicalPositionDescSQL = "" +
+ "SELECT topological_position FROM syncapi_output_room_events_topology WHERE room_id = $1 AND stream_position <= $2 ORDER BY topological_position DESC LIMIT 1;"
+
type outputRoomEventsTopologyStatements struct {
insertEventInTopologyStmt *sql.Stmt
selectEventIDsInRangeASCStmt *sql.Stmt
selectEventIDsInRangeDESCStmt *sql.Stmt
selectPositionInTopologyStmt *sql.Stmt
selectMaxPositionInTopologyStmt *sql.Stmt
deleteTopologyForRoomStmt *sql.Stmt
+ selectStreamToTopologicalPositionAscStmt *sql.Stmt
+ selectStreamToTopologicalPositionDescStmt *sql.Stmt
}

func NewPostgresTopologyTable(db *sql.DB) (tables.Topology, error) {

@@ -109,6 +117,12 @@ func NewPostgresTopologyTable(db *sql.DB) (tables.Topology, error) {
if s.deleteTopologyForRoomStmt, err = db.Prepare(deleteTopologyForRoomSQL); err != nil {
return nil, err
}
+ if s.selectStreamToTopologicalPositionAscStmt, err = db.Prepare(selectStreamToTopologicalPositionAscSQL); err != nil {
+ return nil, err
+ }
+ if s.selectStreamToTopologicalPositionDescStmt, err = db.Prepare(selectStreamToTopologicalPositionDescSQL); err != nil {
+ return nil, err
+ }
return s, nil
}

@@ -170,6 +184,19 @@ func (s *outputRoomEventsTopologyStatements) SelectPositionInTopology(
return
}

+ // SelectStreamToTopologicalPosition returns the closest position of a given event
+ // in the topology of the room it belongs to from the given stream position.
+ func (s *outputRoomEventsTopologyStatements) SelectStreamToTopologicalPosition(
+ ctx context.Context, txn *sql.Tx, roomID string, streamPos types.StreamPosition, backwardOrdering bool,
+ ) (topoPos types.StreamPosition, err error) {
+ if backwardOrdering {
+ err = s.selectStreamToTopologicalPositionDescStmt.QueryRowContext(ctx, roomID, streamPos).Scan(&topoPos)
+ } else {
+ err = s.selectStreamToTopologicalPositionAscStmt.QueryRowContext(ctx, roomID, streamPos).Scan(&topoPos)
+ }
+ return
+ }
+
func (s *outputRoomEventsTopologyStatements) SelectMaxPositionInTopology(
ctx context.Context, txn *sql.Tx, roomID string,
) (pos types.StreamPosition, spos types.StreamPosition, err error) {
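The two new statements select the nearest topological position at or before (descending) or at or after (ascending) a given stream position, depending on the pagination direction. A small self-contained sketch of the same selection logic over an in-memory slice, standing in for the SQL:

package main

import "fmt"

// topologyEntry mirrors one row of the topology table: an event's
// topological position and its stream position.
type topologyEntry struct {
	topologicalPosition int64
	streamPosition      int64
}

// streamToTopologicalPosition finds the closest topological position for
// streamPos: going backwards, the highest topological position whose stream
// position is at or before streamPos; going forwards, the lowest one at or
// after it. This mirrors the Desc/Asc statements above, over a slice.
func streamToTopologicalPosition(entries []topologyEntry, streamPos int64, backwardOrdering bool) (int64, bool) {
	var best int64
	found := false
	for _, e := range entries {
		if backwardOrdering {
			if e.streamPosition <= streamPos && (!found || e.topologicalPosition > best) {
				best, found = e.topologicalPosition, true
			}
		} else {
			if e.streamPosition >= streamPos && (!found || e.topologicalPosition < best) {
				best, found = e.topologicalPosition, true
			}
		}
	}
	return best, found
}

func main() {
	entries := []topologyEntry{{1, 10}, {2, 15}, {3, 20}}
	fmt.Println(streamToTopologicalPosition(entries, 17, true))  // 2 true
	fmt.Println(streamToTopologicalPosition(entries, 17, false)) // 3 true
}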
@@ -32,7 +32,6 @@ type SyncServerDatasource struct {
shared.Database
db *sql.DB
writer sqlutil.Writer
- sqlutil.PartitionOffsetStatements
}

// NewDatabase creates a new sync server database

@@ -43,9 +42,6 @@ func NewDatabase(dbProperties *config.DatabaseOptions) (*SyncServerDatasource, e
return nil, err
}
d.writer = sqlutil.NewDummyWriter()
- if err = d.PartitionOffsetStatements.Prepare(d.db, d.writer, "syncapi"); err != nil {
- return nil, err
- }
accountData, err := NewPostgresAccountDataTable(d.db)
if err != nil {
return nil, err
@@ -155,37 +155,6 @@ func (d *Database) Events(ctx context.Context, eventIDs []string) ([]*gomatrixse
return d.StreamEventsToEvents(nil, streamEvents), nil
}

- // GetEventsInStreamingRange retrieves all of the events on a given ordering using the
- // given extremities and limit.
- func (d *Database) GetEventsInStreamingRange(
- ctx context.Context,
- from, to *types.StreamingToken,
- roomID string, eventFilter *gomatrixserverlib.RoomEventFilter,
- backwardOrdering bool,
- ) (events []types.StreamEvent, err error) {
- r := types.Range{
- From: from.PDUPosition,
- To: to.PDUPosition,
- Backwards: backwardOrdering,
- }
- if backwardOrdering {
- // When using backward ordering, we want the most recent events first.
- if events, _, err = d.OutputEvents.SelectRecentEvents(
- ctx, nil, roomID, r, eventFilter, false, false,
- ); err != nil {
- return
- }
- } else {
- // When using forward ordering, we want the least recent events first.
- if events, err = d.OutputEvents.SelectEarlyEvents(
- ctx, nil, roomID, r, eventFilter,
- ); err != nil {
- return
- }
- }
- return events, err
- }
-
func (d *Database) AllJoinedUsersInRooms(ctx context.Context) (map[string][]string, error) {
return d.CurrentRoomState.SelectJoinedUsers(ctx)
}

@@ -513,6 +482,26 @@ func (d *Database) EventPositionInTopology(
return types.TopologyToken{Depth: depth, PDUPosition: stream}, nil
}

+ func (d *Database) StreamToTopologicalPosition(
+ ctx context.Context, roomID string, streamPos types.StreamPosition, backwardOrdering bool,
+ ) (types.TopologyToken, error) {
+ topoPos, err := d.Topology.SelectStreamToTopologicalPosition(ctx, nil, roomID, streamPos, backwardOrdering)
+ switch {
+ case err == sql.ErrNoRows && backwardOrdering: // no events in range, going backward
+ return types.TopologyToken{PDUPosition: streamPos}, nil
+ case err == sql.ErrNoRows && !backwardOrdering: // no events in range, going forward
+ topoPos, streamPos, err = d.Topology.SelectMaxPositionInTopology(ctx, nil, roomID)
+ if err != nil {
+ return types.TopologyToken{}, fmt.Errorf("d.Topology.SelectMaxPositionInTopology: %w", err)
+ }
+ return types.TopologyToken{Depth: topoPos, PDUPosition: streamPos}, nil
+ case err != nil: // some other error happened
+ return types.TopologyToken{}, fmt.Errorf("d.Topology.SelectStreamToTopologicalPosition: %w", err)
+ default:
+ return types.TopologyToken{Depth: topoPos, PDUPosition: streamPos}, nil
+ }
+ }
+
func (d *Database) GetFilter(
ctx context.Context, localpart string, filterID string,
) (*gomatrixserverlib.Filter, error) {
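When the lookup returns no rows, the new StreamToTopologicalPosition above falls back differently per direction: paginating backwards it keeps the stream position, paginating forwards it takes the room's maximum topological position. A compact sketch of that decision table with the two table lookups stubbed out:

package main

import (
	"database/sql"
	"errors"
	"fmt"
)

type topologyToken struct{ Depth, PDUPosition int64 }

// lookupTopoPos and lookupMaxPos stand in for the topology table queries;
// here they are stubbed with fixed behaviour for illustration only.
func lookupTopoPos(streamPos int64) (int64, error) {
	if streamPos > 100 { // pretend nothing is stored beyond stream position 100
		return 0, sql.ErrNoRows
	}
	return streamPos + 1, nil
}

func lookupMaxPos() (depth, streamPos int64, err error) { return 50, 100, nil }

func streamToTopologicalPosition(streamPos int64, backwardOrdering bool) (topologyToken, error) {
	topoPos, err := lookupTopoPos(streamPos)
	switch {
	case errors.Is(err, sql.ErrNoRows) && backwardOrdering:
		// No events in range going backwards: keep the stream position.
		return topologyToken{PDUPosition: streamPos}, nil
	case errors.Is(err, sql.ErrNoRows) && !backwardOrdering:
		// No events in range going forwards: use the latest known position.
		depth, maxStream, err := lookupMaxPos()
		if err != nil {
			return topologyToken{}, fmt.Errorf("lookupMaxPos: %w", err)
		}
		return topologyToken{Depth: depth, PDUPosition: maxStream}, nil
	case err != nil:
		return topologyToken{}, fmt.Errorf("lookupTopoPos: %w", err)
	default:
		return topologyToken{Depth: topoPos, PDUPosition: streamPos}, nil
	}
}

func main() {
	fmt.Println(streamToTopologicalPosition(42, true))
	fmt.Println(streamToTopologicalPosition(200, false))
}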
@@ -514,7 +514,7 @@ func (s *outputRoomEventsStatements) SelectContextBeforeEvent(
if err != nil {
return
}
- defer rows.Close()
+ defer internal.CloseAndLogIfError(ctx, rows, "rows.close() failed")

for rows.Next() {
var (

@@ -550,7 +550,7 @@ func (s *outputRoomEventsStatements) SelectContextAfterEvent(
if err != nil {
return
}
- defer rows.Close()
+ defer internal.CloseAndLogIfError(ctx, rows, "rows.close() failed")

for rows.Next() {
var (
@@ -47,7 +47,7 @@ const selectEventIDsInRangeASCSQL = "" +
"SELECT event_id FROM syncapi_output_room_events_topology" +
" WHERE room_id = $1 AND (" +
"(topological_position > $2 AND topological_position < $3) OR" +
- "(topological_position = $4 AND stream_position <= $5)" +
+ "(topological_position = $4 AND stream_position >= $5)" +
") ORDER BY topological_position ASC, stream_position ASC LIMIT $6"

const selectEventIDsInRangeDESCSQL = "" +

@@ -65,17 +65,22 @@ const selectMaxPositionInTopologySQL = "" +
"SELECT MAX(topological_position), stream_position FROM syncapi_output_room_events_topology" +
" WHERE room_id = $1 ORDER BY stream_position DESC"

const deleteTopologyForRoomSQL = "" +
"DELETE FROM syncapi_output_room_events_topology WHERE room_id = $1"

+ const selectStreamToTopologicalPositionAscSQL = "" +
+ "SELECT topological_position FROM syncapi_output_room_events_topology WHERE room_id = $1 AND stream_position >= $2 ORDER BY topological_position ASC LIMIT 1;"
+
+ const selectStreamToTopologicalPositionDescSQL = "" +
+ "SELECT topological_position FROM syncapi_output_room_events_topology WHERE room_id = $1 AND stream_position <= $2 ORDER BY topological_position DESC LIMIT 1;"
+
type outputRoomEventsTopologyStatements struct {
db *sql.DB
insertEventInTopologyStmt *sql.Stmt
selectEventIDsInRangeASCStmt *sql.Stmt
selectEventIDsInRangeDESCStmt *sql.Stmt
selectPositionInTopologyStmt *sql.Stmt
selectMaxPositionInTopologyStmt *sql.Stmt
deleteTopologyForRoomStmt *sql.Stmt
+ selectStreamToTopologicalPositionAscStmt *sql.Stmt
+ selectStreamToTopologicalPositionDescStmt *sql.Stmt
}

func NewSqliteTopologyTable(db *sql.DB) (tables.Topology, error) {

@@ -101,7 +106,10 @@ func NewSqliteTopologyTable(db *sql.DB) (tables.Topology, error) {
if s.selectMaxPositionInTopologyStmt, err = db.Prepare(selectMaxPositionInTopologySQL); err != nil {
return nil, err
}
- if s.deleteTopologyForRoomStmt, err = db.Prepare(deleteTopologyForRoomSQL); err != nil {
+ if s.selectStreamToTopologicalPositionAscStmt, err = db.Prepare(selectStreamToTopologicalPositionAscSQL); err != nil {
return nil, err
}
+ if s.selectStreamToTopologicalPositionDescStmt, err = db.Prepare(selectStreamToTopologicalPositionDescSQL); err != nil {
+ return nil, err
+ }
return s, nil

@@ -163,6 +171,19 @@ func (s *outputRoomEventsTopologyStatements) SelectPositionInTopology(
return
}

+ // SelectStreamToTopologicalPosition returns the closest position of a given event
+ // in the topology of the room it belongs to from the given stream position.
+ func (s *outputRoomEventsTopologyStatements) SelectStreamToTopologicalPosition(
+ ctx context.Context, txn *sql.Tx, roomID string, streamPos types.StreamPosition, backwardOrdering bool,
+ ) (topoPos types.StreamPosition, err error) {
+ if backwardOrdering {
+ err = s.selectStreamToTopologicalPositionDescStmt.QueryRowContext(ctx, roomID, streamPos).Scan(&topoPos)
+ } else {
+ err = s.selectStreamToTopologicalPositionAscStmt.QueryRowContext(ctx, roomID, streamPos).Scan(&topoPos)
+ }
+ return
+ }
+
func (s *outputRoomEventsTopologyStatements) SelectMaxPositionInTopology(
ctx context.Context, txn *sql.Tx, roomID string,
) (pos types.StreamPosition, spos types.StreamPosition, err error) {
@@ -28,9 +28,8 @@ import (
// both the database for PDUs and caches for EDUs.
type SyncServerDatasource struct {
shared.Database
db *sql.DB
writer sqlutil.Writer
- sqlutil.PartitionOffsetStatements
streamID streamIDStatements
}

@@ -50,9 +49,6 @@ func NewDatabase(dbProperties *config.DatabaseOptions) (*SyncServerDatasource, e
}

func (d *SyncServerDatasource) prepare(dbProperties *config.DatabaseOptions) (err error) {
- if err = d.PartitionOffsetStatements.Prepare(d.db, d.writer, "syncapi"); err != nil {
- return err
- }
if err = d.streamID.prepare(d.db); err != nil {
return err
}
@@ -87,6 +87,8 @@ type Topology interface {
SelectMaxPositionInTopology(ctx context.Context, txn *sql.Tx, roomID string) (depth types.StreamPosition, spos types.StreamPosition, err error)
// DeleteTopologyForRoom removes all topological information for a room. This should only be done when removing the room entirely.
DeleteTopologyForRoom(ctx context.Context, txn *sql.Tx, roomID string) (err error)
+ // SelectStreamToTopologicalPosition converts a stream position to a topological position by finding the nearest topological position in the room.
+ SelectStreamToTopologicalPosition(ctx context.Context, txn *sql.Tx, roomID string, streamPos types.StreamPosition, forward bool) (topoPos types.StreamPosition, err error)
}

type CurrentRoomState interface {
@@ -147,7 +147,6 @@ func (p *PDUStreamProvider) IncrementalSync(
To: to,
Backwards: from > to,
}
- newPos = to

var err error
var stateDeltas []types.StateDelta

@@ -172,14 +171,26 @@ func (p *PDUStreamProvider) IncrementalSync(
req.Rooms[roomID] = gomatrixserverlib.Join
}

+ if len(stateDeltas) == 0 {
+ return to
+ }
+
+ newPos = from
for _, delta := range stateDeltas {
- if err = p.addRoomDeltaToResponse(ctx, req.Device, r, delta, &eventFilter, req.Response); err != nil {
+ var pos types.StreamPosition
+ if pos, err = p.addRoomDeltaToResponse(ctx, req.Device, r, delta, &eventFilter, req.Response); err != nil {
req.Log.WithError(err).Error("d.addRoomDeltaToResponse failed")
- return newPos
+ return to
}
+ switch {
+ case r.Backwards && pos < newPos:
+ fallthrough
+ case !r.Backwards && pos > newPos:
+ newPos = pos
+ }
}

- return r.To
+ return newPos
}

func (p *PDUStreamProvider) addRoomDeltaToResponse(

@@ -189,7 +200,7 @@ func (p *PDUStreamProvider) addRoomDeltaToResponse(
delta types.StateDelta,
eventFilter *gomatrixserverlib.RoomEventFilter,
res *types.Response,
- ) error {
+ ) (types.StreamPosition, error) {
if delta.MembershipPos > 0 && delta.Membership == gomatrixserverlib.Leave {
// make sure we don't leak recent events after the leave event.
// TODO: History visibility makes this somewhat complex to handle correctly. For example:

@@ -204,19 +215,42 @@ func (p *PDUStreamProvider) addRoomDeltaToResponse(
eventFilter, true, true,
)
if err != nil {
- return err
+ return r.From, err
}
recentEvents := p.DB.StreamEventsToEvents(device, recentStreamEvents)
delta.StateEvents = removeDuplicates(delta.StateEvents, recentEvents) // roll back
prevBatch, err := p.DB.GetBackwardTopologyPos(ctx, recentStreamEvents)
if err != nil {
- return err
+ return r.From, err
}

// XXX: should we ever get this far if we have no recent events or state in this room?
// in practice we do for peeks, but possibly not joins?
// If we didn't return any events at all then don't bother doing anything else.
if len(recentEvents) == 0 && len(delta.StateEvents) == 0 {
- return nil
+ return r.To, nil
}

+ // Sort the events so that we can pick out the latest events from both sections.
+ recentEvents = gomatrixserverlib.HeaderedReverseTopologicalOrdering(recentEvents, gomatrixserverlib.TopologicalOrderByPrevEvents)
+ delta.StateEvents = gomatrixserverlib.HeaderedReverseTopologicalOrdering(delta.StateEvents, gomatrixserverlib.TopologicalOrderByAuthEvents)
+
+ // Work out what the highest stream position is for all of the events in this
+ // room that were returned.
+ latestPosition := r.To
+ updateLatestPosition := func(mostRecentEventID string) {
+ if _, pos, err := p.DB.PositionInTopology(ctx, mostRecentEventID); err == nil {
+ switch {
+ case r.Backwards && pos > latestPosition:
+ fallthrough
+ case !r.Backwards && pos < latestPosition:
+ latestPosition = pos
+ }
+ }
+ }
+ if len(recentEvents) > 0 {
+ updateLatestPosition(recentEvents[len(recentEvents)-1].EventID())
+ }
+ if len(delta.StateEvents) > 0 {
+ updateLatestPosition(delta.StateEvents[len(delta.StateEvents)-1].EventID())
+ }
+
switch delta.Membership {

@@ -250,7 +284,7 @@ func (p *PDUStreamProvider) addRoomDeltaToResponse(
res.Rooms.Leave[delta.RoomID] = *lr
}

- return nil
+ return latestPosition, nil
}

func (p *PDUStreamProvider) getJoinResponseForCompleteSync(
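addRoomDeltaToResponse now reports the newest topological position it actually returned, and IncrementalSync only advances the next sync position that far instead of jumping straight to the requested "to" position, so events between the returned batch and the target are not skipped on the next incremental sync. A small sketch of that bookkeeping, independent of the real stream provider types:

package main

import "fmt"

// roomResult is the position of the newest event a room delta contributed.
type roomResult struct {
	room string
	pos  int64
}

// nextPosition starts from `from` and moves towards `to`, but only as far as
// the furthest event actually returned, so nothing between the returned
// events and `to` gets skipped by the next incremental sync.
func nextPosition(from, to int64, backwards bool, results []roomResult) int64 {
	newPos := from
	for _, r := range results {
		switch {
		case backwards && r.pos < newPos:
			newPos = r.pos
		case !backwards && r.pos > newPos:
			newPos = r.pos
		}
	}
	return newPos
}

func main() {
	results := []roomResult{{"!a", 12}, {"!b", 17}}
	fmt.Println(nextPosition(10, 25, false, results)) // 17, not 25
	fmt.Println(nextPosition(25, 10, true, results))  // 12
}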
@@ -49,7 +49,7 @@ func AddPublicRoutes(
federation *gomatrixserverlib.FederationClient,
cfg *config.SyncAPI,
) {
- js, _ := jetstream.Prepare(&cfg.Matrix.JetStream)
+ js, _ := jetstream.Prepare(process, &cfg.Matrix.JetStream)

syncDB, err := storage.NewSyncServerDatasource(&cfg.Database)
if err != nil {

@@ -67,18 +67,18 @@ func AddPublicRoutes(

userAPIStreamEventProducer := &producers.UserAPIStreamEventProducer{
JetStream: js,
- Topic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputStreamEvent),
+ Topic: cfg.Matrix.JetStream.Prefixed(jetstream.OutputStreamEvent),
}

userAPIReadUpdateProducer := &producers.UserAPIReadProducer{
JetStream: js,
- Topic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputReadUpdate),
+ Topic: cfg.Matrix.JetStream.Prefixed(jetstream.OutputReadUpdate),
}

_ = userAPIReadUpdateProducer

keyChangeConsumer := consumers.NewOutputKeyChangeEventConsumer(
- process, cfg, cfg.Matrix.JetStream.TopicFor(jetstream.OutputKeyChangeEvent),
+ process, cfg, cfg.Matrix.JetStream.Prefixed(jetstream.OutputKeyChangeEvent),
js, keyAPI, rsAPI, syncDB, notifier,
streams.DeviceListStreamProvider,
)
@@ -239,7 +239,6 @@ Inbound federation can query room alias directory
Outbound federation can query v2 /send_join
Inbound federation can receive v2 /send_join
Message history can be paginated
Getting messages going forward is limited for a departed room (SPEC-216)
Backfill works correctly with history visibility set to joined
Guest user cannot call /events globally
Guest users can join guest_access rooms
@@ -27,16 +27,16 @@ import (
// UserInternalAPI is the internal API for information about users and devices.
type UserInternalAPI interface {
LoginTokenInternalAPI
+ UserProfileAPI
+ UserRegisterAPI
+ UserAccountAPI
+ UserThreePIDAPI

InputAccountData(ctx context.Context, req *InputAccountDataRequest, res *InputAccountDataResponse) error

- PerformAccountCreation(ctx context.Context, req *PerformAccountCreationRequest, res *PerformAccountCreationResponse) error
- PerformPasswordUpdate(ctx context.Context, req *PerformPasswordUpdateRequest, res *PerformPasswordUpdateResponse) error
- PerformDeviceCreation(ctx context.Context, req *PerformDeviceCreationRequest, res *PerformDeviceCreationResponse) error
PerformDeviceDeletion(ctx context.Context, req *PerformDeviceDeletionRequest, res *PerformDeviceDeletionResponse) error
PerformLastSeenUpdate(ctx context.Context, req *PerformLastSeenUpdateRequest, res *PerformLastSeenUpdateResponse) error
PerformDeviceUpdate(ctx context.Context, req *PerformDeviceUpdateRequest, res *PerformDeviceUpdateResponse) error
- PerformAccountDeactivation(ctx context.Context, req *PerformAccountDeactivationRequest, res *PerformAccountDeactivationResponse) error
PerformOpenIDTokenCreation(ctx context.Context, req *PerformOpenIDTokenCreationRequest, res *PerformOpenIDTokenCreationResponse) error
PerformKeyBackup(ctx context.Context, req *PerformKeyBackupRequest, res *PerformKeyBackupResponse) error
PerformPusherSet(ctx context.Context, req *PerformPusherSetRequest, res *struct{}) error

@@ -44,12 +44,10 @@ type UserInternalAPI interface {
PerformPushRulesPut(ctx context.Context, req *PerformPushRulesPutRequest, res *struct{}) error

QueryKeyBackup(ctx context.Context, req *QueryKeyBackupRequest, res *QueryKeyBackupResponse)
- QueryProfile(ctx context.Context, req *QueryProfileRequest, res *QueryProfileResponse) error
QueryAccessToken(ctx context.Context, req *QueryAccessTokenRequest, res *QueryAccessTokenResponse) error
QueryDevices(ctx context.Context, req *QueryDevicesRequest, res *QueryDevicesResponse) error
QueryAccountData(ctx context.Context, req *QueryAccountDataRequest, res *QueryAccountDataResponse) error
QueryDeviceInfos(ctx context.Context, req *QueryDeviceInfosRequest, res *QueryDeviceInfosResponse) error
- QuerySearchProfiles(ctx context.Context, req *QuerySearchProfilesRequest, res *QuerySearchProfilesResponse) error
QueryOpenIDToken(ctx context.Context, req *QueryOpenIDTokenRequest, res *QueryOpenIDTokenResponse) error
QueryPushers(ctx context.Context, req *QueryPushersRequest, res *QueryPushersResponse) error
QueryPushRules(ctx context.Context, req *QueryPushRulesRequest, res *QueryPushRulesResponse) error

@@ -61,6 +59,37 @@ type UserInternalAPI interface {
UpdateServerNoticeRoomID(ctx context.Context, req *UpdateServerNoticeRoomRequest, res *UpdateServerNoticeRoomResponse) (err error)
}

+ // UserProfileAPI provides functions for getting user profiles
+ type UserProfileAPI interface {
+ QueryProfile(ctx context.Context, req *QueryProfileRequest, res *QueryProfileResponse) error
+ QuerySearchProfiles(ctx context.Context, req *QuerySearchProfilesRequest, res *QuerySearchProfilesResponse) error
+ SetAvatarURL(ctx context.Context, req *PerformSetAvatarURLRequest, res *PerformSetAvatarURLResponse) error
+ SetDisplayName(ctx context.Context, req *PerformUpdateDisplayNameRequest, res *struct{}) error
+ }
+
+ // UserRegisterAPI defines functions for registering accounts
+ type UserRegisterAPI interface {
+ QueryNumericLocalpart(ctx context.Context, res *QueryNumericLocalpartResponse) error
+ QueryAccountAvailability(ctx context.Context, req *QueryAccountAvailabilityRequest, res *QueryAccountAvailabilityResponse) error
+ PerformAccountCreation(ctx context.Context, req *PerformAccountCreationRequest, res *PerformAccountCreationResponse) error
+ PerformDeviceCreation(ctx context.Context, req *PerformDeviceCreationRequest, res *PerformDeviceCreationResponse) error
+ }
+
+ // UserAccountAPI defines functions for changing an account
+ type UserAccountAPI interface {
+ PerformPasswordUpdate(ctx context.Context, req *PerformPasswordUpdateRequest, res *PerformPasswordUpdateResponse) error
+ PerformAccountDeactivation(ctx context.Context, req *PerformAccountDeactivationRequest, res *PerformAccountDeactivationResponse) error
+ QueryAccountByPassword(ctx context.Context, req *QueryAccountByPasswordRequest, res *QueryAccountByPasswordResponse) error
+ }
+
+ // UserThreePIDAPI defines functions for 3PID
+ type UserThreePIDAPI interface {
+ QueryLocalpartForThreePID(ctx context.Context, req *QueryLocalpartForThreePIDRequest, res *QueryLocalpartForThreePIDResponse) error
+ QueryThreePIDsForLocalpart(ctx context.Context, req *QueryThreePIDsForLocalpartRequest, res *QueryThreePIDsForLocalpartResponse) error
+ PerformForgetThreePID(ctx context.Context, req *PerformForgetThreePIDRequest, res *struct{}) error
+ PerformSaveThreePIDAssociation(ctx context.Context, req *PerformSaveThreePIDAssociationRequest, res *struct{}) error
+ }
+
type PerformKeyBackupRequest struct {
UserID string
Version string // optional if modifying a key backup

@@ -559,3 +588,55 @@ type Notification struct {
RoomID string `json:"room_id"` // Required.
TS gomatrixserverlib.Timestamp `json:"ts"` // Required.
}

+ type PerformSetAvatarURLRequest struct {
+ Localpart, AvatarURL string
+ }
+ type PerformSetAvatarURLResponse struct{}
+
+ type QueryNumericLocalpartResponse struct {
+ ID int64
+ }
+
+ type QueryAccountAvailabilityRequest struct {
+ Localpart string
+ }
+
+ type QueryAccountAvailabilityResponse struct {
+ Available bool
+ }
+
+ type QueryAccountByPasswordRequest struct {
+ Localpart, PlaintextPassword string
+ }
+
+ type QueryAccountByPasswordResponse struct {
+ Account *Account
+ Exists bool
+ }
+
+ type PerformUpdateDisplayNameRequest struct {
+ Localpart, DisplayName string
+ }
+
+ type QueryLocalpartForThreePIDRequest struct {
+ ThreePID, Medium string
+ }
+
+ type QueryLocalpartForThreePIDResponse struct {
+ Localpart string
+ }
+
+ type QueryThreePIDsForLocalpartRequest struct {
+ Localpart string
+ }
+
+ type QueryThreePIDsForLocalpartResponse struct {
+ ThreePIDs []authtypes.ThreePID
+ }
+
+ type PerformForgetThreePIDRequest QueryLocalpartForThreePIDRequest
+
+ type PerformSaveThreePIDAssociationRequest struct {
+ ThreePID, Localpart, Medium string
+ }
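The user API is split into focused interfaces (UserProfileAPI, UserRegisterAPI, UserAccountAPI, UserThreePIDAPI) that UserInternalAPI embeds, so callers can depend on just the slice they use. A generic sketch of that pattern with made-up method sets rather than the real Dendrite signatures:

package main

import (
	"context"
	"fmt"
)

// Narrow interfaces: each consumer depends only on what it actually calls.
type ProfileAPI interface {
	QueryProfile(ctx context.Context, userID string) (displayName string, err error)
}

type RegisterAPI interface {
	CreateAccount(ctx context.Context, localpart string) error
}

// The full internal API simply embeds the narrow ones.
type InternalAPI interface {
	ProfileAPI
	RegisterAPI
}

// userAPI implements the full surface...
type userAPI struct{}

func (userAPI) QueryProfile(ctx context.Context, userID string) (string, error) {
	return "Alice", nil
}
func (userAPI) CreateAccount(ctx context.Context, localpart string) error { return nil }

// ...but a component such as a profile handler only asks for ProfileAPI,
// which keeps its dependencies (and its test doubles) small.
func printDisplayName(ctx context.Context, api ProfileAPI, userID string) {
	name, _ := api.QueryProfile(ctx, userID)
	fmt.Println(name)
}

func main() {
	var api InternalAPI = userAPI{}
	printDisplayName(context.Background(), api, "@alice:example.org")
}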
Some files were not shown because too many files have changed in this diff.