Compare commits


11 commits

Author SHA1 Message Date
Matthew Hodgson b265937559 switch to sqlite3_js 2020-01-19 03:55:06 +00:00
Neil Alexander a4fdb6fe2d Rethink transactions a lot 2020-01-14 18:46:42 +00:00
Neil Alexander 49ff4d6d8f All sorts of debugging information and tweaks - still no joy 2020-01-14 15:17:14 +00:00
Neil Alexander 7a7be4f0ac Fix state block schema a bit 2020-01-13 14:53:03 +00:00
Neil Alexander 8bb8642560 Separate out INSERT/SELECT statements in place of RETURNING in SQLite 2020-01-10 18:28:07 +00:00
Neil Alexander 3852f5c714 Merge branch 'master' into neilalexander/sqlite-roomserver 2020-01-10 12:31:22 +00:00
Neil Alexander 30057b399b Fix typo 2020-01-09 17:44:21 +00:00
Neil Alexander c40850d218 Merge branch 'master' into neilalexander/sqlite-roomserver 2020-01-09 17:21:31 +00:00
Neil Alexander 6c64aa0685 Merge branch 'master' into neilalexander/sqlite-roomserver 2020-01-09 17:11:36 +00:00
Neil Alexander 7a71a59dc7 Merge branch 'master' into neilalexander/sqlite-roomserver 2020-01-09 17:05:44 +00:00
Neil Alexander c7000f343e Some SQLite support for roomserver 2020-01-09 16:50:11 +00:00
966 changed files with 25966 additions and 139296 deletions


@@ -1,2 +0,0 @@
bin
*.wasm

18 .editorconfig Normal file

@@ -0,0 +1,18 @@
root = true
[*]
charset = utf-8
end_of_line = lf
insert_final_newline = true
trim_trailing_whitespace = true
[*.go]
indent_style = tab
indent_size = 4
[*.md]
trim_trailing_whitespace = false
[*.{yml,yaml}]
indent_style = space


@@ -1,59 +0,0 @@
on:
push:
tags:
- 'v*'
env:
GHCR_NAMESPACE: sigb.us
PLATFORMS: linux/amd64
FORGEJO_USER: signaryk
jobs:
monolith:
name: Monolith image
runs-on: docker
container:
image: ghcr.io/catthehacker/ubuntu:act-latest
permissions:
contents: read
packages: write
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Get release tag & build flags
if: github.event_name == 'release' # Only for GitHub releases
run: |
echo "RELEASE_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to sigb.us container registry
uses: docker/login-action@v3
with:
registry: git.sigb.us
username: ${{ env.FORGEJO_USER }}
password: ${{ secrets.FORGEJO_TOKEN }}
- name: Build main monolith image
id: docker_build_monolith
uses: docker/build-push-action@v3
with:
context: .
platforms: ${{ env.PLATFORMS }}
push: true
tags: |
git.sigb.us/${{ env.GHCR_NAMESPACE }}/dendrite:${{ github.ref_name }}
git.sigb.us/${{ env.GHCR_NAMESPACE }}/dendrite:latest
git.sigb.us/${{ env.GHCR_NAMESPACE }}/dendrite:devel
- name: Build release monolith image
if: github.event_name == 'release' # Only for GitHub releases
id: docker_build_monolith_release
uses: docker/build-push-action@v3
with:
context: .
platforms: ${{ env.PLATFORMS }}
push: true
tags: |
git.sigb.us/${{ env.GHCR_NAMESPACE }}/dendrite:latest
git.sigb.us/${{ env.GHCR_NAMESPACE }}/dendrite:stable
git.sigb.us/${{ env.GHCR_NAMESPACE }}/dendrite:${{ env.RELEASE_VERSION }}

1 .github/CODEOWNERS vendored

@@ -1 +0,0 @@
* @matrix-org/dendrite-core

4 .github/FUNDING.yml vendored

@@ -1,4 +0,0 @@
# One username per supported platform and one custom link
patreon: matrixdotorg
liberapay: matrixdotorg
custom: https://paypal.me/matrixdotorg


@@ -1,67 +0,0 @@
---
name: Bug report
about: Create a report to help us improve
---
<!--
All bug reports must provide the following background information
Text between <!-- and --> marks will be invisible in the report.
IF YOUR ISSUE IS CONSIDERED A SECURITY VULNERABILITY THEN PLEASE STOP
AND DO NOT POST IT AS A GITHUB ISSUE! Please report the issue responsibly by
disclosing in private by email to security@matrix.org instead. For more details, please
see: https://www.matrix.org/security-disclosure-policy/
-->
### Background information
<!-- Please include versions of all software when known e.g database versions, docker versions, client versions -->
- **Dendrite version or git SHA**:
- **SQLite3 or Postgres?**:
- **Running in Docker?**:
- **`go version`**:
- **Client used (if applicable)**:
### Description
- **What** is the problem:
- **Who** is affected:
- **How** is this bug manifesting:
- **When** did this first appear:
<!--
Examples of good descriptions:
- What: "I cannot log in, getting HTTP 500 responses"
- Who: "Clients on my server"
- How: "Errors in the logs saying 500 internal server error"
- When: "After upgrading to 0.3.0"
- What: "Dendrite ran out of memory"
- Who: "Server admin"
- How: "Lots of logs about device change updates"
- When: "After my server joined Matrix HQ"
Examples of bad descriptions:
- What: "Can't send messages" - This is bad because it isn't specfic enough. Which endpoint isn't working and what is the response code? Does the message send but encryption fail?
- Who: "Me" - Who are you? Running the server or a user on a Dendrite server?
- How: "Can't send messages" - Same as "What".
- When: "1 day ago" - It's impossible to know what changed 1 day ago without further input.
-->
### Steps to reproduce
<!-- Please try reproducing this bug before submitting it. Issues which cannot be reproduced risk being closed. -->
- list the steps
- that reproduce the bug
- using hyphens as bullet points
<!--
Describe how what happens differs from what you expected.
If you can identify any relevant log snippets from server logs, please include
those (please be careful to remove any personal or private data). Please surround them with
``` (three backticks, on a line on their own), so that they are formatted legibly.
Alternatively, please send logs to @kegan:matrix.org, @s7evink:matrix.org or @devonh:one.ems.host
with a link to the respective Github issue, thanks!
-->


@@ -1,14 +0,0 @@
---
name: Feature request
about: Suggest an idea for this project
---
<!--
Please do not open feature requests for missing parts of the Matrix specification.
We are tracking those features under https://github.com/matrix-org/dendrite/issues?q=is%3Aissue+is%3Aopen+label%3Aare-we-synapse-yet
-->
**Description:**
<!-- Describe here the feature you are requesting. -->


@@ -1,8 +1,6 @@
### Pull Request Checklist
<!-- Please read https://matrix-org.github.io/dendrite/development/contributing before submitting your pull request -->
<!-- Please read CONTRIBUTING.md before submitting your pull request -->
* [ ] I have added Go unit tests or [Complement integration tests](https://github.com/matrix-org/complement) for this PR _or_ I have justified why this PR doesn't need tests
* [ ] Pull request includes a [sign off below using a legally identifiable name](https://matrix-org.github.io/dendrite/development/contributing#sign-off) _or_ I have already signed off privately
Signed-off-by: `Your Name <your@email.example.org>`
* [ ] I have added any new tests that need to pass to `testfile` as specified in [docs/sytest.md](https://github.com/matrix-org/dendrite/blob/master/docs/sytest.md)
* [ ] Pull request includes a [sign off](https://github.com/matrix-org/dendrite/blob/master/CONTRIBUTING.md#sign-off)

20 .github/codecov.yaml vendored

@@ -1,20 +0,0 @@
flag_management:
default_rules:
carryforward: true
coverage:
status:
project:
default:
target: auto
threshold: 0.1%
base: auto
flags:
- unittests
patch:
default:
target: 75%
threshold: 0%
base: auto
flags:
- unittests


@@ -1,497 +0,0 @@
name: Dendrite
on:
push:
branches:
- main
paths:
- '**.go' # only execute on changes to go files
- 'go.sum' # or dependency updates
- '.github/workflows/**' # or workflow changes
pull_request:
paths:
- '**.go'
- 'go.sum' # or dependency updates
- '.github/workflows/**'
release:
types: [published]
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
wasm:
name: WASM build test
timeout-minutes: 5
runs-on: ubuntu-latest
if: ${{ false }} # disable for now
steps:
- uses: actions/checkout@v4
- name: Install Go
uses: actions/setup-go@v4
with:
go-version: "stable"
cache: true
- name: Install Node
uses: actions/setup-node@v2
with:
node-version: 14
- uses: actions/cache@v4
with:
path: ~/.npm
key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
restore-keys: |
${{ runner.os }}-node-
- name: Reconfigure Git to use HTTPS auth for repo packages
run: >
git config --global url."https://github.com/".insteadOf
ssh://git@github.com/
- name: Install test dependencies
working-directory: ./test/wasm
run: npm ci
- name: Test
run: ./test-dendritejs.sh
# Run golangci-lint
lint:
timeout-minutes: 5
name: Linting
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install libolm
run: sudo apt-get install libolm-dev libolm3
- name: Install Go
uses: actions/setup-go@v4
with:
go-version: "stable"
- name: golangci-lint
uses: golangci/golangci-lint-action@v3
# run go test with different go versions
test:
timeout-minutes: 10
name: Unit tests
runs-on: ubuntu-latest
# Service containers to run with `container-job`
services:
# Label used to access the service container
postgres:
# Docker Hub image
image: postgres:13-alpine
# Provide the password for postgres
env:
POSTGRES_USER: postgres
POSTGRES_PASSWORD: postgres
POSTGRES_DB: dendrite
ports:
# Maps tcp port 5432 on service container to the host
- 5432:5432
# Set health checks to wait until postgres has started
options: >-
--health-cmd pg_isready
--health-interval 10s
--health-timeout 5s
--health-retries 5
steps:
- uses: actions/checkout@v4
- name: Install libolm
run: sudo apt-get install libolm-dev libolm3
- name: Setup go
uses: actions/setup-go@v4
with:
go-version: "stable"
- uses: actions/cache@v4
# manually set up caches, as they otherwise clash with different steps using setup-go with cache=true
with:
path: |
~/.cache/go-build
~/go/pkg/mod
key: ${{ runner.os }}-go-stable-unit-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-stable-unit-
- name: Set up gotestfmt
uses: gotesttools/gotestfmt-action@v2
with:
# Optional: pass GITHUB_TOKEN to avoid rate limiting.
token: ${{ secrets.GITHUB_TOKEN }}
- run: go test -json -v ./... 2>&1 | gotestfmt -hide all
env:
POSTGRES_HOST: localhost
POSTGRES_USER: postgres
POSTGRES_PASSWORD: postgres
POSTGRES_DB: dendrite
# build Dendrite for linux with different architectures and go versions
build:
name: Build for Linux
timeout-minutes: 10
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
goos: ["linux"]
goarch: ["amd64", "386"]
steps:
- uses: actions/checkout@v4
- name: Setup go
uses: actions/setup-go@v4
with:
go-version: "stable"
- uses: actions/cache@v4
with:
path: |
~/.cache/go-build
~/go/pkg/mod
key: ${{ runner.os }}-go-stable-${{ matrix.goos }}-${{ matrix.goarch }}-${{ hashFiles('**/go.sum') }}
restore-keys: |
key: ${{ runner.os }}-go-stable-${{ matrix.goos }}-${{ matrix.goarch }}-
- name: Install dependencies x86
if: ${{ matrix.goarch == '386' }}
run: sudo apt update && sudo apt-get install -y gcc-multilib
- env:
GOOS: ${{ matrix.goos }}
GOARCH: ${{ matrix.goarch }}
CGO_ENABLED: 1
CGO_CFLAGS: -fno-stack-protector
run: go build -trimpath -v -o "bin/" ./cmd/...
# build for Windows 64-bit
build_windows:
name: Build for Windows
timeout-minutes: 10
runs-on: ubuntu-latest
strategy:
matrix:
goos: ["windows"]
goarch: ["amd64"]
steps:
- uses: actions/checkout@v4
- name: Setup Go
uses: actions/setup-go@v4
with:
go-version: "stable"
- uses: actions/cache@v4
with:
path: |
~/.cache/go-build
~/go/pkg/mod
key: ${{ runner.os }}-go-stable-${{ matrix.goos }}-${{ matrix.goarch }}-${{ hashFiles('**/go.sum') }}
restore-keys: |
key: ${{ runner.os }}-go-stable-${{ matrix.goos }}-${{ matrix.goarch }}-
- name: Install dependencies
run: sudo apt update && sudo apt install -y gcc-mingw-w64-x86-64 # install required gcc
- env:
GOOS: ${{ matrix.goos }}
GOARCH: ${{ matrix.goarch }}
CGO_ENABLED: 1
CC: "/usr/bin/x86_64-w64-mingw32-gcc"
run: go build -trimpath -v -o "bin/" ./cmd/...
# Dummy step to gate other tests on without repeating the whole list
initial-tests-done:
name: Initial tests passed
needs: [lint, test, build, build_windows]
runs-on: ubuntu-latest
if: ${{ !cancelled() }} # Run this even if prior jobs were skipped
steps:
- name: Check initial tests passed
uses: re-actors/alls-green@release/v1
with:
jobs: ${{ toJSON(needs) }}
# run go test with different go versions
integration:
timeout-minutes: 20
needs: initial-tests-done
name: Integration tests
runs-on: ubuntu-latest
# Service containers to run with `container-job`
services:
# Label used to access the service container
postgres:
# Docker Hub image
image: postgres:13-alpine
# Provide the password for postgres
env:
POSTGRES_USER: postgres
POSTGRES_PASSWORD: postgres
POSTGRES_DB: dendrite
ports:
# Maps tcp port 5432 on service container to the host
- 5432:5432
# Set health checks to wait until postgres has started
options: >-
--health-cmd pg_isready
--health-interval 10s
--health-timeout 5s
--health-retries 5
steps:
- uses: actions/checkout@v4
- name: Install libolm
run: sudo apt-get install libolm-dev libolm3
- name: Setup go
uses: actions/setup-go@v4
with:
go-version: "stable"
- name: Set up gotestfmt
uses: gotesttools/gotestfmt-action@v2
with:
# Optional: pass GITHUB_TOKEN to avoid rate limiting.
token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions/cache@v4
with:
path: |
~/.cache/go-build
~/go/pkg/mod
key: ${{ runner.os }}-go-stable-test-race-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-stable-test-race-
- run: go test -race -json -v -coverpkg=./... -coverprofile=cover.out $(go list ./... | grep -v /cmd/dendrite*) 2>&1 | gotestfmt -hide all
env:
POSTGRES_HOST: localhost
POSTGRES_USER: postgres
POSTGRES_PASSWORD: postgres
POSTGRES_DB: dendrite
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v4
with:
flags: unittests
fail_ci_if_error: true
token: ${{ secrets.CODECOV_TOKEN }}
# run database upgrade tests
upgrade_test:
name: Upgrade tests
timeout-minutes: 20
needs: initial-tests-done
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Setup go
uses: actions/setup-go@v4
with:
go-version: "stable"
cache: true
- uses: actions/cache@v4
with:
path: |
~/.cache/go-build
~/go/pkg/mod
key: ${{ runner.os }}-go-upgrade-test-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-upgrade-test-
- name: Docker version
run: docker version
- name: Build upgrade-tests
run: go build ./cmd/dendrite-upgrade-tests
- name: Test upgrade (PostgreSQL)
run: ./dendrite-upgrade-tests --head .
- name: Test upgrade (SQLite)
run: ./dendrite-upgrade-tests --sqlite --head .
# run database upgrade tests, skipping over one version
upgrade_test_direct:
name: Upgrade tests from HEAD-2
timeout-minutes: 20
needs: initial-tests-done
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Setup go
uses: actions/setup-go@v4
with:
go-version: "stable"
cache: true
- uses: actions/cache@v4
with:
path: |
~/.cache/go-build
~/go/pkg/mod
key: ${{ runner.os }}-go-upgrade-direct-test-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-upgrade-direct-test-
- name: Docker version
run: docker version
- name: Build upgrade-tests
run: go build ./cmd/dendrite-upgrade-tests
- name: Test upgrade (PostgreSQL)
run: ./dendrite-upgrade-tests -direct -from HEAD-2 --head .
- name: Test upgrade (SQLite)
run: ./dendrite-upgrade-tests -direct -from HEAD-2 --head .
# run Sytest in different variations
sytest:
timeout-minutes: 20
needs: initial-tests-done
name: "Sytest (${{ matrix.label }})"
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
include:
- label: SQLite native
- label: SQLite Cgo
cgo: 1
- label: PostgreSQL
postgres: postgres
container:
image: matrixdotorg/sytest-dendrite
volumes:
- ${{ github.workspace }}:/src
- /root/.cache/go-build:/github/home/.cache/go-build
- /root/.cache/go-mod:/gopath/pkg/mod
env:
POSTGRES: ${{ matrix.postgres && 1}}
SYTEST_BRANCH: ${{ github.head_ref }}
CGO_ENABLED: ${{ matrix.cgo && 1 }}
steps:
- uses: actions/checkout@v4
- uses: actions/cache@v4
with:
path: |
~/.cache/go-build
/gopath/pkg/mod
key: ${{ runner.os }}-go-sytest-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-sytest-
- name: Run Sytest
run: /bootstrap.sh dendrite
working-directory: /src
- name: Summarise results.tap
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
- name: Sytest List Maintenance
if: ${{ always() }}
run: /src/show-expected-fail-tests.sh /logs/results.tap /src/sytest-whitelist /src/sytest-blacklist
continue-on-error: true # not fatal
- name: Are We Synapse Yet?
if: ${{ always() }}
run: /src/are-we-synapse-yet.py /logs/results.tap -v
continue-on-error: true # not fatal
- name: Upload Sytest logs
uses: actions/upload-artifact@v4
if: ${{ always() }}
with:
name: Sytest Logs - ${{ job.status }} - (Dendrite, ${{ join(matrix.*, ', ') }})
path: |
/logs/results.tap
/logs/**/*.log*
# run Complement
complement:
name: "Complement (${{ matrix.label }})"
timeout-minutes: 20
needs: initial-tests-done
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
include:
- label: SQLite native
cgo: 0
- label: SQLite Cgo
cgo: 1
- label: PostgreSQL
postgres: Postgres
cgo: 0
steps:
# Env vars are set via a file given by $GITHUB_PATH. We need both Go 1.17 and GOPATH on the env to run Complement.
# See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#adding-a-system-path
- name: "Set Go Version"
run: |
echo "$GOROOT_1_17_X64/bin" >> $GITHUB_PATH
echo "~/go/bin" >> $GITHUB_PATH
- name: "Install Complement Dependencies"
# We don't need to install Go because it is included on the Ubuntu 20.04 image:
# See https://github.com/actions/virtual-environments/blob/main/images/linux/Ubuntu2004-Readme.md specifically GOROOT_1_17_X64
run: |
sudo apt-get update && sudo apt-get install -y libolm3 libolm-dev
go install github.com/gotesttools/gotestfmt/v2/cmd/gotestfmt@latest
- name: Run actions/checkout@v4 for dendrite
uses: actions/checkout@v4
with:
path: dendrite
# Attempt to check out the same branch of Complement as the PR. If it
# doesn't exist, fallback to main.
- name: Checkout complement
shell: bash
run: |
mkdir -p complement
# Attempt to use the version of complement which best matches the current
# build. Depending on whether this is a PR or release, etc. we need to
# use different fallbacks.
#
# 1. First check if there's a similarly named branch (GITHUB_HEAD_REF
# for pull requests, otherwise GITHUB_REF).
# 2. Attempt to use the base branch, e.g. when merging into release-vX.Y
# (GITHUB_BASE_REF for pull requests).
# 3. Use the default complement branch ("master").
for BRANCH_NAME in "$GITHUB_HEAD_REF" "$GITHUB_BASE_REF" "${GITHUB_REF#refs/heads/}" "master"; do
# Skip empty branch names and merge commits.
if [[ -z "$BRANCH_NAME" || $BRANCH_NAME =~ ^refs/pull/.* ]]; then
continue
fi
(wget -O - "https://github.com/matrix-org/complement/archive/$BRANCH_NAME.tar.gz" | tar -xz --strip-components=1 -C complement) && break
done
# Build initial Dendrite image
- run: docker build --build-arg=CGO=${{ matrix.cgo }} -t complement-dendrite:${{ matrix.postgres }}${{ matrix.cgo }} -f build/scripts/Complement${{ matrix.postgres }}.Dockerfile .
working-directory: dendrite
env:
DOCKER_BUILDKIT: 1
# Run Complement
- run: |
set -o pipefail &&
go test -v -json -tags dendrite_blacklist ./tests ./tests/csapi 2>&1 | gotestfmt -hide all
shell: bash
name: Run Complement Tests
env:
COMPLEMENT_BASE_IMAGE: complement-dendrite:${{ matrix.postgres }}${{ matrix.cgo }}
COMPLEMENT_SHARE_ENV_PREFIX: COMPLEMENT_DENDRITE_
working-directory: complement
integration-tests-done:
name: Integration tests passed
needs:
[
initial-tests-done,
upgrade_test,
upgrade_test_direct,
sytest,
complement,
integration
]
runs-on: ubuntu-latest
if: ${{ !cancelled() }} # Run this even if prior jobs were skipped
steps:
- name: Check integration tests passed
uses: re-actors/alls-green@release/v1
with:
jobs: ${{ toJSON(needs) }}
update-docker-images:
name: Update Docker images
permissions:
packages: write
contents: read
security-events: write # To upload Trivy sarif files
if: github.repository == 'matrix-org/dendrite' && github.ref_name == 'main'
needs: [integration-tests-done]
uses: matrix-org/dendrite/.github/workflows/docker.yml@main
secrets:
DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}


@@ -1,213 +0,0 @@
# Based on https://github.com/docker/build-push-action
name: "Docker"
on:
release: # A GitHub release was published
types: [published]
workflow_dispatch: # A build was manually requested
workflow_call: # Another pipeline called us
secrets:
DOCKER_TOKEN:
required: true
env:
DOCKER_NAMESPACE: matrixdotorg
DOCKER_HUB_USER: dendritegithub
GHCR_NAMESPACE: matrix-org
PLATFORMS: linux/amd64,linux/arm64,linux/arm/v7
jobs:
monolith:
name: Monolith image
runs-on: ubuntu-latest
permissions:
contents: read
packages: write
security-events: write # To upload Trivy sarif files
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Get release tag & build flags
if: github.event_name == 'release' # Only for GitHub releases
run: |
echo "RELEASE_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ env.DOCKER_HUB_USER }}
password: ${{ secrets.DOCKER_TOKEN }}
- name: Login to GitHub Containers
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build main monolith image
if: github.ref_name == 'main'
id: docker_build_monolith
uses: docker/build-push-action@v3
with:
cache-from: type=registry,ref=ghcr.io/${{ env.GHCR_NAMESPACE }}/dendrite-monolith:buildcache
cache-to: type=registry,ref=ghcr.io/${{ env.GHCR_NAMESPACE }}/dendrite-monolith:buildcache,mode=max
context: .
platforms: ${{ env.PLATFORMS }}
push: true
tags: |
${{ env.DOCKER_NAMESPACE }}/dendrite-monolith:${{ github.ref_name }}
ghcr.io/${{ env.GHCR_NAMESPACE }}/dendrite-monolith:${{ github.ref_name }}
- name: Build release monolith image
if: github.event_name == 'release' # Only for GitHub releases
id: docker_build_monolith_release
uses: docker/build-push-action@v3
with:
cache-from: type=gha
cache-to: type=gha,mode=max
context: .
platforms: ${{ env.PLATFORMS }}
push: true
tags: |
${{ env.DOCKER_NAMESPACE }}/dendrite-monolith:latest
${{ env.DOCKER_NAMESPACE }}/dendrite-monolith:${{ env.RELEASE_VERSION }}
ghcr.io/${{ env.GHCR_NAMESPACE }}/dendrite-monolith:latest
ghcr.io/${{ env.GHCR_NAMESPACE }}/dendrite-monolith:${{ env.RELEASE_VERSION }}
- name: Run Trivy vulnerability scanner
uses: aquasecurity/trivy-action@master
with:
image-ref: ghcr.io/${{ env.GHCR_NAMESPACE }}/dendrite-monolith:${{ github.ref_name }}
format: "sarif"
output: "trivy-results.sarif"
- name: Upload Trivy scan results to GitHub Security tab
uses: github/codeql-action/upload-sarif@v2
with:
sarif_file: "trivy-results.sarif"
demo-pinecone:
name: Pinecone demo image
runs-on: ubuntu-latest
permissions:
contents: read
packages: write
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Get release tag & build flags
if: github.event_name == 'release' # Only for GitHub releases
run: |
echo "RELEASE_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ env.DOCKER_HUB_USER }}
password: ${{ secrets.DOCKER_TOKEN }}
- name: Login to GitHub Containers
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build main Pinecone demo image
if: github.ref_name == 'main'
id: docker_build_demo_pinecone
uses: docker/build-push-action@v3
with:
cache-from: type=gha
cache-to: type=gha,mode=max
context: .
file: ./build/docker/Dockerfile.demo-pinecone
platforms: ${{ env.PLATFORMS }}
push: true
tags: |
${{ env.DOCKER_NAMESPACE }}/dendrite-demo-pinecone:${{ github.ref_name }}
ghcr.io/${{ env.GHCR_NAMESPACE }}/dendrite-demo-pinecone:${{ github.ref_name }}
- name: Build release Pinecone demo image
if: github.event_name == 'release' # Only for GitHub releases
id: docker_build_demo_pinecone_release
uses: docker/build-push-action@v3
with:
cache-from: type=gha
cache-to: type=gha,mode=max
context: .
file: ./build/docker/Dockerfile.demo-pinecone
platforms: ${{ env.PLATFORMS }}
push: true
tags: |
${{ env.DOCKER_NAMESPACE }}/dendrite-demo-yggdrasil:latest
${{ env.DOCKER_NAMESPACE }}/dendrite-demo-yggdrasil:${{ env.RELEASE_VERSION }}
ghcr.io/${{ env.GHCR_NAMESPACE }}/dendrite-demo-yggdrasil:latest
ghcr.io/${{ env.GHCR_NAMESPACE }}/dendrite-demo-yggdrasil:${{ env.RELEASE_VERSION }}
demo-yggdrasil:
name: Yggdrasil demo image
runs-on: ubuntu-latest
permissions:
contents: read
packages: write
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Get release tag & build flags
if: github.event_name == 'release' # Only for GitHub releases
run: |
echo "RELEASE_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ env.DOCKER_HUB_USER }}
password: ${{ secrets.DOCKER_TOKEN }}
- name: Login to GitHub Containers
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build main Yggdrasil demo image
if: github.ref_name == 'main'
id: docker_build_demo_yggdrasil
uses: docker/build-push-action@v3
with:
cache-from: type=gha
cache-to: type=gha,mode=max
context: .
file: ./build/docker/Dockerfile.demo-yggdrasil
platforms: ${{ env.PLATFORMS }}
push: true
tags: |
${{ env.DOCKER_NAMESPACE }}/dendrite-demo-yggdrasil:${{ github.ref_name }}
ghcr.io/${{ env.GHCR_NAMESPACE }}/dendrite-demo-yggdrasil:${{ github.ref_name }}
- name: Build release Yggdrasil demo image
if: github.event_name == 'release' # Only for GitHub releases
id: docker_build_demo_yggdrasil_release
uses: docker/build-push-action@v3
with:
cache-from: type=gha
cache-to: type=gha,mode=max
context: .
file: ./build/docker/Dockerfile.demo-yggdrasil
platforms: ${{ env.PLATFORMS }}
push: true
tags: |
${{ env.DOCKER_NAMESPACE }}/dendrite-demo-yggdrasil:latest
${{ env.DOCKER_NAMESPACE }}/dendrite-demo-yggdrasil:${{ env.RELEASE_VERSION }}
ghcr.io/${{ env.GHCR_NAMESPACE }}/dendrite-demo-yggdrasil:latest
ghcr.io/${{ env.GHCR_NAMESPACE }}/dendrite-demo-yggdrasil:${{ env.RELEASE_VERSION }}


@@ -1,52 +0,0 @@
# Sample workflow for building and deploying a Jekyll site to GitHub Pages
name: Deploy GitHub Pages dependencies preinstalled
on:
# Runs on pushes targeting the default branch
push:
branches: ["gh-pages"]
paths:
- 'docs/**' # only execute if we have docs changes
# Allows you to run this workflow manually from the Actions tab
workflow_dispatch:
# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
permissions:
contents: read
pages: write
id-token: write
# Allow one concurrent deployment
concurrency:
group: "pages"
cancel-in-progress: true
jobs:
# Build job
build:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup Pages
uses: actions/configure-pages@v2
- name: Build with Jekyll
uses: actions/jekyll-build-pages@v1
with:
source: ./docs
destination: ./_site
- name: Upload artifact
uses: actions/upload-pages-artifact@v1
# Deployment job
deploy:
environment:
name: github-pages
url: ${{ steps.deployment.outputs.page_url }}
runs-on: ubuntu-latest
needs: build
steps:
- name: Deploy to GitHub Pages
id: deployment
uses: actions/deploy-pages@v1


@@ -1,41 +0,0 @@
name: Release Charts
on:
push:
branches:
- main
paths:
- 'helm/**' # only execute if we have helm chart changes
workflow_dispatch:
jobs:
release:
# depending on default permission settings for your org (contents being read-only or read-write for workloads), you will have to add permissions
# see: https://docs.github.com/en/actions/security-guides/automatic-token-authentication#modifying-the-permissions-for-the-github_token
permissions:
contents: write
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Configure Git
run: |
git config user.name "$GITHUB_ACTOR"
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
- name: Install Helm
uses: azure/setup-helm@v3
with:
version: v3.10.0
- name: Run chart-releaser
uses: helm/chart-releaser-action@ed43eb303604cbc0eeec8390544f7748dc6c790d # specific commit, since `mark_as_latest` is not yet in a release
env:
CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
with:
config: helm/cr.yaml
charts_dir: helm/
mark_as_latest: false


@@ -1,91 +0,0 @@
name: k8s
on:
push:
branches: ["main"]
paths:
- 'helm/**' # only execute if we have helm chart changes
pull_request:
branches: ["main"]
paths:
- 'helm/**'
jobs:
lint:
name: Lint Helm chart
runs-on: ubuntu-latest
outputs:
changed: ${{ steps.list-changed.outputs.changed }}
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- uses: azure/setup-helm@v3
with:
version: v3.10.0
- uses: actions/setup-python@v4
with:
python-version: 3.11
check-latest: true
- uses: helm/chart-testing-action@v2.3.1
- name: Get changed status
id: list-changed
run: |
changed=$(ct list-changed --config helm/ct.yaml --target-branch ${{ github.event.repository.default_branch }})
if [[ -n "$changed" ]]; then
echo "::set-output name=changed::true"
fi
- name: Run lint
run: ct lint --config helm/ct.yaml
# only bother to run if lint step reports a change to the helm chart
install:
needs:
- lint
if: ${{ needs.lint.outputs.changed == 'true' }}
name: Install Helm charts
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
ref: ${{ inputs.checkoutCommit }}
- name: Install Kubernetes tools
uses: yokawasa/action-setup-kube-tools@v0.8.2
with:
setup-tools: |
helmv3
helm: "3.10.3"
- uses: actions/setup-python@v4
with:
python-version: "3.10"
- name: Set up chart-testing
uses: helm/chart-testing-action@v2.3.1
- name: Create k3d cluster
uses: nolar/setup-k3d-k3s@v1
with:
version: v1.28
- name: Remove node taints
run: |
kubectl taint --all=true nodes node.cloudprovider.kubernetes.io/uninitialized- || true
- name: Run chart-testing (install)
run: ct install --config helm/ct.yaml
# Install the chart using helm directly and test with create-account
- name: Install chart
run: |
helm install --values helm/dendrite/ci/ct-postgres-sharedsecret-values.yaml dendrite helm/dendrite
- name: Wait for Postgres and Dendrite to be up
run: |
kubectl wait --for=condition=ready --timeout=90s pod -l app.kubernetes.io/name=postgresql || kubectl get pods -A
kubectl wait --for=condition=ready --timeout=90s pod -l app.kubernetes.io/name=dendrite || kubectl get pods -A
kubectl get pods -A
kubectl get services
kubectl get ingress
kubectl logs -l app.kubernetes.io/name=dendrite
- name: Run create account
run: |
podName=$(kubectl get pods -l app.kubernetes.io/name=dendrite -o name)
kubectl exec "${podName}" -- /usr/bin/create-account -username alice -password somerandompassword


@@ -1,322 +0,0 @@
name: Scheduled
on:
schedule:
- cron: '0 0 * * *' # every day at midnight
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
check_date: # https://stackoverflow.com/questions/63014786/how-to-schedule-a-github-actions-nightly-build-but-run-it-only-when-there-where
runs-on: ubuntu-latest
name: Check latest commit
outputs:
should_run: ${{ steps.should_run.outputs.should_run }}
steps:
- uses: actions/checkout@v4
- name: print latest_commit
run: echo ${{ github.sha }}
- id: should_run
continue-on-error: true
name: check latest commit is less than a day old
if: ${{ github.event_name == 'schedule' }}
run: test -z $(git rev-list --after="24 hours" ${{ github.sha }}) && echo "::set-output name=should_run::false"
# run Sytest in different variations
sytest:
needs: check_date
if: ${{ needs.check_date.outputs.should_run != 'false' }}
timeout-minutes: 60
name: "Sytest (${{ matrix.label }})"
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
include:
- label: SQLite native
- label: SQLite Cgo
cgo: 1
- label: PostgreSQL
postgres: postgres
container:
image: matrixdotorg/sytest-dendrite:latest
volumes:
- ${{ github.workspace }}:/src
- /root/.cache/go-build:/github/home/.cache/go-build
- /root/.cache/go-mod:/gopath/pkg/mod
env:
POSTGRES: ${{ matrix.postgres && 1}}
SYTEST_BRANCH: ${{ github.head_ref }}
RACE_DETECTION: 1
COVER: 1
steps:
- uses: actions/checkout@v4
- uses: actions/cache@v4
with:
path: |
~/.cache/go-build
/gopath/pkg/mod
key: ${{ runner.os }}-go-sytest-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-sytest-
- name: Run Sytest
run: /bootstrap.sh dendrite
working-directory: /src
- name: Summarise results.tap
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
- name: Sytest List Maintenance
if: ${{ always() }}
run: /src/show-expected-fail-tests.sh /logs/results.tap /src/sytest-whitelist /src/sytest-blacklist
continue-on-error: true # not fatal
- name: Are We Synapse Yet?
if: ${{ always() }}
run: /src/are-we-synapse-yet.py /logs/results.tap -v
continue-on-error: true # not fatal
- name: Upload Sytest logs
uses: actions/upload-artifact@v4
if: ${{ always() }}
with:
name: Sytest Logs - ${{ job.status }} - (Dendrite ${{ join(matrix.*, ' ') }})
path: |
/logs/results.tap
/logs/**/*.log*
/logs/**/covdatafiles/**
sytest-coverage:
timeout-minutes: 5
name: "Sytest Coverage"
runs-on: ubuntu-latest
needs: [ sytest, check_date ] # only run once Sytest is done and there was a commit
if: ${{ always() && needs.check_date.outputs.should_run != 'false' }}
steps:
- uses: actions/checkout@v4
- name: Install Go
uses: actions/setup-go@v4
with:
go-version: 'stable'
cache: true
- name: Download all artifacts
uses: actions/download-artifact@v4
- name: Collect coverage
run: |
go tool covdata textfmt -i="$(find Sytest* -name 'covmeta*' -type f -exec dirname {} \; | uniq | paste -s -d ',' -)" -o sytest.cov
grep -Ev 'relayapi|setup/mscs|api_trace' sytest.cov > final.cov
go tool covdata func -i="$(find Sytest* -name 'covmeta*' -type f -exec dirname {} \; | uniq | paste -s -d ',' -)"
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v4
with:
files: ./final.cov
flags: sytest
fail_ci_if_error: true
token: ${{ secrets.CODECOV_TOKEN }}
# run Complement
complement:
needs: check_date
if: ${{ needs.check_date.outputs.should_run != 'false' }}
name: "Complement (${{ matrix.label }})"
timeout-minutes: 60
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
include:
- label: SQLite native
cgo: 0
- label: SQLite Cgo
cgo: 1
- label: PostgreSQL
postgres: Postgres
cgo: 0
steps:
# Env vars are set via a file given by $GITHUB_PATH. We need both Go 1.17 and GOPATH on the env to run Complement.
# See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#adding-a-system-path
- name: "Set Go Version"
run: |
echo "$GOROOT_1_17_X64/bin" >> $GITHUB_PATH
echo "~/go/bin" >> $GITHUB_PATH
- name: "Install Complement Dependencies"
# We don't need to install Go because it is included on the Ubuntu 20.04 image:
# See https://github.com/actions/virtual-environments/blob/main/images/linux/Ubuntu2004-Readme.md specifically GOROOT_1_17_X64
run: |
sudo apt-get update && sudo apt-get install -y libolm3 libolm-dev
go install github.com/gotesttools/gotestfmt/v2/cmd/gotestfmt@latest
- name: Run actions/checkout@v4 for dendrite
uses: actions/checkout@v4
with:
path: dendrite
# Attempt to check out the same branch of Complement as the PR. If it
# doesn't exist, fallback to main.
- name: Checkout complement
shell: bash
run: |
mkdir -p complement
# Attempt to use the version of complement which best matches the current
# build. Depending on whether this is a PR or release, etc. we need to
# use different fallbacks.
#
# 1. First check if there's a similarly named branch (GITHUB_HEAD_REF
# for pull requests, otherwise GITHUB_REF).
# 2. Attempt to use the base branch, e.g. when merging into release-vX.Y
# (GITHUB_BASE_REF for pull requests).
# 3. Use the default complement branch ("master").
for BRANCH_NAME in "$GITHUB_HEAD_REF" "$GITHUB_BASE_REF" "${GITHUB_REF#refs/heads/}" "master"; do
# Skip empty branch names and merge commits.
if [[ -z "$BRANCH_NAME" || $BRANCH_NAME =~ ^refs/pull/.* ]]; then
continue
fi
(wget -O - "https://github.com/matrix-org/complement/archive/$BRANCH_NAME.tar.gz" | tar -xz --strip-components=1 -C complement) && break
done
# Build initial Dendrite image
- run: docker build --build-arg=CGO=${{ matrix.cgo }} -t complement-dendrite:${{ matrix.postgres }}${{ matrix.cgo }} -f build/scripts/Complement${{ matrix.postgres }}.Dockerfile .
working-directory: dendrite
env:
DOCKER_BUILDKIT: 1
- name: Create post test script
run: |
cat <<EOF > /tmp/posttest.sh
#!/bin/bash
mkdir -p /tmp/Complement/logs/\$2/\$1/
docker cp \$1:/tmp/covdatafiles/. /tmp/Complement/logs/\$2/\$1/
EOF
chmod +x /tmp/posttest.sh
# Run Complement
- run: |
set -o pipefail &&
go test -v -json -tags dendrite_blacklist ./tests ./tests/csapi 2>&1 | gotestfmt -hide all
shell: bash
name: Run Complement Tests
env:
COMPLEMENT_BASE_IMAGE: complement-dendrite:${{ matrix.postgres }}${{ matrix.cgo }}
COMPLEMENT_SHARE_ENV_PREFIX: COMPLEMENT_DENDRITE_
COMPLEMENT_DENDRITE_COVER: 1
COMPLEMENT_POST_TEST_SCRIPT: /tmp/posttest.sh
working-directory: complement
- name: Upload Complement logs
uses: actions/upload-artifact@v4
if: ${{ always() }}
with:
name: Complement Logs - (Dendrite ${{ join(matrix.*, ' ') }})
path: |
/tmp/Complement/logs/**
complement-coverage:
timeout-minutes: 5
name: "Complement Coverage"
runs-on: ubuntu-latest
needs: [ complement, check_date ] # only run once Complement is done and there was a commit
if: ${{ always() && needs.check_date.outputs.should_run != 'false' }}
steps:
- uses: actions/checkout@v4
- name: Install Go
uses: actions/setup-go@v4
with:
go-version: 'stable'
cache: true
- name: Download all artifacts
uses: actions/download-artifact@v4
- name: Collect coverage
run: |
go tool covdata textfmt -i="$(find Complement* -name 'covmeta*' -type f -exec dirname {} \; | uniq | paste -s -d ',' -)" -o complement.cov
grep -Ev 'relayapi|setup/mscs|api_trace' complement.cov > final.cov
go tool covdata func -i="$(find Complement* -name 'covmeta*' -type f -exec dirname {} \; | uniq | paste -s -d ',' -)"
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v4
with:
files: ./final.cov
flags: complement
fail_ci_if_error: true
token: ${{ secrets.CODECOV_TOKEN }} # required
element-web:
if: ${{ false }} # disable for now, as Cypress has been replaced by Playwright
timeout-minutes: 120
runs-on: ubuntu-latest
steps:
- uses: tecolicom/actions-use-apt-tools@v1
with:
# Our test suite includes some screenshot tests with unusual diacritics, which are
# supposed to be covered by STIXGeneral.
tools: fonts-stix
- uses: actions/checkout@v4
with:
repository: matrix-org/matrix-react-sdk
- uses: actions/setup-node@v3
with:
cache: 'yarn'
- name: Fetch layered build
run: scripts/ci/layered.sh
- name: Copy config
run: cp element.io/develop/config.json config.json
working-directory: ./element-web
- name: Build
env:
CI_PACKAGE: true
NODE_OPTIONS: "--openssl-legacy-provider"
run: yarn build
working-directory: ./element-web
- name: Edit Test Config
run: |
sed -i '/HOMESERVER/c\ HOMESERVER: "dendrite",' cypress.config.ts
- name: "Run cypress tests"
uses: cypress-io/github-action@v4.1.1
with:
browser: chrome
start: npx serve -p 8080 ./element-web/webapp
wait-on: 'http://localhost:8080'
env:
PUPPETEER_SKIP_CHROMIUM_DOWNLOAD: true
TMPDIR: ${{ runner.temp }}
element-web-pinecone:
if: ${{ false }} # disable for now, as Cypress has been replaced by Playwright
timeout-minutes: 120
runs-on: ubuntu-latest
steps:
- uses: tecolicom/actions-use-apt-tools@v1
with:
# Our test suite includes some screenshot tests with unusual diacritics, which are
# supposed to be covered by STIXGeneral.
tools: fonts-stix
- uses: actions/checkout@v4
with:
repository: matrix-org/matrix-react-sdk
- uses: actions/setup-node@v3
with:
cache: 'yarn'
- name: Fetch layered build
run: scripts/ci/layered.sh
- name: Copy config
run: cp element.io/develop/config.json config.json
working-directory: ./element-web
- name: Build
env:
CI_PACKAGE: true
NODE_OPTIONS: "--openssl-legacy-provider"
run: yarn build
working-directory: ./element-web
- name: Edit Test Config
run: |
sed -i '/HOMESERVER/c\ HOMESERVER: "dendritePinecone",' cypress.config.ts
- name: "Run cypress tests"
uses: cypress-io/github-action@v4.1.1
with:
browser: chrome
start: npx serve -p 8080 ./element-web/webapp
wait-on: 'http://localhost:8080'
env:
PUPPETEER_SKIP_CHROMIUM_DOWNLOAD: true
TMPDIR: ${{ runner.temp }}

39 .gitignore vendored

@@ -3,10 +3,6 @@
# Hidden files
.*
# Allow GitHub config
!.github
!.forgejo
# Downloads
/.downloads
@@ -23,8 +19,6 @@
/_test
/vendor/bin
/docker/build
/logs
/jetstream
# Architecture specific extensions/prefixes
*.[568vq]
@@ -41,11 +35,6 @@ _testmain.go
*.exe
*.test
*.prof
*.wasm
*.aar
*.jar
*.framework
*.xcframework
# Generated keys
*.pem
@@ -54,31 +43,3 @@ _testmain.go
# Default configuration file
dendrite.yaml
# Database files
*.db
*.db-journal
# Log files
*.log*
# Generated code
cmd/dendrite-demo-yggdrasil/embed/fs*.go
# Test dependencies
test/wasm/node_modules
# Ignore complement folder when running locally
complement/
# Stuff from GitHub Pages
docs/_site
media_store/
build
# golang workspaces
go.work*
# helm chart
helm/dendrite/charts/


@@ -6,7 +6,7 @@ run:
concurrency: 4
# timeout for analysis, e.g. 30s, 5m, default is 1m
timeout: 5m
deadline: 30m
# exit code when at least one issue was found, default is 1
issues-exit-code: 1
@@ -18,6 +18,24 @@ run:
#build-tags:
# - mytag
# which dirs to skip: they won't be analyzed;
# can use regexp here: generated.*, regexp is applied on full path;
# default value is empty list, but next dirs are always skipped independently
# from this option's value:
# vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
skip-dirs:
- bin
- docs
# which files to skip: they will be analyzed, but issues from them
# won't be reported. Default value is empty list, but there is
# no need to include all autogenerated files, we confidently recognize
# autogenerated files. If it's not please let us know.
skip-files:
- ".*\\.md$"
- ".*\\.sh$"
- "^cmd/syncserver-integration-tests/testdata.go$"
# by default isn't set. If set we pass it to "go list -mod={option}". From "go help modules":
# If invoked with -mod=readonly, the go command is disallowed from the implicit
# automatic updating of go.mod described above. Instead, it fails when any changes
@@ -32,8 +50,7 @@ run:
# output configuration options
output:
# colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number"
formats:
- format: colored-line-number
format: colored-line-number
# print lines of code with issue, default is true
print-issued-lines: true
@@ -62,8 +79,9 @@ linters-settings:
# see https://github.com/kisielk/errcheck#excluding-functions for details
#exclude: /path/to/file.txt
govet:
enable:
- shadow
# report about shadowed variables
check-shadowing: true
# settings per analyzer
settings:
printf: # analyzer name, run `go tool vet help` to see all analyzers
@@ -84,7 +102,7 @@ linters-settings:
#local-prefixes: github.com/org/project
gocyclo:
# minimal code complexity to report, 30 by default (but we recommend 10-20)
min-complexity: 25
min-complexity: 12
maligned:
# print struct with more effective memory layout or not, false by default
suggest-new: true
@@ -161,18 +179,21 @@ linters-settings:
linters:
enable:
- deadcode
- errcheck
- goconst
- gocyclo
- goimports # Does everything gofmt does
- gosimple
- govet
- ineffassign
- megacheck
- misspell # Check code comments, whereas misspell in CI checks *.md files
- nakedret
- staticcheck
- structcheck
- unparam
- unused
- varcheck
enable-all: false
disable:
- bodyclose
@@ -192,31 +213,12 @@ linters:
- stylecheck
- typecheck # Should turn back on soon
- unconvert # Should turn back on soon
- goconst # Slightly annoying, as it reports "issues" in SQL statements
disable-all: false
presets:
fast: false
issues:
# which files to skip: they will be analyzed, but issues from them
# won't be reported. Default value is empty list, but there is
# no need to include all autogenerated files, we confidently recognize
# autogenerated files. If it's not please let us know.
exclude-files:
- ".*\\.md$"
- ".*\\.sh$"
- "^cmd/syncserver-integration-tests/testdata.go$"
# which dirs to skip: they won't be analyzed;
# can use regexp here: generated.*, regexp is applied on full path;
# default value is empty list, but next dirs are always skipped independently
# from this option's value:
# vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
exclude-dirs:
- bin
- docs
# List of regexps of issue texts to exclude, empty list by default.
# But independently from this option we use default exclude patterns,
# it can be disabled by `exclude-use-default: false`. To list all

1380 CHANGES.md

File diff suppressed because it is too large

92 CODE_STYLE.md Normal file

@@ -0,0 +1,92 @@
# Code Style
We follow the standard Go style using goimports, but with a few extra
considerations.
## Linters
We use `golangci-lint` to run a number of linters, the exact list can be found
under linters in [.golangci.yml](.golangci.yml).
[Installation](https://github.com/golangci/golangci-lint#install) and [Editor
Integration](https://github.com/golangci/golangci-lint#editor-integration) for
it can be found in the readme of golangci-lint.
For rare cases where a linter is giving a spurious warning, it can be disabled
for that line or statement using a [comment
directive](https://github.com/golangci/golangci-lint#nolint), e.g. `var
bad_name int //nolint:golint,unused`. This should be used sparingly and only
when it's clear that the lint warning is spurious.
The linters can be run using [scripts/find-lint.sh](scripts/find-lint.sh)
(see file for docs) or as part of a build/test/lint cycle using
[scripts/build-test-lint.sh](scripts/build-test-lint.sh).
## HTTP Error Handling
Unfortunately, converting errors into HTTP responses with the correct status
code and message can be done in a number of ways in golang:
1. Having functions return `JSONResponse` directly, which can then either set
it to an error response or a `200 OK`.
2. Have the HTTP handler try and cast error values to types that are handled
differently.
3. Have the HTTP handler call functions whose errors can only be interpreted
one way, for example if a `validate(...)` call returns an error then handler
knows to respond with a `400 Bad Request`.
We attempt to always use option #3, as it more naturally fits with the way that
golang generally does error handling. In particular, option #1 effectively
requires reinventing a new error handling scheme just for HTTP handlers.
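The sketch below illustrates option #3 only; the route, handler and function names are invented for this example and are not taken from the Dendrite codebase. Because the validation function can only fail on malformed input, the handler can translate its error straight into a `400 Bad Request` without a bespoke error type.
```golang
package main

import (
	"fmt"
	"io"
	"net/http"
)

// validateSendRequest can only fail because the input is malformed, so any
// error it returns is safe to map to a 400 Bad Request.
func validateSendRequest(body []byte) error {
	if len(body) == 0 {
		return fmt.Errorf("request body must not be empty")
	}
	return nil
}

func sendEventHandler(w http.ResponseWriter, r *http.Request) {
	body, err := io.ReadAll(r.Body)
	if err != nil {
		// Failing to read the body is not the client's fault, so it is a 500.
		http.Error(w, "failed to read request body", http.StatusInternalServerError)
		return
	}
	if err := validateSendRequest(body); err != nil {
		// The only way validateSendRequest fails is bad input, so the handler
		// knows to respond with 400 without inspecting the error further.
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	w.WriteHeader(http.StatusOK)
}

func main() {
	http.HandleFunc("/send", sendEventHandler)
	_ = http.ListenAndServe(":8080", nil)
}
```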
## Line length
We strive for a line length of roughly 80 characters, though less than 100 is
acceptable if necessary. Longer lines are fine if there is nothing of interest
after the first 80-100 characters (e.g. long string literals).
## TODOs and FIXMEs
The majority of TODOs and FIXMEs should have an associated tracking issue on
github. These can be added just before merging of the PR to master, and the
issue number should be added to the comment, e.g. `// TODO(#324): ...`
## Logging
We generally prefer to log with static log messages and include any dynamic
information in fields.
```golang
logger := util.GetLogger(ctx)
// Not recommended
logger.Infof("Finished processing keys for %s, number of keys %d", name, numKeys)
// Recommended
logger.WithFields(logrus.Fields{
"numberOfKeys": numKeys,
"entityName": name,
}).Info("Finished processing keys")
```
This is useful when logging to systems that natively understand log fields, as
it allows people to search and process the fields without having to parse the
log message.
## Visual Studio Code
If you use VSCode then the following is an example of a workspace setting that
sets up linting correctly:
```json
{
"go.lintTool":"golangci-lint",
"go.lintFlags": [
"--fast"
]
}
```

87 CONTRIBUTING.md Normal file

@@ -0,0 +1,87 @@
# Contributing to Dendrite
Everyone is welcome to contribute to Dendrite! We aim to make it as easy as
possible to get started.
Please ensure that you sign off your contributions! See [Sign Off](#sign-off)
section below.
## Getting up and running
See [INSTALL.md](INSTALL.md) for instructions on setting up a running dev
instance of dendrite, and [CODE_STYLE.md](CODE_STYLE.md) for the code style
guide.
As of May 2019, we're not using `gb` anymore, which is the tool we had been
using for managing our dependencies. We're now using Go modules. To build
Dendrite, run the `build.sh` script at the root of this repository (which runs
`go install` under the hood), and to run unit tests, run `go test ./...` (which
should pick up any unit test and run it). There are also [scripts](scripts) for
[linting](scripts/find-lint.sh) and doing a [build/test/lint
run](scripts/build-test-lint.sh).
## Continuous Integration
When a Pull Request is submitted, continuous integration jobs are run
automatically to ensure the code builds and is relatively well-written. The
jobs are run on [Buildkite](https://buildkite.com/matrix-dot-org/dendrite/),
and the Buildkite pipeline configuration can be found in Matrix.org's
[pipelines repository](https://github.com/matrix-org/pipelines).
If a job fails, click the "details" button and you should be taken to the job's
logs.
![Click the details button on the failing build step](docs/images/details-button-location.jpg)
Scroll down to the failing step and you should see some log output. Scan
the logs until you find what it's complaining about, fix it, submit a new
commit, then rinse and repeat until CI passes.
### Running CI Tests Locally
To save waiting for CI to finish after every commit, it is ideal to run the
checks locally before pushing, fixing errors first. This also saves other
people time as only so many PRs can be tested at a given time.
To run the same checks that Buildkite does, first run `./scripts/build-test-lint.sh`;
this script will build the code, lint it, and run `go test ./...` with race
condition checking enabled. If something needs to be changed, fix it and then
run the script again until it no longer complains. Be warned that the linting
can take a significant amount of CPU and RAM.
Once the code builds, run [Sytest](https://github.com/matrix-org/sytest)
according to the guide in
[docs/sytest.md](https://github.com/matrix-org/dendrite/blob/master/docs/sytest.md#using-a-sytest-docker-image)
so you can see whether something is being broken and whether there are newly
passing tests.
If these two steps report no problems, the code should be able to pass the CI
tests.
## Picking Things To Do
If you're new then feel free to pick up an issue labelled [good first issue](https://github.com/matrix-org/dendrite/labels/good%20first%20issue).
These should be well-contained, small pieces of work that can be picked up to
help you get familiar with the code base.
Once you're comfortable with hacking on Dendrite there are issues labelled as
[help wanted](https://github.com/matrix-org/dendrite/labels/help%20wanted), these
are often slightly larger or more complicated pieces of work but are hopefully
nonetheless fairly well-contained.
We ask people who are familiar with Dendrite to leave the [good first issue](https://github.com/matrix-org/dendrite/labels/good%20first%20issue)
issues so that there is always a way for new people to come and get involved.
## Getting Help
For questions related to developing on Dendrite we have a dedicated room on
Matrix [#dendrite-dev:matrix.org](https://matrix.to/#/#dendrite-dev:matrix.org)
where we're happy to help.
For more general questions please use [#dendrite:matrix.org](https://matrix.to/#/#dendrite:matrix.org).
## Sign off
We ask that everyone who contributes to the project signs off their
contributions, in accordance with the [DCO](https://github.com/matrix-org/matrix-doc/blob/master/CONTRIBUTING.rst#sign-off).

140 DESIGN.md Normal file

@@ -0,0 +1,140 @@
# Design
## Log Based Architecture
### Decomposition and Decoupling
A matrix homeserver can be built around append-only event logs built from the
messages, receipts, presence, typing notifications, device messages and other
events sent by users on the homeservers or by other homeservers.
The server would then decompose into two categories: writers that add new
entries to the logs and readers that read those entries.
The event logs then serve to decouple the two components, the writers and
readers need only agree on the format of the entries in the event log.
This format could be largely derived from the wire format of the events used
in the client and federation protocols:
 C-S API    +---------+    Event Log    +---------+    C-S API
 ---------> |         |+  (e.g. kafka)  |         |+  --------->
            | Writers ||  ============> | Readers ||
 ---------> |         ||                |         ||  --------->
 S-S API    +---------+|                +---------+|   S-S API
             +---------+                 +---------+
However the way matrix handles state events in a room creates a few
complications for this model.
1) Writers require the room state at an event to check if it is allowed.
2) Readers require the room state at an event to determine the users and
servers that are allowed to see the event.
3) A client can query the current state of the room from a reader.
The writers and readers cannot extract the necessary information directly from
the event logs because it would take too long to extract the information as the
state is built up by collecting individual state events from the event history.
The writers and readers therefore need access to something that stores copies
of the event state in a form that can be efficiently queried. One possibility
would be for the readers and writers to maintain copies of the current state
in local databases. A second possibility would be to add a dedicated component
that maintained the state of the room and exposed an API that the readers and
writers could query to get the state. The second has the advantage that the
state is calculated and stored in a single location.
 C-S API    +---------+   Log   +--------+   Log   +---------+    C-S API
 ---------> |         |+ ======> |        | ======> |         |+  --------->
            | Writers ||        |  Room  |         | Readers ||
 ---------> |         || <----- | Server |  -----> |         ||  --------->
 S-S API    +---------+|  Query |        | Query   +---------+|   S-S API
             +---------+        +--------+          +---------+
The room server can annotate the events it logs to the readers with room state
so that the readers can avoid querying the room server unnecessarily.
[This architecture can be extended to cover most of the APIs.](WIRING.md)
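As a concrete illustration of that annotation, the sketch below shows the sort of entry a room server might write to its output log. The type and field names are hypothetical and are not the actual Dendrite roomserver types; the point is that each entry carries both the event and the state changes it causes, so readers can keep a local copy of the room state without querying back.
```golang
// Package design is an illustrative sketch only; these are not the real
// Dendrite roomserver output types.
package design

// OutputRoomEvent is one entry in the log the room server writes for readers.
type OutputRoomEvent struct {
	// The event itself, in the same wire format used by the federation API.
	EventJSON []byte
	// IDs of state events this event adds to the current room state.
	AddsStateEventIDs []string
	// IDs of state events this event removes from the current room state.
	RemovesStateEventIDs []string
	// IDs of the state events at this event, used to work out which users
	// and servers are allowed to see it.
	StateAtEventIDs []string
}
```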
## How things are supposed to work.
### Local client sends an event in an existing room.
0) The client sends a PUT `/_matrix/client/r0/rooms/{roomId}/send` request
and an HTTP loadbalancer routes the request to a ClientAPI.
1) The ClientAPI:
* Authenticates the local user using the `access_token` sent in the HTTP
request.
* Checks if it has already processed or is processing a request with the
same `txnID`.
* Calculates which state events are needed to auth the request.
* Queries the necessary state events and the latest events in the room
from the RoomServer.
* Confirms that the room exists and checks whether the event is allowed by
the auth checks.
* Builds and signs the events.
* Writes the event to a "InputRoomEvent" kafka topic.
* Sends a `200 OK` response to the client.
2) The RoomServer reads the event from "InputRoomEvent" kafka topic:
* Checks if it has already has a copy of the event.
* Checks if the event is allowed by the auth checks using the auth events
at the event.
* Calculates the room state at the event.
* Works out what the latest events in the room after processing this event
are.
* Calculates how the changes in the latest events affect the current state
of the room.
* TODO: Work out what events determine the visibility of this event to other
users
* Writes the event along with the changes in current state to an
"OutputRoomEvent" kafka topic. It writes all the events for a room to
the same kafka partition.
3a) The ClientSync reads the event from the "OutputRoomEvent" kafka topic:
* Updates its copy of the current state for the room.
* Works out which users need to be notified about the event.
* Wakes up any pending `/_matrix/client/r0/sync` requests for those users.
* Adds the event to the recent timeline events for the room.
3b) The FederationSender reads the event from the "OutputRoomEvent" kafka topic:
* Updates its copy of the current state for the room.
* Works out which remote servers need to be notified about the event.
* Sends a `/_matrix/federation/v1/send` request to those servers.
* Or, if a request is already in progress, adds the event to a queue to be
sent when the previous request finishes.
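For illustration, the message that the RoomServer writes to the "OutputRoomEvent" kafka topic could look roughly like the sketch below. The field names are assumptions chosen to match the steps above, not the exact wire format.

```go
package roomserver

import "encoding/json"

// OutputRoomEvent is a rough sketch of the message the RoomServer could write
// to the "OutputRoomEvent" kafka topic after accepting an input event. The
// field names are assumptions matching the steps above, not the exact format.
type OutputRoomEvent struct {
	// The full signed event JSON, so consumers don't need to fetch it again.
	Event json.RawMessage `json:"event"`

	// The latest events (forward extremities) in the room after this event.
	LatestEventIDs []string `json:"latest_event_ids"`

	// State event IDs added to and removed from the current room state as a
	// result of this event, so consumers can maintain their own state copy.
	AddsStateEventIDs    []string `json:"adds_state_event_ids"`
	RemovesStateEventIDs []string `json:"removes_state_event_ids"`
}
```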
### Remote server sends an event in an existing room.
0) The remote server sends a `PUT /_matrix/federation/v1/send` request and an
HTTP load balancer routes the request to a FederationReceiver.
1) The FederationReceiver:
* Authenticates the remote server using the "X-Matrix" authorisation header.
* Checks if it has already processed or is processing a request with the
same `txnID`.
* Checks the signatures for the events.
Fetches the ed25519 keys for the event senders if necessary.
* Queries the RoomServer for a copy of the state of the room at each event.
* If the RoomServer doesn't know the state of the room at an event then
query the state of the room at the event from the remote server using
`GET /_matrix/federation/v1/state_ids` falling back to
`GET /_matrix/federation/v1/state` if necessary.
* Once the state at each event is known check whether the events are
allowed by the auth checks against the state at each event.
* For each event that is allowed write the event to the "InputRoomEvent"
kafka topic.
* Sends a `200 OK` response to the remote server listing which events were
successfully processed and which events failed.
2) The RoomServer processes the event the same as it would a local event.
3a) The ClientSync processes the event the same as it would a local event.

@ -1,49 +0,0 @@
#syntax=docker/dockerfile:1.2
#
# base installs required dependencies and runs go mod download to cache dependencies
#
# Pinned to alpine3.18 until https://github.com/mattn/go-sqlite3/issues/1164 is solved
FROM --platform=${BUILDPLATFORM} docker.io/golang:1.21-alpine3.18 AS base
RUN apk --update --no-cache add bash build-base curl git
#
# build creates all needed binaries
#
FROM --platform=${BUILDPLATFORM} base AS build
WORKDIR /src
ARG TARGETOS
ARG TARGETARCH
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
USERARCH=`go env GOARCH` \
GOARCH="$TARGETARCH" \
GOOS="linux" \
CGO_ENABLED=$([ "$TARGETARCH" = "$USERARCH" ] && echo "1" || echo "0") \
go build -v -trimpath -o /out/ ./cmd/...
#
# Builds the Dendrite image containing all required binaries
#
FROM alpine:latest
RUN apk --update --no-cache add curl
LABEL org.opencontainers.image.title="Dendrite"
LABEL org.opencontainers.image.description="Next-generation Matrix homeserver written in Go"
LABEL org.opencontainers.image.source="https://github.com/matrix-org/dendrite"
LABEL org.opencontainers.image.licenses="Apache-2.0"
LABEL org.opencontainers.image.documentation="https://matrix-org.github.io/dendrite/"
LABEL org.opencontainers.image.vendor="The Matrix.org Foundation C.I.C."
COPY --from=build /out/create-account /usr/bin/create-account
COPY --from=build /out/generate-config /usr/bin/generate-config
COPY --from=build /out/generate-keys /usr/bin/generate-keys
COPY --from=build /out/dendrite /usr/bin/dendrite
VOLUME /etc/dendrite
WORKDIR /etc/dendrite
ENTRYPOINT ["/usr/bin/dendrite"]
EXPOSE 8008 8448

INSTALL.md Normal file
@ -0,0 +1,267 @@
# Installing Dendrite
Dendrite can be run in one of two configurations:
* A cluster of individual components, dealing with different aspects of the
Matrix protocol (see [WIRING.md](./WIRING.md)). Components communicate with
one another via [Apache Kafka](https://kafka.apache.org).
* A monolith server, in which all components run in the same process. In this
configuration, Kafka can be replaced with an in-process implementation
called [naffka](https://github.com/matrix-org/naffka).
## Requirements
- Go 1.11+
- Postgres 9.5+
- For Kafka (optional if using the monolith server):
- Unix-based system (https://kafka.apache.org/documentation/#os)
- JDK 1.8+ / OpenJDK 1.8+
- Apache Kafka 0.10.2+ (see [scripts/install-local-kafka.sh](scripts/install-local-kafka.sh) for up-to-date version numbers)
## Setting up a development environment
Assumes Go 1.10+ and JDK 1.8+ are already installed and are on PATH.
```bash
# Get the code
git clone https://github.com/matrix-org/dendrite
cd dendrite
# Build it
./build.sh
```
If using Kafka, install and start it (c.f. [scripts/install-local-kafka.sh](scripts/install-local-kafka.sh)):
```bash
KAFKA_URL=http://archive.apache.org/dist/kafka/2.1.0/kafka_2.11-2.1.0.tgz
# Only download the kafka if it isn't already downloaded.
test -f kafka.tgz || wget $KAFKA_URL -O kafka.tgz
# Unpack the kafka over the top of any existing installation
mkdir -p kafka && tar xzf kafka.tgz -C kafka --strip-components 1
# Start the zookeeper running in the background.
# By default the zookeeper listens on localhost:2181
kafka/bin/zookeeper-server-start.sh -daemon kafka/config/zookeeper.properties
# Start the kafka server running in the background.
# By default the kafka listens on localhost:9092
kafka/bin/kafka-server-start.sh -daemon kafka/config/server.properties
```
On macOS, you can use [Homebrew](https://brew.sh/) for an easier Kafka setup:
```bash
brew install kafka
brew services start zookeeper
brew services start kafka
```
## Configuration
### Postgres database setup
Dendrite requires a postgres database engine, version 9.5 or later.
* Create role:
```bash
sudo -u postgres createuser -P dendrite # prompts for password
```
* Create databases:
```bash
for i in account device mediaapi syncapi roomserver serverkey federationsender publicroomsapi appservice naffka; do
sudo -u postgres createdb -O dendrite dendrite_$i
done
```
(On macOS, omit `sudo -u postgres` from the above commands.)
### Crypto key generation
Generate the keys:
```bash
# Generate a self-signed SSL cert for federation:
test -f server.key || openssl req -x509 -newkey rsa:4096 -keyout server.key -out server.crt -days 3650 -nodes -subj /CN=localhost
# generate ed25519 signing key
test -f matrix_key.pem || ./bin/generate-keys -private-key matrix_key.pem
```
### Configuration
Create a config file based on `dendrite-config.yaml` and call it `dendrite.yaml`. Things that will need editing include *at least*:
* `server_name`
* `database/*` (all lines in the database section must use the username and password of the user created with the `createuser` command above, e.g. `dendrite:password@localhost`, as shown in the sketch below)
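As an illustration only, the database section could end up looking something like the sketch below; the hostname, password and key names here are placeholders, so copy the real structure from `dendrite-config.yaml` rather than from this sketch.

```yaml
# Illustrative sketch - take the actual keys from dendrite-config.yaml.
database:
  account:     "postgres://dendrite:password@localhost/dendrite_account?sslmode=disable"
  device:      "postgres://dendrite:password@localhost/dendrite_device?sslmode=disable"
  media_api:   "postgres://dendrite:password@localhost/dendrite_mediaapi?sslmode=disable"
  sync_api:    "postgres://dendrite:password@localhost/dendrite_syncapi?sslmode=disable"
  room_server: "postgres://dendrite:password@localhost/dendrite_roomserver?sslmode=disable"
```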
## Starting a monolith server
It is possible to use 'naffka' as an in-process replacement for Kafka when using
the monolith server. To do this, set `use_naffka: true` in `dendrite.yaml` and uncomment
the necessary line related to naffka in the `database` section. Be sure to update the
database username and password if needed.
The monolith server can be started as shown below. By default it listens for
HTTP connections on port 8008, so point your client at
`http://localhost:8008`. If you set `--tls-cert` and `--tls-key` as shown
below, it will also listen for HTTPS connections on port 8448.
```bash
./bin/dendrite-monolith-server --tls-cert=server.crt --tls-key=server.key
```
## Starting a multiprocess server
The following sections contain scripts which will run all the required processes in order to point a Matrix client at Dendrite. Conceptually, you are wiring the components together to form the following diagram:
```
/media +---------------------------+
+----------->+------------->| dendrite-media-api-server |
^ ^ +---------------------------+
| | :7774
| |
| |
| | /directory +----------------------------------+
| | +--------->| dendrite-public-rooms-api-server |<========++
| | | +----------------------------------+ ||
| | | :7775 | ||
| | | +<-----------+ ||
| | | | ||
| | | /sync +--------------------------+ ||
| | +--------->| dendrite-sync-api-server |<================++
| | | | +--------------------------+ ||
| | | | :7773 | ^^ ||
Matrix +------------------+ | | | | || client_data ||
Clients --->| client-api-proxy |-------+ +<-----------+ ++=============++ ||
+------------------+ | | | || ||
:8008 | | CS API +----------------------------+ || ||
| +--------->| dendrite-client-api-server |==++ ||
| | +----------------------------+ ||
| | :7771 | ||
| | | ||
| +<-----------+ ||
| | ||
| | ||
| | +----------------------+ room_event ||
| +---------->| dendrite-room-server |===============++
| | +----------------------+ ||
| | :7770 ||
| | ++==========================++
| +<------------+ ||
| | | VV
| | +-----------------------------------+ Matrix
| | | dendrite-federation-sender-server |------------> Servers
| | +-----------------------------------+
| | :7776
| |
+---------->+ +<-----------+
| |
Matrix +----------------------+ SS API +--------------------------------+
Servers --->| federation-api-proxy |--------->| dendrite-federation-api-server |
+----------------------+ +--------------------------------+
:8448 :7772
A --> B = HTTP requests (A = client, B = server)
A ==> B = Kafka (A = producer, B = consumer)
```
### Run a client api proxy
This is what Matrix clients will talk to. If you use the script below, point your client at `http://localhost:8008`.
```bash
./bin/client-api-proxy \
--bind-address ":8008" \
--client-api-server-url "http://localhost:7771" \
--sync-api-server-url "http://localhost:7773" \
--media-api-server-url "http://localhost:7774" \
--public-rooms-api-server-url "http://localhost:7775"
```
### Run a client api
This is what implements message sending. Clients talk to this via the proxy in order to send messages.
```bash
./bin/dendrite-client-api-server --config=dendrite.yaml
```
(If this fails with `pq: syntax error at or near "ON"`, check you are using at least postgres 9.5.)
### Run a room server
This is what implements the room DAG. Clients do not talk to this.
```bash
./bin/dendrite-room-server --config=dendrite.yaml
```
### Run a sync server
This is what implements `/sync` requests. Clients talk to this via the proxy in order to receive messages.
```bash
./bin/dendrite-sync-api-server --config dendrite.yaml
```
### Run a media server
This implements `/media` requests. Clients talk to this via the proxy in order to upload and retrieve media.
```bash
./bin/dendrite-media-api-server --config dendrite.yaml
```
### Run public room server
This implements `/directory` requests. Clients talk to this via the proxy in order to retrieve room directory listings.
```bash
./bin/dendrite-public-rooms-api-server --config dendrite.yaml
```
### Run a federation api proxy
This is what Matrix servers will talk to. This is only required if you want to support federation.
```bash
./bin/federation-api-proxy \
--bind-address ":8448" \
--federation-api-url "http://localhost:7772" \
--media-api-server-url "http://localhost:7774"
```
### Run a federation api server
This implements federation requests. Servers talk to this via the proxy in
order to send transactions. This is only required if you want to support
federation.
```bash
./bin/dendrite-federation-api-server --config dendrite.yaml
```
### Run a federation sender server
This sends events from our users to other servers. This is only required if
you want to support federation.
```bash
./bin/dendrite-federation-sender-server --config dendrite.yaml
```
### Run an appservice server
This sends events from the network to [application
services](https://matrix.org/docs/spec/application_service/unstable.html)
running locally. This is only required if you want to support running
application services on your homeserver.
```bash
./bin/dendrite-appservice-server --config dendrite.yaml
```

README.md
@ -1,127 +1,27 @@
# Dendrite
# Dendrite [![Build Status](https://badge.buildkite.com/4be40938ab19f2bbc4a6c6724517353ee3ec1422e279faf374.svg?branch=master)](https://buildkite.com/matrix-dot-org/dendrite) [![Dendrite Dev on Matrix](https://img.shields.io/matrix/dendrite-dev:matrix.org.svg?label=%23dendrite-dev%3Amatrix.org&logo=matrix&server_fqdn=matrix.org)](https://matrix.to/#/#dendrite-dev:matrix.org) [![Dendrite on Matrix](https://img.shields.io/matrix/dendrite:matrix.org.svg?label=%23dendrite%3Amatrix.org&logo=matrix&server_fqdn=matrix.org)](https://matrix.to/#/#dendrite:matrix.org)
[![Build status](https://github.com/matrix-org/dendrite/actions/workflows/dendrite.yml/badge.svg?event=push)](https://github.com/matrix-org/dendrite/actions/workflows/dendrite.yml) [![Dendrite](https://img.shields.io/matrix/dendrite:matrix.org.svg?label=%23dendrite%3Amatrix.org&logo=matrix&server_fqdn=matrix.org)](https://matrix.to/#/#dendrite:matrix.org) [![Dendrite Dev](https://img.shields.io/matrix/dendrite-dev:matrix.org.svg?label=%23dendrite-dev%3Amatrix.org&logo=matrix&server_fqdn=matrix.org)](https://matrix.to/#/#dendrite-dev:matrix.org)
Dendrite will be a matrix homeserver written in go.
Dendrite is a second-generation Matrix homeserver written in Go.
It intends to provide an **efficient**, **reliable** and **scalable** alternative to [Synapse](https://github.com/matrix-org/synapse):
It's still very much a work in progress, but installation instructions can
be found in [INSTALL.md](INSTALL.md)
- Efficient: A small memory footprint with better baseline performance than an out-of-the-box Synapse.
- Reliable: Implements the Matrix specification as written, using the
[same test suite](https://github.com/matrix-org/sytest) as Synapse as well as
a [brand new Go test suite](https://github.com/matrix-org/complement).
- Scalable: can run on multiple machines and eventually scale to massive homeserver deployments.
An overview of the design can be found in [DESIGN.md](DESIGN.md)
Dendrite is **beta** software, which means:
# Contributing
- Dendrite is ready for early adopters. We recommend running Dendrite with a PostgreSQL database.
- Dendrite has periodic releases. We intend to release new versions as we fix bugs and land significant features.
- Dendrite supports database schema upgrades between releases. This means you should never lose your messages when upgrading Dendrite.
Everyone is welcome to help out and contribute! See [CONTRIBUTING.md](CONTRIBUTING.md)
to get started!
This does not mean:
We aim to try and make it as easy as possible to jump in.
- Dendrite is bug-free. It has not yet been battle-tested in the real world and so will be error prone initially.
- Dendrite is feature-complete. There may be client or federation APIs that are not implemented.
- Dendrite is ready for massive homeserver deployments. There is no high-availability/clustering support.
# Discussion
Currently, we expect Dendrite to function well for small (10s/100s of users) homeserver deployments as well as P2P Matrix nodes in-browser or on mobile devices.
For questions about Dendrite we have a dedicated room on Matrix
[#dendrite:matrix.org](https://matrix.to/#/#dendrite:matrix.org).
Development discussion should happen in
[#dendrite-dev:matrix.org](https://matrix.to/#/#dendrite-dev:matrix.org).
If you have further questions, please take a look at [our FAQ](docs/FAQ.md) or join us in:
# Progress
- **[#dendrite:matrix.org](https://matrix.to/#/#dendrite:matrix.org)** - General chat about the Dendrite project, for users and server admins alike
- **[#dendrite-dev:matrix.org](https://matrix.to/#/#dendrite-dev:matrix.org)** - The place for developers, where all Dendrite development discussion happens
- **[#dendrite-alerts:matrix.org](https://matrix.to/#/#dendrite-alerts:matrix.org)** - Release notifications and important info, highly recommended for all Dendrite server admins
## Requirements
See the [Planning your Installation](https://matrix-org.github.io/dendrite/installation/planning) page for
more information on requirements.
To build Dendrite, you will need Go 1.20 or later.
For a usable federating Dendrite deployment, you will also need:
- A domain name (or subdomain)
- A valid TLS certificate issued by a trusted authority for that domain
- SRV records or a well-known file pointing to your deployment
Also recommended are:
- A PostgreSQL database engine, which will perform better than SQLite with many users and/or larger rooms
- A reverse proxy server, such as nginx, configured [like this sample](https://github.com/matrix-org/dendrite/blob/main/docs/nginx/dendrite-sample.conf)
The [Federation Tester](https://federationtester.matrix.org) can be used to verify your deployment.
## Get started
If you wish to build a fully-federating Dendrite instance, see [the Installation documentation](https://matrix-org.github.io/dendrite/installation). For running in Docker, see [build/docker](build/docker).
The following instructions are enough to get Dendrite started as a non-federating test deployment using self-signed certificates and SQLite databases:
```bash
$ git clone https://github.com/matrix-org/dendrite
$ cd dendrite
$ go build -o bin/ ./cmd/...
# Generate a Matrix signing key for federation (required)
$ ./bin/generate-keys --private-key matrix_key.pem
# Generate a self-signed certificate (optional, but a valid TLS certificate is normally
# needed for Matrix federation/clients to work properly!)
$ ./bin/generate-keys --tls-cert server.crt --tls-key server.key
# Copy and modify the config file - you'll need to set a server name and paths to the keys
# at the very least, along with setting up the database connection strings.
$ cp dendrite-sample.yaml dendrite.yaml
# Build and run the server:
$ ./bin/dendrite --tls-cert server.crt --tls-key server.key --config dendrite.yaml
# Create an user account (add -admin for an admin user).
# Specify the localpart only, e.g. 'alice' for '@alice:domain.com'
$ ./bin/create-account --config dendrite.yaml --username alice
```
Then point your favourite Matrix client at `http://localhost:8008` or `https://localhost:8448`.
## Progress
We use a script called "Are We Synapse Yet" which checks Sytest compliance rates. Sytest is a black-box homeserver
test rig with around 900 tests. The script works out how many of these tests are passing on Dendrite and it
updates with CI. As of January 2023, we have 100% server-server parity with Synapse, and the client-server parity is at 93%, though check
CI for the latest numbers. In practice, this means you can communicate locally and via federation with Synapse
servers such as matrix.org reasonably well, although there are still some missing features (like SSO and Third-party ID APIs).
We are prioritising features that will benefit single-user homeservers first (e.g. Receipts, E2E) rather
than features that massive deployments may be interested in (OpenID, Guests, Admin APIs, AS API).
This means Dendrite supports amongst others:
- Core room functionality (creating rooms, invites, auth rules)
- Room versions 1 to 10 supported
- Backfilling locally and via federation
- Accounts, profiles and devices
- Published room lists
- Typing
- Media APIs
- Redaction
- Tagging
- Context
- E2E keys and device lists
- Receipts
- Push
- Guests
- User Directory
- Presence
- Fulltext search
## Contributing
We would be grateful for any help on issues marked as
[Are We Synapse Yet](https://github.com/matrix-org/dendrite/labels/are-we-synapse-yet). These issues
all have related Sytests which need to pass in order for the issue to be closed. Once you've written your
code, you can quickly run Sytest to ensure that the test names are now passing.
If you're new to the project, see our
[Contributing page](https://matrix-org.github.io/dendrite/development/contributing) to get up to speed, then
look for [Good First Issues](https://github.com/matrix-org/dendrite/labels/good%20first%20issue). If you're
familiar with the project, look for [Help Wanted](https://github.com/matrix-org/dendrite/labels/help-wanted)
issues.
There's plenty still to do to make Dendrite usable! We're tracking progress in
a [project board](https://github.com/matrix-org/dendrite/projects/2).

WIRING.md Normal file
@ -0,0 +1,229 @@
# Wiring
The diagram is incomplete. The following things aren't shown on the diagram:
* Device Messages
* User Profiles
* Notification Counts
* Sending federation.
* Querying federation.
* Other things that aren't shown on the diagram.
Diagram:
W -> Writer
S -> Server/Store/Service/Something/Stuff
R -> Reader
+---+ +---+ +---+
+----------| W | +----------| S | +--------| R |
| +---+ | Receipts +---+ | Client +---+
| Federation |>=========================================>| Server |>=====================>| Sync |
| Receiver | | | | |
| | +---+ | | | |
| | +--------| W | | | | |
| | | Client +---+ | | | |
| | | Receipt |>=====>| | | |
| | | Updater | | | | |
| | +----------+ | | | |
| | | | | |
| | +---+ +---+ | | +---+ | |
| | +------------| W | +------| S | | | +--------| R | | |
| | | Federation +---+ | Room +---+ | | | Client +---+ | |
| | | Backfill |>=====>| Server |>=====>| |>=====>| Push | | |
| | +--------------+ | | +------------+ | | | |
| | | | | | | |
| | | |>==========================>| | | |
| | | | +----------+ | |
| | | | +---+ | |
| | | | +-------------| R | | |
| | | |>=====>| Application +---+ | |
| | | | | Services | | |
| | | | +--------------+ | |
| | | | +---+ | |
| | | | +--------| R | | |
| | | | | Client +---+ | |
| |>========================>| |>==========================>| Search | | |
| | | | | | | |
| | | | +----------+ | |
| | | | | |
| | | |>==========================================>| |
| | | | | |
| | +---+ | | +---+ | |
| | +--------| W | | | +----------| S | | |
| | | Client +---+ | | | Presence +---+ | |
| | | API |>=====>| |>=====>| Server |>=====================>| |
| | | /send | +--------+ | | | |
| | | | | | | |
| | | |>======================>| |<=====================<| |
| | +----------+ | | | |
| | | | | |
| | +---+ | | | |
| | +--------| W | | | | |
| | | Client +---+ | | | |
| | | Presence |>=====>| | | |
| | | Setter | | | | |
| | +----------+ | | | |
| | | | | |
| | | | | |
| |>=========================================>| | | |
| | +------------+ | |
| | | |
| | +---+ | |
| | +----------| S | | |
| | | Typing +---+ | |
| |>=========================================>| Server |>=====================>| |
+------------+ | | +----------+
+---+ | |
+--------| W | | |
| Client +---+ | |
| Typing |>=====>| |
| Setter | | |
+----------+ +------------+
# Component Descriptions
Many of the components are logical rather than physical. For example it is
possible that all of the client API writers will end up being glued together
and always deployed as a single unit.
Outbound federation requests will probably need to be funnelled through a
choke-point to implement ratelimiting and backoff correctly.
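As a rough illustration of such a choke-point, the sketch below funnels all outbound requests for one destination through a single goroutine and applies a simple exponential backoff while the remote server is failing. The names and the backoff policy are assumptions for illustration, not the actual federation sender.

```go
package federation

import "time"

// sendFunc performs a single outbound federation request and reports whether
// it succeeded.
type sendFunc func() error

// destinationQueue is a sketch of a per-server choke-point: every outbound
// request for one destination is funnelled through a single goroutine, which
// applies a simple exponential backoff while the remote server is failing.
type destinationQueue struct {
	pending chan sendFunc
}

func newDestinationQueue() *destinationQueue {
	q := &destinationQueue{pending: make(chan sendFunc, 64)}
	go q.run()
	return q
}

// Enqueue schedules a request without blocking the caller on the remote server.
func (q *destinationQueue) Enqueue(send sendFunc) {
	q.pending <- send
}

func (q *destinationQueue) run() {
	backoff := time.Second
	for send := range q.pending {
		for send() != nil {
			time.Sleep(backoff) // the remote side failed: back off before retrying
			if backoff < time.Minute {
				backoff *= 2
			}
		}
		backoff = time.Second // reset after a successful send
	}
}
```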
## Federation Send
* Handles `/federation/v1/send/` requests.
* Fetches missing ``prev_events`` from the remote server if needed.
* Fetches missing room state from the remote server if needed.
* Checks signatures on remote events, downloading keys if needed.
* Queries information needed to process events from the Room Server.
* Writes room events to logs.
* Writes presence updates to logs.
* Writes receipt updates to logs.
* Writes typing updates to logs.
* Writes other updates to logs.
## Client API /send
* Handles puts to `/client/v1/rooms/` that create room events.
* Queries information needed to process events from the Room Server.
* Talks to remote servers if needed for joins and invites.
* Writes room event pdus.
* Writes presence updates to logs.
## Client Presence Setter
* Handles puts to the [client API presence paths](https://matrix.org/docs/spec/client_server/unstable.html#id41).
* Writes presence updates to logs.
## Client Typing Setter
* Handles puts to the [client API typing paths](https://matrix.org/docs/spec/client_server/unstable.html#id32).
* Writes typing updates to logs.
## Client Receipt Updater
* Handles puts to the [client API receipt paths](https://matrix.org/docs/spec/client_server/unstable.html#id36).
* Writes receipt updates to logs.
## Federation Backfill
* Backfills events from other servers
* Writes the resulting room events to logs.
* Is a different component from the room server itself because it'll
be easier if the room server component isn't making outbound HTTP requests
to remote servers.
## Room Server
* Reads new and backfilled room events from the logs written by FS, FB and CRS.
* Tracks the current state of the room and the state at each event.
* Probably does auth checks on the incoming events.
* Handles state resolution as part of working out the current state and the
state at each event.
* Writes updates to the current state and new events to logs.
* Shards by room ID.
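A minimal sketch of what "shards by room ID" could mean in practice: when producing to kafka, using the room ID as the message key keeps every event for a room on the same partition, so consumers see a room's events in order. The use of the sarama client below is an illustrative assumption, not a statement about the actual implementation.

```go
package roomserver

import "github.com/Shopify/sarama"

// writeOutputEvent sketches "shards by room ID": using the room ID as the
// kafka message key means every event for a given room lands on the same
// partition, so downstream consumers see that room's events in order.
func writeOutputEvent(producer sarama.SyncProducer, topic, roomID string, payload []byte) error {
	_, _, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: topic,
		Key:   sarama.StringEncoder(roomID), // partition key = room ID
		Value: sarama.ByteEncoder(payload),
	})
	return err
}
```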
## Receipt Server
* Reads new updates to receipts from the logs written by the FS and CRU.
* Somehow learns enough information from the room server to work out how the
current receipt markers move with each update.
* Writes the new marker positions to logs.
* Shards by room ID?
* It may be impossible to implement without folding it into the Room Server,
forever coupling the components together.
## Typing Server
* Reads new updates to typing from the logs written by the FS and CTS.
* Updates the current list of people typing in a room.
* Writes the current list of people typing in a room to the logs.
* Shards by room ID?
## Presence Server
* Reads the current state of the rooms from the logs to track the intersection
of room membership between users.
* Reads updates to presence from the logs written by the FS and the CPS.
* Reads when clients sync from the logs from the Client Sync.
* Tracks any timers for users.
* Writes the changes to presence state to the logs.
* Shards by user ID somehow?
## Client Sync
* Handle /client/v2/sync requests.
* Reads new events and the current state of the rooms from logs written by the Room Server.
* Reads new receipts positions from the logs written by the Receipts Server.
* Reads changes to presence from the logs written by the Presence Server.
* Reads changes to typing from the logs written by the Typing Server.
* Writes when a client starts and stops syncing to the logs.
## Client Search
* Handle whatever the client API path for event search is?
* Reads new events and the current state of the rooms from logs written by the Room Server.
* Maintains a full text search index of some kind.
## Client Push
* Pushes unread messages to remote push servers.
* Reads new events and the current state of the rooms from logs written by the Room Server.
* Reads the position of the read marker from the Receipts Server.
* Makes outbound HTTP hits to the push server for the client device.
## Application Service
* Receives events from the Room Server.
* Filters events and sends them to each registered application service.
* Runs a separate goroutine for each application service.
# Internal Component API
Some dendrite components use internal APIs to communicate information back
and forth between each other. There are two implementations of each API, one
that uses HTTP requests and one that does not. The HTTP implementation is
used in multi-process mode, so processes on separate computers may still
communicate, whereas in single-process or Monolith mode, the direct
implementation is used. HTTP is preferred here to kafka streams as it allows
for request responses.
Running `dendrite-monolith-server` will set up direct connections between
components, whereas running each individual component (which are only run in
multi-process mode) will set up HTTP-based connections.
The functions that make HTTP requests to internal APIs of a component are
located in `/<component name>/api/<name>.go`, named according to what
functionality they cover. Each of these requests are handled in `/<component
name>/<name>/<name>.go`.
As an example, the `appservices` component allows other Dendrite components
to query external application services via its internal API. A component
would call the desired function in `/appservices/api/query.go`. In
multi-process mode, this would send an internal HTTP request, which would
be handled by a function in `/appservices/query/query.go`. In single-process
mode, no internal HTTP request occurs, instead functions are simply called
directly, thus requiring no changes on the calling component's end.
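A heavily simplified sketch of this pattern is shown below: a single interface, a direct implementation used in monolith mode, and an HTTP implementation used in multi-process mode. All names and paths here are invented for illustration and do not correspond to the real Dendrite types. The calling component only holds a QueryAPI value, so switching between the two modes is purely a wiring decision.

```go
package internalapi

import (
	"bytes"
	"context"
	"encoding/json"
	"net/http"
)

// Request/response pairs mirror the style of the internal APIs.
type RoomAliasExistsRequest struct{ Alias string }
type RoomAliasExistsResponse struct{ AliasExists bool }

// QueryAPI is what calling components program against; they don't care
// whether the implementation is direct or HTTP-based.
type QueryAPI interface {
	RoomAliasExists(ctx context.Context, req *RoomAliasExistsRequest, res *RoomAliasExistsResponse) error
}

// aliasStore stands in for whatever storage the owning component uses.
type aliasStore interface {
	AliasExists(ctx context.Context, alias string) (bool, error)
}

// directQueryAPI is the single-process (monolith) implementation: a plain
// method call into the owning component.
type directQueryAPI struct{ db aliasStore }

func (d *directQueryAPI) RoomAliasExists(ctx context.Context, req *RoomAliasExistsRequest, res *RoomAliasExistsResponse) error {
	exists, err := d.db.AliasExists(ctx, req.Alias)
	res.AliasExists = exists
	return err
}

// httpQueryAPI is the multi-process implementation: the same call becomes an
// internal HTTP POST to the process that owns the data.
type httpQueryAPI struct {
	baseURL string
	client  *http.Client
}

func (h *httpQueryAPI) RoomAliasExists(ctx context.Context, req *RoomAliasExistsRequest, res *RoomAliasExistsResponse) error {
	body, err := json.Marshal(req)
	if err != nil {
		return err
	}
	httpReq, err := http.NewRequestWithContext(ctx, http.MethodPost, h.baseURL+"/api/RoomAliasExists", bytes.NewReader(body))
	if err != nil {
		return err
	}
	resp, err := h.client.Do(httpReq)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	return json.NewDecoder(resp.Body).Decode(res)
}
```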
appservice/README.md Normal file
@ -0,0 +1,10 @@
# Application Service
This component interfaces with external [Application
Services](https://matrix.org/docs/spec/application_service/unstable.html).
This includes any HTTP endpoints that application services call, as well as talking
to any HTTP endpoints that application services provide themselves.
## Consumers
This component consumes and filters events from the Roomserver Kafka stream, passing on any necessary events to subscribing application services.
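As a rough sketch of the filtering step, an event is forwarded to an application service only if its sender or room alias matches one of the namespaces the service registered. The types and field names below are illustrative assumptions, not the real registration types.

```go
package appservice

import "regexp"

// namespaces holds the compiled user and alias regexes from one application
// service's registration. The field names are illustrative only.
type namespaces struct {
	Users   []*regexp.Regexp
	Aliases []*regexp.Regexp
}

// interestedInEvent sketches the filtering step: an application service is
// sent an event only if the sender or the room alias falls inside one of the
// namespaces it registered.
func interestedInEvent(ns namespaces, sender, roomAlias string) bool {
	for _, re := range ns.Users {
		if re.MatchString(sender) {
			return true
		}
	}
	for _, re := range ns.Aliases {
		if roomAlias != "" && re.MatchString(roomAlias) {
			return true
		}
	}
	return false
}
```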
@ -19,34 +19,18 @@ package api
import (
"context"
"encoding/json"
"errors"
"database/sql"
"net/http"
"github.com/matrix-org/dendrite/clientapi/auth/authtypes"
userapi "github.com/matrix-org/dendrite/userapi/api"
"github.com/matrix-org/dendrite/clientapi/auth/storage/accounts"
"github.com/matrix-org/gomatrixserverlib"
"github.com/matrix-org/dendrite/common"
commonHTTP "github.com/matrix-org/dendrite/common/http"
opentracing "github.com/opentracing/opentracing-go"
)
// AppServiceInternalAPI is used to query user and room alias data from application
// services
type AppServiceInternalAPI interface {
// Check whether a room alias exists within any application service namespaces
RoomAliasExists(
ctx context.Context,
req *RoomAliasExistsRequest,
resp *RoomAliasExistsResponse,
) error
// Check whether a user ID exists within any application service namespaces
UserIDExists(
ctx context.Context,
req *UserIDExistsRequest,
resp *UserIDExistsResponse,
) error
Locations(ctx context.Context, req *LocationRequest, resp *LocationResponse) error
User(ctx context.Context, request *UserRequest, response *UserResponse) error
Protocols(ctx context.Context, req *ProtocolRequest, resp *ProtocolResponse) error
}
// RoomAliasExistsRequest is a request to an application service
// about whether a room alias exists
type RoomAliasExistsRequest struct {
@ -81,97 +65,93 @@ type UserIDExistsResponse struct {
UserIDExists bool `json:"exists"`
}
const (
ASProtocolLegacyPath = "/_matrix/app/unstable/thirdparty/protocol/"
ASUserLegacyPath = "/_matrix/app/unstable/thirdparty/user"
ASLocationLegacyPath = "/_matrix/app/unstable/thirdparty/location"
ASRoomAliasExistsLegacyPath = "/rooms/"
ASUserExistsLegacyPath = "/users/"
ASProtocolPath = "/_matrix/app/v1/thirdparty/protocol/"
ASUserPath = "/_matrix/app/v1/thirdparty/user"
ASLocationPath = "/_matrix/app/v1/thirdparty/location"
ASRoomAliasExistsPath = "/_matrix/app/v1/rooms/"
ASUserExistsPath = "/_matrix/app/v1/users/"
)
type ProtocolRequest struct {
Protocol string `json:"protocol,omitempty"`
// AppServiceQueryAPI is used to query user and room alias data from application
// services
type AppServiceQueryAPI interface {
// Check whether a room alias exists within any application service namespaces
RoomAliasExists(
ctx context.Context,
req *RoomAliasExistsRequest,
resp *RoomAliasExistsResponse,
) error
// Check whether a user ID exists within any application service namespaces
UserIDExists(
ctx context.Context,
req *UserIDExistsRequest,
resp *UserIDExistsResponse,
) error
}
type ProtocolResponse struct {
Protocols map[string]ASProtocolResponse `json:"protocols"`
Exists bool `json:"exists"`
// AppServiceRoomAliasExistsPath is the HTTP path for the RoomAliasExists API
const AppServiceRoomAliasExistsPath = "/api/appservice/RoomAliasExists"
// AppServiceUserIDExistsPath is the HTTP path for the UserIDExists API
const AppServiceUserIDExistsPath = "/api/appservice/UserIDExists"
// httpAppServiceQueryAPI contains the URL to an appservice query API and a
// reference to a httpClient used to reach it
type httpAppServiceQueryAPI struct {
appserviceURL string
httpClient *http.Client
}
type ASProtocolResponse struct {
FieldTypes map[string]FieldType `json:"field_types,omitempty"` // NOTSPEC: field_types is required by the spec
Icon string `json:"icon"`
Instances []ProtocolInstance `json:"instances"`
LocationFields []string `json:"location_fields"`
UserFields []string `json:"user_fields"`
// NewAppServiceQueryAPIHTTP creates a AppServiceQueryAPI implemented by talking
// to a HTTP POST API.
// If httpClient is nil then it uses http.DefaultClient
func NewAppServiceQueryAPIHTTP(
appserviceURL string,
httpClient *http.Client,
) AppServiceQueryAPI {
if httpClient == nil {
httpClient = http.DefaultClient
}
return &httpAppServiceQueryAPI{appserviceURL, httpClient}
}
type FieldType struct {
Placeholder string `json:"placeholder"`
Regexp string `json:"regexp"`
// RoomAliasExists implements AppServiceQueryAPI
func (h *httpAppServiceQueryAPI) RoomAliasExists(
ctx context.Context,
request *RoomAliasExistsRequest,
response *RoomAliasExistsResponse,
) error {
span, ctx := opentracing.StartSpanFromContext(ctx, "appserviceRoomAliasExists")
defer span.Finish()
apiURL := h.appserviceURL + AppServiceRoomAliasExistsPath
return commonHTTP.PostJSON(ctx, span, h.httpClient, apiURL, request, response)
}
type ProtocolInstance struct {
Description string `json:"desc"`
Icon string `json:"icon,omitempty"`
NetworkID string `json:"network_id,omitempty"` // NOTSPEC: network_id is required by the spec
Fields json.RawMessage `json:"fields,omitempty"` // NOTSPEC: fields is required by the spec
}
// UserIDExists implements AppServiceQueryAPI
func (h *httpAppServiceQueryAPI) UserIDExists(
ctx context.Context,
request *UserIDExistsRequest,
response *UserIDExistsResponse,
) error {
span, ctx := opentracing.StartSpanFromContext(ctx, "appserviceUserIDExists")
defer span.Finish()
type UserRequest struct {
Protocol string `json:"protocol"`
Params string `json:"params"`
apiURL := h.appserviceURL + AppServiceUserIDExistsPath
return commonHTTP.PostJSON(ctx, span, h.httpClient, apiURL, request, response)
}
type UserResponse struct {
Users []ASUserResponse `json:"users,omitempty"`
Exists bool `json:"exists,omitempty"`
}
type ASUserResponse struct {
Protocol string `json:"protocol"`
UserID string `json:"userid"`
Fields json.RawMessage `json:"fields"`
}
type LocationRequest struct {
Protocol string `json:"protocol"`
Params string `json:"params"`
}
type LocationResponse struct {
Locations []ASLocationResponse `json:"locations,omitempty"`
Exists bool `json:"exists,omitempty"`
}
type ASLocationResponse struct {
Alias string `json:"alias"`
Protocol string `json:"protocol"`
Fields json.RawMessage `json:"fields"`
}
// ErrProfileNotExists is returned when trying to lookup a user's profile that
// doesn't exist locally.
var ErrProfileNotExists = errors.New("no known profile for given user ID")
// RetrieveUserProfile is a wrapper that queries both the local database and
// application services for a given user's profile
// TODO: Remove this, it's called from federationapi and clientapi but is a pure function
func RetrieveUserProfile(
ctx context.Context,
userID string,
asAPI AppServiceInternalAPI,
profileAPI userapi.ProfileAPI,
asAPI AppServiceQueryAPI,
accountDB *accounts.Database,
) (*authtypes.Profile, error) {
localpart, _, err := gomatrixserverlib.SplitID('@', userID)
if err != nil {
return nil, err
}
// Try to query the user from the local database
profile, err := profileAPI.QueryProfile(ctx, userID)
if err == nil {
profile, err := accountDB.GetProfileByLocalpart(ctx, localpart)
if err != nil && err != sql.ErrNoRows {
return nil, err
} else if profile != nil {
return profile, nil
}
@ -184,11 +164,11 @@ func RetrieveUserProfile(
// If no user exists, return
if !userResp.UserIDExists {
return nil, ErrProfileNotExists
return nil, common.ErrProfileNoExists
}
// Try to query the user from the local database again
profile, err = profileAPI.QueryProfile(ctx, userID)
profile, err = accountDB.GetProfileByLocalpart(ctx, localpart)
if err != nil {
return nil, err
}
@ -16,96 +16,117 @@ package appservice
import (
"context"
"net/http"
"sync"
"github.com/matrix-org/dendrite/setup/jetstream"
"github.com/matrix-org/dendrite/setup/process"
"github.com/matrix-org/gomatrixserverlib/spec"
"github.com/sirupsen/logrus"
"time"
appserviceAPI "github.com/matrix-org/dendrite/appservice/api"
"github.com/matrix-org/dendrite/appservice/consumers"
"github.com/matrix-org/dendrite/appservice/query"
"github.com/matrix-org/dendrite/appservice/routing"
"github.com/matrix-org/dendrite/appservice/storage"
"github.com/matrix-org/dendrite/appservice/types"
"github.com/matrix-org/dendrite/appservice/workers"
"github.com/matrix-org/dendrite/clientapi/auth/storage/accounts"
"github.com/matrix-org/dendrite/clientapi/auth/storage/devices"
"github.com/matrix-org/dendrite/common/basecomponent"
"github.com/matrix-org/dendrite/common/config"
"github.com/matrix-org/dendrite/common/transactions"
roomserverAPI "github.com/matrix-org/dendrite/roomserver/api"
"github.com/matrix-org/dendrite/setup/config"
userapi "github.com/matrix-org/dendrite/userapi/api"
"github.com/matrix-org/gomatrixserverlib"
"github.com/sirupsen/logrus"
)
// NewInternalAPI returns a concerete implementation of the internal API. Callers
// can call functions directly on the returned API or via an HTTP interface using AddInternalRoutes.
func NewInternalAPI(
processContext *process.ProcessContext,
cfg *config.Dendrite,
natsInstance *jetstream.NATSInstance,
userAPI userapi.AppserviceUserAPI,
rsAPI roomserverAPI.RoomserverInternalAPI,
) appserviceAPI.AppServiceInternalAPI {
// Create appserivce query API with an HTTP client that will be used for all
// outbound and inbound requests (inbound only for the internal API)
appserviceQueryAPI := &query.AppServiceQueryAPI{
Cfg: &cfg.AppServiceAPI,
ProtocolCache: map[string]appserviceAPI.ASProtocolResponse{},
CacheMu: sync.Mutex{},
}
if len(cfg.Derived.ApplicationServices) == 0 {
return appserviceQueryAPI
// SetupAppServiceAPIComponent sets up and registers HTTP handlers for the AppServices
// component.
func SetupAppServiceAPIComponent(
base *basecomponent.BaseDendrite,
accountsDB *accounts.Database,
deviceDB *devices.Database,
federation *gomatrixserverlib.FederationClient,
roomserverAliasAPI roomserverAPI.RoomserverAliasAPI,
roomserverQueryAPI roomserverAPI.RoomserverQueryAPI,
transactionsCache *transactions.Cache,
) appserviceAPI.AppServiceQueryAPI {
// Create a connection to the appservice postgres DB
appserviceDB, err := storage.NewDatabase(string(base.Cfg.Database.AppService))
if err != nil {
logrus.WithError(err).Panicf("failed to connect to appservice db")
}
// Wrap application services in a type that relates the application service and
// a sync.Cond object that can be used to notify workers when there are new
// events to be sent out.
for _, appservice := range cfg.Derived.ApplicationServices {
workerStates := make([]types.ApplicationServiceWorkerState, len(base.Cfg.Derived.ApplicationServices))
for i, appservice := range base.Cfg.Derived.ApplicationServices {
m := sync.Mutex{}
ws := types.ApplicationServiceWorkerState{
AppService: appservice,
Cond: sync.NewCond(&m),
}
workerStates[i] = ws
// Create bot account for this AS if it doesn't already exist
if err := generateAppServiceAccount(userAPI, appservice, cfg.Global.ServerName); err != nil {
if err = generateAppServiceAccount(accountsDB, deviceDB, appservice); err != nil {
logrus.WithFields(logrus.Fields{
"appservice": appservice.ID,
}).WithError(err).Panicf("failed to generate bot account for appservice")
}
}
// Only consume if we actually have ASes to track, else we'll just chew cycles needlessly.
// We can't add ASes at runtime so this is safe to do.
js, _ := natsInstance.Prepare(processContext, &cfg.Global.JetStream)
// Create appserivce query API with an HTTP client that will be used for all
// outbound and inbound requests (inbound only for the internal API)
appserviceQueryAPI := query.AppServiceQueryAPI{
HTTPClient: &http.Client{
Timeout: time.Second * 30,
},
Cfg: base.Cfg,
}
appserviceQueryAPI.SetupHTTP(http.DefaultServeMux)
consumer := consumers.NewOutputRoomEventConsumer(
processContext, &cfg.AppServiceAPI,
js, rsAPI,
base.Cfg, base.KafkaConsumer, accountsDB, appserviceDB,
roomserverQueryAPI, roomserverAliasAPI, workerStates,
)
if err := consumer.Start(); err != nil {
logrus.WithError(err).Panicf("failed to start appservice roomserver consumer")
}
return appserviceQueryAPI
// Create application service transaction workers
if err := workers.SetupTransactionWorkers(appserviceDB, workerStates); err != nil {
logrus.WithError(err).Panicf("failed to start app service transaction workers")
}
// Set up HTTP Endpoints
routing.Setup(
base.APIMux, *base.Cfg, roomserverQueryAPI, roomserverAliasAPI,
accountsDB, federation, transactionsCache,
)
return &appserviceQueryAPI
}
// generateAppServiceAccounts creates a dummy account based off the
// `sender_localpart` field of each application service if it doesn't
// exist already
func generateAppServiceAccount(
userAPI userapi.AppserviceUserAPI,
accountsDB *accounts.Database,
deviceDB *devices.Database,
as config.ApplicationService,
serverName spec.ServerName,
) error {
var accRes userapi.PerformAccountCreationResponse
err := userAPI.PerformAccountCreation(context.Background(), &userapi.PerformAccountCreationRequest{
AccountType: userapi.AccountTypeAppService,
Localpart: as.SenderLocalpart,
ServerName: serverName,
AppServiceID: as.ID,
OnConflict: userapi.ConflictUpdate,
}, &accRes)
ctx := context.Background()
// Create an account for the application service
acc, err := accountsDB.CreateAccount(ctx, as.SenderLocalpart, "", as.ID)
if err != nil {
return err
} else if acc == nil {
// This account already exists
return nil
}
var devRes userapi.PerformDeviceCreationResponse
err = userAPI.PerformDeviceCreation(context.Background(), &userapi.PerformDeviceCreationRequest{
Localpart: as.SenderLocalpart,
ServerName: serverName,
AccessToken: as.ASToken,
DeviceID: &as.SenderLocalpart,
DeviceDisplayName: &as.SenderLocalpart,
NoDeviceListUpdate: true,
}, &devRes)
// Create a dummy device with a dummy token for the application service
_, err = deviceDB.CreateDevice(ctx, as.SenderLocalpart, nil, as.ASToken, &as.SenderLocalpart)
return err
}
@ -1,606 +0,0 @@
package appservice_test
import (
"context"
"encoding/json"
"fmt"
"net"
"net/http"
"net/http/httptest"
"path"
"reflect"
"regexp"
"strings"
"testing"
"time"
"github.com/matrix-org/dendrite/clientapi"
"github.com/matrix-org/dendrite/clientapi/auth/authtypes"
"github.com/matrix-org/dendrite/federationapi/statistics"
"github.com/matrix-org/dendrite/internal/httputil"
"github.com/matrix-org/dendrite/roomserver/types"
"github.com/matrix-org/dendrite/syncapi"
uapi "github.com/matrix-org/dendrite/userapi/api"
"github.com/matrix-org/gomatrixserverlib"
"github.com/matrix-org/util"
"github.com/nats-io/nats.go"
"github.com/stretchr/testify/assert"
"github.com/tidwall/gjson"
"github.com/matrix-org/dendrite/appservice"
"github.com/matrix-org/dendrite/appservice/api"
"github.com/matrix-org/dendrite/appservice/consumers"
"github.com/matrix-org/dendrite/internal/caching"
"github.com/matrix-org/dendrite/internal/sqlutil"
"github.com/matrix-org/dendrite/roomserver"
rsapi "github.com/matrix-org/dendrite/roomserver/api"
"github.com/matrix-org/dendrite/setup/config"
"github.com/matrix-org/dendrite/setup/jetstream"
"github.com/matrix-org/dendrite/test"
"github.com/matrix-org/dendrite/userapi"
"github.com/matrix-org/gomatrixserverlib/spec"
"github.com/matrix-org/dendrite/test/testrig"
)
var testIsBlacklistedOrBackingOff = func(s spec.ServerName) (*statistics.ServerStatistics, error) {
return &statistics.ServerStatistics{}, nil
}
func TestAppserviceInternalAPI(t *testing.T) {
// Set expected results
existingProtocol := "irc"
wantLocationResponse := []api.ASLocationResponse{{Protocol: existingProtocol, Fields: []byte("{}")}}
wantUserResponse := []api.ASUserResponse{{Protocol: existingProtocol, Fields: []byte("{}")}}
wantProtocolResponse := api.ASProtocolResponse{Instances: []api.ProtocolInstance{{Fields: []byte("{}")}}}
wantProtocolResult := map[string]api.ASProtocolResponse{
existingProtocol: wantProtocolResponse,
}
// create a dummy AS url, handling some cases
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
switch {
case strings.Contains(r.URL.Path, "location"):
// Check if we've got an existing protocol, if so, return a proper response.
if r.URL.Path[len(r.URL.Path)-len(existingProtocol):] == existingProtocol {
if err := json.NewEncoder(w).Encode(wantLocationResponse); err != nil {
t.Fatalf("failed to encode response: %s", err)
}
return
}
if err := json.NewEncoder(w).Encode([]api.ASLocationResponse{}); err != nil {
t.Fatalf("failed to encode response: %s", err)
}
return
case strings.Contains(r.URL.Path, "user"):
if r.URL.Path[len(r.URL.Path)-len(existingProtocol):] == existingProtocol {
if err := json.NewEncoder(w).Encode(wantUserResponse); err != nil {
t.Fatalf("failed to encode response: %s", err)
}
return
}
if err := json.NewEncoder(w).Encode([]api.UserResponse{}); err != nil {
t.Fatalf("failed to encode response: %s", err)
}
return
case strings.Contains(r.URL.Path, "protocol"):
if r.URL.Path[len(r.URL.Path)-len(existingProtocol):] == existingProtocol {
if err := json.NewEncoder(w).Encode(wantProtocolResponse); err != nil {
t.Fatalf("failed to encode response: %s", err)
}
return
}
if err := json.NewEncoder(w).Encode(nil); err != nil {
t.Fatalf("failed to encode response: %s", err)
}
return
default:
t.Logf("hit location: %s", r.URL.Path)
}
}))
// The test cases to run
runCases := func(t *testing.T, testAPI api.AppServiceInternalAPI) {
t.Run("UserIDExists", func(t *testing.T) {
testUserIDExists(t, testAPI, "@as-testing:test", true)
testUserIDExists(t, testAPI, "@as1-testing:test", false)
})
t.Run("AliasExists", func(t *testing.T) {
testAliasExists(t, testAPI, "@asroom-testing:test", true)
testAliasExists(t, testAPI, "@asroom1-testing:test", false)
})
t.Run("Locations", func(t *testing.T) {
testLocations(t, testAPI, existingProtocol, wantLocationResponse)
testLocations(t, testAPI, "abc", nil)
})
t.Run("User", func(t *testing.T) {
testUser(t, testAPI, existingProtocol, wantUserResponse)
testUser(t, testAPI, "abc", nil)
})
t.Run("Protocols", func(t *testing.T) {
testProtocol(t, testAPI, existingProtocol, wantProtocolResult)
testProtocol(t, testAPI, existingProtocol, wantProtocolResult) // tests the cache
testProtocol(t, testAPI, "", wantProtocolResult) // tests getting all protocols
testProtocol(t, testAPI, "abc", nil)
})
}
test.WithAllDatabases(t, func(t *testing.T, dbType test.DBType) {
cfg, ctx, close := testrig.CreateConfig(t, dbType)
defer close()
// Create a dummy application service
as := &config.ApplicationService{
ID: "someID",
URL: srv.URL,
ASToken: "",
HSToken: "",
SenderLocalpart: "senderLocalPart",
NamespaceMap: map[string][]config.ApplicationServiceNamespace{
"users": {{RegexpObject: regexp.MustCompile("as-.*")}},
"aliases": {{RegexpObject: regexp.MustCompile("asroom-.*")}},
},
Protocols: []string{existingProtocol},
}
as.CreateHTTPClient(cfg.AppServiceAPI.DisableTLSValidation)
cfg.AppServiceAPI.Derived.ApplicationServices = []config.ApplicationService{*as}
t.Cleanup(func() {
ctx.ShutdownDendrite()
ctx.WaitForShutdown()
})
caches := caching.NewRistrettoCache(128*1024*1024, time.Hour, caching.DisableMetrics)
// Create required internal APIs
natsInstance := jetstream.NATSInstance{}
cm := sqlutil.NewConnectionManager(ctx, cfg.Global.DatabaseOptions)
rsAPI := roomserver.NewInternalAPI(ctx, cfg, cm, &natsInstance, caches, caching.DisableMetrics)
rsAPI.SetFederationAPI(nil, nil)
usrAPI := userapi.NewInternalAPI(ctx, cfg, cm, &natsInstance, rsAPI, nil, caching.DisableMetrics, testIsBlacklistedOrBackingOff)
asAPI := appservice.NewInternalAPI(ctx, cfg, &natsInstance, usrAPI, rsAPI)
runCases(t, asAPI)
})
}
func TestAppserviceInternalAPI_UnixSocket_Simple(t *testing.T) {
// Set expected results
existingProtocol := "irc"
wantLocationResponse := []api.ASLocationResponse{{Protocol: existingProtocol, Fields: []byte("{}")}}
wantUserResponse := []api.ASUserResponse{{Protocol: existingProtocol, Fields: []byte("{}")}}
wantProtocolResponse := api.ASProtocolResponse{Instances: []api.ProtocolInstance{{Fields: []byte("{}")}}}
// create a dummy AS url, handling some cases
srv := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
switch {
case strings.Contains(r.URL.Path, "location"):
// Check if we've got an existing protocol, if so, return a proper response.
if r.URL.Path[len(r.URL.Path)-len(existingProtocol):] == existingProtocol {
if err := json.NewEncoder(w).Encode(wantLocationResponse); err != nil {
t.Fatalf("failed to encode response: %s", err)
}
return
}
if err := json.NewEncoder(w).Encode([]api.ASLocationResponse{}); err != nil {
t.Fatalf("failed to encode response: %s", err)
}
return
case strings.Contains(r.URL.Path, "user"):
if r.URL.Path[len(r.URL.Path)-len(existingProtocol):] == existingProtocol {
if err := json.NewEncoder(w).Encode(wantUserResponse); err != nil {
t.Fatalf("failed to encode response: %s", err)
}
return
}
if err := json.NewEncoder(w).Encode([]api.UserResponse{}); err != nil {
t.Fatalf("failed to encode response: %s", err)
}
return
case strings.Contains(r.URL.Path, "protocol"):
if r.URL.Path[len(r.URL.Path)-len(existingProtocol):] == existingProtocol {
if err := json.NewEncoder(w).Encode(wantProtocolResponse); err != nil {
t.Fatalf("failed to encode response: %s", err)
}
return
}
if err := json.NewEncoder(w).Encode(nil); err != nil {
t.Fatalf("failed to encode response: %s", err)
}
return
default:
t.Logf("hit location: %s", r.URL.Path)
}
}))
tmpDir := t.TempDir()
socket := path.Join(tmpDir, "socket")
l, err := net.Listen("unix", socket)
assert.NoError(t, err)
_ = srv.Listener.Close()
srv.Listener = l
srv.Start()
defer srv.Close()
cfg, ctx, tearDown := testrig.CreateConfig(t, test.DBTypeSQLite)
defer tearDown()
// Create a dummy application service
as := &config.ApplicationService{
ID: "someID",
URL: fmt.Sprintf("unix://%s", socket),
ASToken: "",
HSToken: "",
SenderLocalpart: "senderLocalPart",
NamespaceMap: map[string][]config.ApplicationServiceNamespace{
"users": {{RegexpObject: regexp.MustCompile("as-.*")}},
"aliases": {{RegexpObject: regexp.MustCompile("asroom-.*")}},
},
Protocols: []string{existingProtocol},
}
as.CreateHTTPClient(cfg.AppServiceAPI.DisableTLSValidation)
cfg.AppServiceAPI.Derived.ApplicationServices = []config.ApplicationService{*as}
t.Cleanup(func() {
ctx.ShutdownDendrite()
ctx.WaitForShutdown()
})
caches := caching.NewRistrettoCache(128*1024*1024, time.Hour, caching.DisableMetrics)
// Create required internal APIs
natsInstance := jetstream.NATSInstance{}
cm := sqlutil.NewConnectionManager(ctx, cfg.Global.DatabaseOptions)
rsAPI := roomserver.NewInternalAPI(ctx, cfg, cm, &natsInstance, caches, caching.DisableMetrics)
rsAPI.SetFederationAPI(nil, nil)
usrAPI := userapi.NewInternalAPI(ctx, cfg, cm, &natsInstance, rsAPI, nil, caching.DisableMetrics, testIsBlacklistedOrBackingOff)
asAPI := appservice.NewInternalAPI(ctx, cfg, &natsInstance, usrAPI, rsAPI)
t.Run("UserIDExists", func(t *testing.T) {
testUserIDExists(t, asAPI, "@as-testing:test", true)
testUserIDExists(t, asAPI, "@as1-testing:test", false)
})
}
func testUserIDExists(t *testing.T, asAPI api.AppServiceInternalAPI, userID string, wantExists bool) {
ctx := context.Background()
userResp := &api.UserIDExistsResponse{}
if err := asAPI.UserIDExists(ctx, &api.UserIDExistsRequest{
UserID: userID,
}, userResp); err != nil {
t.Errorf("failed to get userID: %s", err)
}
if userResp.UserIDExists != wantExists {
t.Errorf("unexpected result for UserIDExists(%s): %v, expected %v", userID, userResp.UserIDExists, wantExists)
}
}
func testAliasExists(t *testing.T, asAPI api.AppServiceInternalAPI, alias string, wantExists bool) {
ctx := context.Background()
aliasResp := &api.RoomAliasExistsResponse{}
if err := asAPI.RoomAliasExists(ctx, &api.RoomAliasExistsRequest{
Alias: alias,
}, aliasResp); err != nil {
t.Errorf("failed to get alias: %s", err)
}
if aliasResp.AliasExists != wantExists {
t.Errorf("unexpected result for RoomAliasExists(%s): %v, expected %v", alias, aliasResp.AliasExists, wantExists)
}
}
func testLocations(t *testing.T, asAPI api.AppServiceInternalAPI, proto string, wantResult []api.ASLocationResponse) {
ctx := context.Background()
locationResp := &api.LocationResponse{}
if err := asAPI.Locations(ctx, &api.LocationRequest{
Protocol: proto,
}, locationResp); err != nil {
t.Errorf("failed to get locations: %s", err)
}
if !reflect.DeepEqual(locationResp.Locations, wantResult) {
t.Errorf("unexpected result for Locations(%s): %+v, expected %+v", proto, locationResp.Locations, wantResult)
}
}
func testUser(t *testing.T, asAPI api.AppServiceInternalAPI, proto string, wantResult []api.ASUserResponse) {
ctx := context.Background()
userResp := &api.UserResponse{}
if err := asAPI.User(ctx, &api.UserRequest{
Protocol: proto,
}, userResp); err != nil {
t.Errorf("failed to get user: %s", err)
}
if !reflect.DeepEqual(userResp.Users, wantResult) {
t.Errorf("unexpected result for User(%s): %+v, expected %+v", proto, userResp.Users, wantResult)
}
}
func testProtocol(t *testing.T, asAPI api.AppServiceInternalAPI, proto string, wantResult map[string]api.ASProtocolResponse) {
ctx := context.Background()
protoResp := &api.ProtocolResponse{}
if err := asAPI.Protocols(ctx, &api.ProtocolRequest{
Protocol: proto,
}, protoResp); err != nil {
t.Errorf("failed to get Protocols: %s", err)
}
if !reflect.DeepEqual(protoResp.Protocols, wantResult) {
t.Errorf("unexpected result for Protocols(%s): %+v, expected %+v", proto, protoResp.Protocols[proto], wantResult)
}
}
// Tests that the roomserver consumer only receives one invite
func TestRoomserverConsumerOneInvite(t *testing.T) {
alice := test.NewUser(t)
bob := test.NewUser(t)
room := test.NewRoom(t, alice)
// Invite Bob
room.CreateAndInsert(t, alice, spec.MRoomMember, map[string]interface{}{
"membership": "invite",
}, test.WithStateKey(bob.ID))
test.WithAllDatabases(t, func(t *testing.T, dbType test.DBType) {
cfg, processCtx, closeDB := testrig.CreateConfig(t, dbType)
defer closeDB()
cm := sqlutil.NewConnectionManager(processCtx, cfg.Global.DatabaseOptions)
natsInstance := &jetstream.NATSInstance{}
evChan := make(chan struct{})
// create a dummy AS url, handling the events
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
var txn consumers.ApplicationServiceTransaction
err := json.NewDecoder(r.Body).Decode(&txn)
if err != nil {
t.Fatal(err)
}
for _, ev := range txn.Events {
if ev.Type != spec.MRoomMember {
continue
}
// Usually we would check the event content for the membership, but since
// we only invited bob, this should be fine for this test.
if ev.StateKey != nil && *ev.StateKey == bob.ID {
evChan <- struct{}{}
}
}
}))
defer srv.Close()
as := &config.ApplicationService{
ID: "someID",
URL: srv.URL,
ASToken: "",
HSToken: "",
SenderLocalpart: "senderLocalPart",
NamespaceMap: map[string][]config.ApplicationServiceNamespace{
"users": {{RegexpObject: regexp.MustCompile(bob.ID)}},
"aliases": {{RegexpObject: regexp.MustCompile(room.ID)}},
},
}
as.CreateHTTPClient(cfg.AppServiceAPI.DisableTLSValidation)
// Create a dummy application service
cfg.AppServiceAPI.Derived.ApplicationServices = []config.ApplicationService{*as}
caches := caching.NewRistrettoCache(128*1024*1024, time.Hour, caching.DisableMetrics)
// Create required internal APIs
rsAPI := roomserver.NewInternalAPI(processCtx, cfg, cm, natsInstance, caches, caching.DisableMetrics)
rsAPI.SetFederationAPI(nil, nil)
usrAPI := userapi.NewInternalAPI(processCtx, cfg, cm, natsInstance, rsAPI, nil, caching.DisableMetrics, testIsBlacklistedOrBackingOff)
// start the consumer
appservice.NewInternalAPI(processCtx, cfg, natsInstance, usrAPI, rsAPI)
// Create the room
if err := rsapi.SendEvents(context.Background(), rsAPI, rsapi.KindNew, room.Events(), "test", "test", "test", nil, false); err != nil {
t.Fatalf("failed to send events: %v", err)
}
var seenInvitesForBob int
waitLoop:
for {
select {
case <-time.After(time.Millisecond * 50): // wait for the AS to process the events
break waitLoop
case <-evChan:
seenInvitesForBob++
if seenInvitesForBob != 1 {
t.Fatalf("received unexpected invites: %d", seenInvitesForBob)
}
}
}
close(evChan)
})
}
// Note: If this test panics, it is because we timed out waiting for the
// join event to come through to the appservice and we close the DB/shutdown Dendrite. This makes the
// syncAPI unhappy, as it is unable to write to the database.
func TestOutputAppserviceEvent(t *testing.T) {
alice := test.NewUser(t)
bob := test.NewUser(t)
test.WithAllDatabases(t, func(t *testing.T, dbType test.DBType) {
cfg, processCtx, closeDB := testrig.CreateConfig(t, dbType)
defer closeDB()
cm := sqlutil.NewConnectionManager(processCtx, cfg.Global.DatabaseOptions)
natsInstance := &jetstream.NATSInstance{}
evChan := make(chan struct{})
caches := caching.NewRistrettoCache(128*1024*1024, time.Hour, caching.DisableMetrics)
// Create required internal APIs
rsAPI := roomserver.NewInternalAPI(processCtx, cfg, cm, natsInstance, caches, caching.DisableMetrics)
rsAPI.SetFederationAPI(nil, nil)
// Create the router, so we can hit `/joined_members`
routers := httputil.NewRouters()
accessTokens := map[*test.User]userDevice{
bob: {},
}
usrAPI := userapi.NewInternalAPI(processCtx, cfg, cm, natsInstance, rsAPI, nil, caching.DisableMetrics, testIsBlacklistedOrBackingOff)
clientapi.AddPublicRoutes(processCtx, routers, cfg, natsInstance, nil, rsAPI, nil, nil, nil, usrAPI, nil, nil, caching.DisableMetrics)
createAccessTokens(t, accessTokens, usrAPI, processCtx.Context(), routers)
room := test.NewRoom(t, alice)
// Invite Bob
room.CreateAndInsert(t, alice, spec.MRoomMember, map[string]interface{}{
"membership": "invite",
}, test.WithStateKey(bob.ID))
// create a dummy AS url, handling the events
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
var txn consumers.ApplicationServiceTransaction
err := json.NewDecoder(r.Body).Decode(&txn)
if err != nil {
t.Fatal(err)
}
for _, ev := range txn.Events {
if ev.Type != spec.MRoomMember {
continue
}
if ev.StateKey != nil && *ev.StateKey == bob.ID {
membership := gjson.GetBytes(ev.Content, "membership").Str
t.Logf("Processing membership: %s", membership)
switch membership {
case spec.Invite:
// Accept the invite
joinEv := room.CreateAndInsert(t, bob, spec.MRoomMember, map[string]interface{}{
"membership": "join",
}, test.WithStateKey(bob.ID))
if err := rsapi.SendEvents(context.Background(), rsAPI, rsapi.KindNew, []*types.HeaderedEvent{joinEv}, "test", "test", "test", nil, false); err != nil {
t.Fatalf("failed to send events: %v", err)
}
case spec.Join: // the AS has received the join event, now hit `/joined_members` to validate that
rec := httptest.NewRecorder()
req := httptest.NewRequest(http.MethodGet, "/_matrix/client/v3/rooms/"+room.ID+"/joined_members", nil)
req.Header.Set("Authorization", "Bearer "+accessTokens[bob].accessToken)
routers.Client.ServeHTTP(rec, req)
if rec.Code != http.StatusOK {
t.Fatalf("expected HTTP 200, got %d: %s", rec.Code, rec.Body.String())
}
// Both Alice and Bob should be joined. If not, we have a race condition
if !gjson.GetBytes(rec.Body.Bytes(), "joined."+alice.ID).Exists() {
t.Errorf("Alice is not joined to the room") // in theory should not happen
}
if !gjson.GetBytes(rec.Body.Bytes(), "joined."+bob.ID).Exists() {
t.Errorf("Bob is not joined to the room")
}
evChan <- struct{}{}
default:
t.Fatalf("Unexpected membership: %s", membership)
}
}
}
}))
defer srv.Close()
as := &config.ApplicationService{
ID: "someID",
URL: srv.URL,
ASToken: "",
HSToken: "",
SenderLocalpart: "senderLocalPart",
NamespaceMap: map[string][]config.ApplicationServiceNamespace{
"users": {{RegexpObject: regexp.MustCompile(bob.ID)}},
"aliases": {{RegexpObject: regexp.MustCompile(room.ID)}},
},
}
as.CreateHTTPClient(cfg.AppServiceAPI.DisableTLSValidation)
// Create a dummy application service
cfg.AppServiceAPI.Derived.ApplicationServices = []config.ApplicationService{*as}
// Prepare AS Streams on the old topic to validate that they get deleted
jsCtx, _ := natsInstance.Prepare(processCtx, &cfg.Global.JetStream)
token := jetstream.Tokenise(as.ID)
if err := jetstream.JetStreamConsumer(
processCtx.Context(), jsCtx, cfg.Global.JetStream.Prefixed(jetstream.OutputRoomEvent),
cfg.Global.JetStream.Durable("Appservice_"+token),
50, // maximum number of events to send in a single transaction
func(ctx context.Context, msgs []*nats.Msg) bool {
return true
},
); err != nil {
t.Fatal(err)
}
// Start the syncAPI to have `/joined_members` available
syncapi.AddPublicRoutes(processCtx, routers, cfg, cm, natsInstance, usrAPI, rsAPI, caches, caching.DisableMetrics)
// start the consumer
appservice.NewInternalAPI(processCtx, cfg, natsInstance, usrAPI, rsAPI)
// At this point, the old JetStream consumers should be deleted
for consumer := range jsCtx.Consumers(cfg.Global.JetStream.Prefixed(jetstream.OutputRoomEvent)) {
if consumer.Name == cfg.Global.JetStream.Durable("Appservice_"+token)+"Pull" {
t.Fatalf("Consumer still exists")
}
}
// Create the room, this triggers the AS to receive an invite for Bob.
if err := rsapi.SendEvents(context.Background(), rsAPI, rsapi.KindNew, room.Events(), "test", "test", "test", nil, false); err != nil {
t.Fatalf("failed to send events: %v", err)
}
select {
// Pretty generous timeout duration...
case <-time.After(time.Millisecond * 1000): // wait for the AS to process the events
t.Errorf("Timed out waiting for join event")
case <-evChan:
}
close(evChan)
})
}
type userDevice struct {
accessToken string
deviceID string
password string
}
func createAccessTokens(t *testing.T, accessTokens map[*test.User]userDevice, userAPI uapi.UserInternalAPI, ctx context.Context, routers httputil.Routers) {
t.Helper()
for u := range accessTokens {
localpart, serverName, _ := gomatrixserverlib.SplitID('@', u.ID)
userRes := &uapi.PerformAccountCreationResponse{}
password := util.RandomString(8)
if err := userAPI.PerformAccountCreation(ctx, &uapi.PerformAccountCreationRequest{
AccountType: u.AccountType,
Localpart: localpart,
ServerName: serverName,
Password: password,
}, userRes); err != nil {
t.Errorf("failed to create account: %s", err)
}
req := test.NewRequest(t, http.MethodPost, "/_matrix/client/v3/login", test.WithJSONBody(t, map[string]interface{}{
"type": authtypes.LoginTypePassword,
"identifier": map[string]interface{}{
"type": "m.id.user",
"user": u.ID,
},
"password": password,
}))
rec := httptest.NewRecorder()
routers.Client.ServeHTTP(rec, req)
if rec.Code != http.StatusOK {
t.Fatalf("failed to login: %s", rec.Body.String())
}
accessTokens[u] = userDevice{
accessToken: gjson.GetBytes(rec.Body.Bytes(), "access_token").String(),
deviceID: gjson.GetBytes(rec.Body.Bytes(), "device_id").String(),
password: password,
}
}
}

View file

@@ -15,269 +15,186 @@
package consumers
import (
"bytes"
"context"
"encoding/json"
"fmt"
"math"
"net/http"
"net/url"
"strconv"
"time"
"github.com/matrix-org/gomatrixserverlib"
"github.com/matrix-org/gomatrixserverlib/spec"
"github.com/nats-io/nats.go"
"github.com/matrix-org/dendrite/appservice/storage"
"github.com/matrix-org/dendrite/appservice/types"
"github.com/matrix-org/dendrite/clientapi/auth/storage/accounts"
"github.com/matrix-org/dendrite/common"
"github.com/matrix-org/dendrite/common/config"
"github.com/matrix-org/dendrite/roomserver/api"
"github.com/matrix-org/dendrite/roomserver/types"
"github.com/matrix-org/dendrite/setup/config"
"github.com/matrix-org/dendrite/setup/jetstream"
"github.com/matrix-org/dendrite/setup/process"
"github.com/matrix-org/dendrite/syncapi/synctypes"
"github.com/matrix-org/gomatrixserverlib"
log "github.com/sirupsen/logrus"
sarama "gopkg.in/Shopify/sarama.v1"
)
// ApplicationServiceTransaction is the transaction that is sent off to an
// application service.
type ApplicationServiceTransaction struct {
Events []synctypes.ClientEvent `json:"events"`
}
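// For illustration: once marshalled, the transaction body that gets PUT to
// /_matrix/app/v1/transactions/{txnId} looks roughly like {"events": [...client-format events...]}.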
// OutputRoomEventConsumer consumes events that originated in the room server.
type OutputRoomEventConsumer struct {
ctx context.Context
cfg *config.AppServiceAPI
jetstream nats.JetStreamContext
topic string
rsAPI api.AppserviceRoomserverAPI
}
type appserviceState struct {
*config.ApplicationService
backoff int
roomServerConsumer *common.ContinualConsumer
db *accounts.Database
asDB *storage.Database
query api.RoomserverQueryAPI
alias api.RoomserverAliasAPI
serverName string
workerStates []types.ApplicationServiceWorkerState
}
// NewOutputRoomEventConsumer creates a new OutputRoomEventConsumer. Call
// Start() to begin consuming from room servers.
func NewOutputRoomEventConsumer(
process *process.ProcessContext,
cfg *config.AppServiceAPI,
js nats.JetStreamContext,
rsAPI api.AppserviceRoomserverAPI,
cfg *config.Dendrite,
kafkaConsumer sarama.Consumer,
store *accounts.Database,
appserviceDB *storage.Database,
queryAPI api.RoomserverQueryAPI,
aliasAPI api.RoomserverAliasAPI,
workerStates []types.ApplicationServiceWorkerState,
) *OutputRoomEventConsumer {
return &OutputRoomEventConsumer{
ctx: process.Context(),
cfg: cfg,
jetstream: js,
topic: cfg.Matrix.JetStream.Prefixed(jetstream.OutputAppserviceEvent),
rsAPI: rsAPI,
consumer := common.ContinualConsumer{
Topic: string(cfg.Kafka.Topics.OutputRoomEvent),
Consumer: kafkaConsumer,
PartitionStore: store,
}
s := &OutputRoomEventConsumer{
roomServerConsumer: &consumer,
db: store,
asDB: appserviceDB,
query: queryAPI,
alias: aliasAPI,
serverName: string(cfg.Matrix.ServerName),
workerStates: workerStates,
}
consumer.ProcessMessage = s.onMessage
return s
}
// Start consuming from room servers
func (s *OutputRoomEventConsumer) Start() error {
durableNames := make([]string, 0, len(s.cfg.Derived.ApplicationServices))
for _, as := range s.cfg.Derived.ApplicationServices {
appsvc := as
state := &appserviceState{
ApplicationService: &appsvc,
}
token := jetstream.Tokenise(as.ID)
if err := jetstream.JetStreamConsumer(
s.ctx, s.jetstream, s.topic,
s.cfg.Matrix.JetStream.Durable("Appservice_"+token),
50, // maximum number of events to send in a single transaction
func(ctx context.Context, msgs []*nats.Msg) bool {
return s.onMessage(ctx, state, msgs)
},
nats.DeliverNew(), nats.ManualAck(),
); err != nil {
return fmt.Errorf("failed to create %q consumer: %w", token, err)
}
durableNames = append(durableNames, s.cfg.Matrix.JetStream.Durable("Appservice_"+token))
}
// Cleanup any consumers still existing on the OutputRoomEvent stream
// to avoid messages not being deleted
for _, consumerName := range durableNames {
err := s.jetstream.DeleteConsumer(s.cfg.Matrix.JetStream.Prefixed(jetstream.OutputRoomEvent), consumerName+"Pull")
if err != nil && err != nats.ErrConsumerNotFound {
return err
}
}
return nil
return s.roomServerConsumer.Start()
}
// onMessage is called when the appservice component receives a new event from
// the room server output log.
func (s *OutputRoomEventConsumer) onMessage(
ctx context.Context, state *appserviceState, msgs []*nats.Msg,
) bool {
log.WithField("appservice", state.ID).Tracef("Appservice worker received %d message(s) from roomserver", len(msgs))
events := make([]*types.HeaderedEvent, 0, len(msgs))
for _, msg := range msgs {
// Only handle events we care about
receivedType := api.OutputType(msg.Header.Get(jetstream.RoomEventType))
if receivedType != api.OutputTypeNewRoomEvent && receivedType != api.OutputTypeNewInviteEvent {
continue
}
// Parse out the event JSON
var output api.OutputEvent
if err := json.Unmarshal(msg.Data, &output); err != nil {
// If the message was invalid, log it and move on to the next message in the stream
log.WithField("appservice", state.ID).WithError(err).Errorf("Appservice failed to parse message, ignoring")
continue
}
switch output.Type {
case api.OutputTypeNewRoomEvent:
if output.NewRoomEvent == nil || !s.appserviceIsInterestedInEvent(ctx, output.NewRoomEvent.Event, state.ApplicationService) {
continue
}
events = append(events, output.NewRoomEvent.Event)
if len(output.NewRoomEvent.AddsStateEventIDs) > 0 {
newEventID := output.NewRoomEvent.Event.EventID()
eventsReq := &api.QueryEventsByIDRequest{
RoomID: output.NewRoomEvent.Event.RoomID().String(),
EventIDs: make([]string, 0, len(output.NewRoomEvent.AddsStateEventIDs)),
}
eventsRes := &api.QueryEventsByIDResponse{}
for _, eventID := range output.NewRoomEvent.AddsStateEventIDs {
if eventID != newEventID {
eventsReq.EventIDs = append(eventsReq.EventIDs, eventID)
}
}
if len(eventsReq.EventIDs) > 0 {
if err := s.rsAPI.QueryEventsByID(s.ctx, eventsReq, eventsRes); err != nil {
log.WithError(err).Errorf("s.rsAPI.QueryEventsByID failed")
return false
}
events = append(events, eventsRes.Events...)
}
}
default:
continue
}
func (s *OutputRoomEventConsumer) onMessage(msg *sarama.ConsumerMessage) error {
// Parse out the event JSON
var output api.OutputEvent
if err := json.Unmarshal(msg.Value, &output); err != nil {
// If the message was invalid, log it and move on to the next message in the stream
log.WithError(err).Errorf("roomserver output log: message parse failure")
return nil
}
// If there are no events selected for sending then we should
// ack the messages so that we don't get sent them again in the
// future.
if len(events) == 0 {
return true
if output.Type != api.OutputTypeNewRoomEvent {
log.WithField("type", output.Type).Debug(
"roomserver output log: ignoring unknown output type",
)
return nil
}
txnID := ""
// Try to get the message metadata, if we're able to, use the timestamp as the txnID
metadata, err := msgs[0].Metadata()
if err == nil {
txnID = strconv.Itoa(int(metadata.Timestamp.UnixNano()))
}
ev := output.NewRoomEvent.Event
log.WithFields(log.Fields{
"event_id": ev.EventID(),
"room_id": ev.RoomID(),
"type": ev.Type(),
}).Info("appservice received an event from roomserver")
// Send event to any relevant application services. If we hit
// an error here, return false, so that we negatively ack.
log.WithField("appservice", state.ID).Debugf("Appservice worker sending %d events(s) from roomserver", len(events))
return s.sendEvents(ctx, state, events, txnID) == nil
missingEvents, err := s.lookupMissingStateEvents(output.NewRoomEvent.AddsStateEventIDs, ev)
if err != nil {
return err
}
events := append(missingEvents, ev)
// Send event to any relevant application services
return s.filterRoomserverEvents(context.TODO(), events)
}
// sendEvents passes events to the appservice by using the transactions
// endpoint. It will block for the backoff period if necessary.
func (s *OutputRoomEventConsumer) sendEvents(
ctx context.Context, state *appserviceState,
events []*types.HeaderedEvent,
txnID string,
// lookupMissingStateEvents looks up the state events that are added by a new event,
// and returns any not already present.
func (s *OutputRoomEventConsumer) lookupMissingStateEvents(
addsStateEventIDs []string, event gomatrixserverlib.Event,
) ([]gomatrixserverlib.Event, error) {
// Fast path if there aren't any new state events.
if len(addsStateEventIDs) == 0 {
return []gomatrixserverlib.Event{}, nil
}
// Fast path if the only state event added is the event itself.
if len(addsStateEventIDs) == 1 && addsStateEventIDs[0] == event.EventID() {
return []gomatrixserverlib.Event{}, nil
}
result := []gomatrixserverlib.Event{}
missing := []string{}
for _, id := range addsStateEventIDs {
if id != event.EventID() {
// If the event isn't the current one, add it to the list of events
// to retrieve from the roomserver
missing = append(missing, id)
}
}
// Request the missing events from the roomserver
eventReq := api.QueryEventsByIDRequest{EventIDs: missing}
var eventResp api.QueryEventsByIDResponse
if err := s.query.QueryEventsByID(context.TODO(), &eventReq, &eventResp); err != nil {
return nil, err
}
result = append(result, eventResp.Events...)
return result, nil
}
// filterRoomserverEvents takes in events and decides whether any of them need
// to be passed on to an external application service. It does this by checking
// each namespace of each registered application service, and if there is a
// match, adds the event to the queue for events to be sent to a particular
// application service.
func (s *OutputRoomEventConsumer) filterRoomserverEvents(
ctx context.Context,
events []gomatrixserverlib.Event,
) error {
// Create the transaction body.
transaction, err := json.Marshal(
ApplicationServiceTransaction{
Events: synctypes.ToClientEvents(gomatrixserverlib.ToPDUs(events), synctypes.FormatAll, func(roomID spec.RoomID, senderID spec.SenderID) (*spec.UserID, error) {
return s.rsAPI.QueryUserIDForSender(ctx, roomID, senderID)
}),
},
)
if err != nil {
return err
for _, ws := range s.workerStates {
for _, event := range events {
// Check if this event is interesting to this application service
if s.appserviceIsInterestedInEvent(ctx, event, ws.AppService) {
// Queue this event to be sent off to the application service
if err := s.asDB.StoreEvent(ctx, ws.AppService.ID, &event); err != nil {
log.WithError(err).Warn("failed to insert incoming event into appservices database")
} else {
// Tell our worker to send out new messages by updating remaining message
// count and waking them up with a broadcast
ws.NotifyNewEvents()
}
}
}
}
// If txnID is not defined, generate one from the events.
if txnID == "" {
txnID = fmt.Sprintf("%d_%d", events[0].PDU.OriginServerTS(), len(transaction))
}
// Send the transaction to the appservice.
// https://spec.matrix.org/v1.9/application-service-api/#pushing-events
path := "_matrix/app/v1/transactions"
if s.cfg.LegacyPaths {
path = "transactions"
}
address := fmt.Sprintf("%s/%s/%s", state.RequestUrl(), path, txnID)
if s.cfg.LegacyAuth {
address += "?access_token=" + url.QueryEscape(state.HSToken)
}
req, err := http.NewRequestWithContext(ctx, "PUT", address, bytes.NewBuffer(transaction))
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", state.HSToken))
resp, err := state.HTTPClient.Do(req)
if err != nil {
return state.backoffAndPause(err)
}
// If the response was fine then we can clear any backoffs in place and
// report that everything was OK. Otherwise, back off for a while.
switch resp.StatusCode {
case http.StatusOK:
state.backoff = 0
default:
return state.backoffAndPause(fmt.Errorf("received HTTP status code %d from appservice url %s", resp.StatusCode, address))
}
return nil
}
// backoffAndPause pauses the calling goroutine for 2^backoff seconds
func (s *appserviceState) backoffAndPause(err error) error {
if s.backoff < 6 {
s.backoff++
}
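// With the exponent capped at 6, the longest possible pause is 2^6 = 64 seconds.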
duration := time.Second * time.Duration(math.Pow(2, float64(s.backoff)))
log.WithField("appservice", s.ID).WithError(err).Errorf("Unable to send transaction to appservice, backing off for %s", duration.String())
time.Sleep(duration)
return err
}
// appserviceIsInterestedInEvent returns a boolean depending on whether a given
// event falls within one of a given application service's namespaces.
//
// TODO: This should be cached, see https://github.com/matrix-org/dendrite/issues/1682
func (s *OutputRoomEventConsumer) appserviceIsInterestedInEvent(ctx context.Context, event *types.HeaderedEvent, appservice *config.ApplicationService) bool {
user := ""
userID, err := s.rsAPI.QueryUserIDForSender(ctx, event.RoomID(), event.SenderID())
if err == nil {
user = userID.String()
}
switch {
case appservice.URL == "":
func (s *OutputRoomEventConsumer) appserviceIsInterestedInEvent(ctx context.Context, event gomatrixserverlib.Event, appservice config.ApplicationService) bool {
// No reason to queue events if they'll never be sent to the application
// service
if appservice.URL == "" {
return false
case appservice.IsInterestedInUserID(user):
return true
case appservice.IsInterestedInRoomID(event.RoomID().String()):
return true
}
if event.Type() == spec.MRoomMember && event.StateKey() != nil {
if appservice.IsInterestedInUserID(*event.StateKey()) {
return true
}
// Check Room ID and Sender of the event
if appservice.IsInterestedInUserID(event.Sender()) ||
appservice.IsInterestedInRoomID(event.RoomID()) {
return true
}
// Check all known room aliases of the room the event came from
queryReq := api.GetAliasesForRoomIDRequest{RoomID: event.RoomID().String()}
queryReq := api.GetAliasesForRoomIDRequest{RoomID: event.RoomID()}
var queryRes api.GetAliasesForRoomIDResponse
if err := s.rsAPI.GetAliasesForRoomID(ctx, &queryReq, &queryRes); err == nil {
if err := s.alias.GetAliasesForRoomID(ctx, &queryReq, &queryRes); err == nil {
for _, alias := range queryRes.Aliases {
if appservice.IsInterestedInRoomAlias(alias) {
return true
@@ -285,54 +202,9 @@ func (s *OutputRoomEventConsumer) appserviceIsInterestedInEvent(ctx context.Cont
}
} else {
log.WithFields(log.Fields{
"appservice": appservice.ID,
"room_id": event.RoomID().String(),
"room_id": event.RoomID(),
}).WithError(err).Errorf("Unable to get aliases for room")
}
// Check if any of the members in the room match the appservice
return s.appserviceJoinedAtEvent(ctx, event, appservice)
}
// appserviceJoinedAtEvent returns a boolean depending on whether a given
// appservice has membership at the time a given event was created.
func (s *OutputRoomEventConsumer) appserviceJoinedAtEvent(ctx context.Context, event *types.HeaderedEvent, appservice *config.ApplicationService) bool {
// TODO: This is only checking the current room state, not the state at
// the event in question. Pretty sure this is what Synapse does too, but
// until we have a lighter way of checking the state before the event that
// doesn't involve state res, then this is probably OK.
membershipReq := &api.QueryMembershipsForRoomRequest{
RoomID: event.RoomID().String(),
JoinedOnly: true,
}
membershipRes := &api.QueryMembershipsForRoomResponse{}
// XXX: This could potentially race if the state for the event is not known yet
// e.g. the event came over federation but we do not have the full state persisted.
if err := s.rsAPI.QueryMembershipsForRoom(ctx, membershipReq, membershipRes); err == nil {
for _, ev := range membershipRes.JoinEvents {
switch {
case ev.StateKey == nil:
continue
case ev.Type != spec.MRoomMember:
continue
}
var membership gomatrixserverlib.MemberContent
err = json.Unmarshal(ev.Content, &membership)
switch {
case err != nil:
continue
case membership.Membership == spec.Join:
if appservice.IsInterestedInUserID(*ev.StateKey) {
return true
}
}
}
} else {
log.WithFields(log.Fields{
"appservice": appservice.ID,
"room_id": event.RoomID().String(),
}).WithError(err).Errorf("Unable to get membership for room")
}
return false
}

View file

@@ -19,24 +19,25 @@ package query
import (
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"sync"
log "github.com/sirupsen/logrus"
"time"
"github.com/matrix-org/dendrite/appservice/api"
"github.com/matrix-org/dendrite/internal"
"github.com/matrix-org/dendrite/setup/config"
"github.com/matrix-org/dendrite/common"
"github.com/matrix-org/dendrite/common/config"
"github.com/matrix-org/util"
opentracing "github.com/opentracing/opentracing-go"
log "github.com/sirupsen/logrus"
)
const roomAliasExistsPath = "/rooms/"
const userIDExistsPath = "/users/"
// AppServiceQueryAPI is an implementation of api.AppServiceQueryAPI
type AppServiceQueryAPI struct {
Cfg *config.AppServiceAPI
ProtocolCache map[string]api.ASProtocolResponse
CacheMu sync.Mutex
HTTPClient *http.Client
Cfg *config.Dendrite
}
// RoomAliasExists performs a request to '/room/{roomAlias}' on all known
@@ -46,29 +47,21 @@ func (a *AppServiceQueryAPI) RoomAliasExists(
request *api.RoomAliasExistsRequest,
response *api.RoomAliasExistsResponse,
) error {
trace, ctx := internal.StartRegion(ctx, "ApplicationServiceRoomAlias")
defer trace.EndRegion()
span, ctx := opentracing.StartSpanFromContext(ctx, "ApplicationServiceRoomAlias")
defer span.Finish()
// Create an HTTP client if one does not already exist
if a.HTTPClient == nil {
a.HTTPClient = makeHTTPClient()
}
// Determine which application service should handle this request
for _, appservice := range a.Cfg.Derived.ApplicationServices {
if appservice.URL != "" && appservice.IsInterestedInRoomAlias(request.Alias) {
path := api.ASRoomAliasExistsPath
if a.Cfg.LegacyPaths {
path = api.ASRoomAliasExistsLegacyPath
}
// The full path to the rooms API, includes hs token
URL, err := url.Parse(appservice.RequestUrl() + path)
if err != nil {
return err
}
URL, err := url.Parse(appservice.URL + roomAliasExistsPath)
URL.Path += request.Alias
if a.Cfg.LegacyAuth {
q := URL.Query()
q.Set("access_token", appservice.HSToken)
URL.RawQuery = q.Encode()
}
apiURL := URL.String()
apiURL := URL.String() + "?access_token=" + appservice.HSToken
// Send a request to each application service. If one responds that it has
// created the room, immediately return.
@@ -76,10 +69,9 @@ func (a *AppServiceQueryAPI) RoomAliasExists(
if err != nil {
return err
}
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", appservice.HSToken))
req = req.WithContext(ctx)
resp, err := appservice.HTTPClient.Do(req)
resp, err := a.HTTPClient.Do(req)
if resp != nil {
defer func() {
err = resp.Body.Close()
@@ -123,28 +115,21 @@ func (a *AppServiceQueryAPI) UserIDExists(
request *api.UserIDExistsRequest,
response *api.UserIDExistsResponse,
) error {
trace, ctx := internal.StartRegion(ctx, "ApplicationServiceUserID")
defer trace.EndRegion()
span, ctx := opentracing.StartSpanFromContext(ctx, "ApplicationServiceUserID")
defer span.Finish()
// Create an HTTP client if one does not already exist
if a.HTTPClient == nil {
a.HTTPClient = makeHTTPClient()
}
// Determine which application service should handle this request
for _, appservice := range a.Cfg.Derived.ApplicationServices {
if appservice.URL != "" && appservice.IsInterestedInUserID(request.UserID) {
// The full path to the rooms API, includes hs token
path := api.ASUserExistsPath
if a.Cfg.LegacyPaths {
path = api.ASUserExistsLegacyPath
}
URL, err := url.Parse(appservice.RequestUrl() + path)
if err != nil {
return err
}
URL, err := url.Parse(appservice.URL + userIDExistsPath)
URL.Path += request.UserID
if a.Cfg.LegacyAuth {
q := URL.Query()
q.Set("access_token", appservice.HSToken)
URL.RawQuery = q.Encode()
}
apiURL := URL.String()
apiURL := URL.String() + "?access_token=" + appservice.HSToken
// Send a request to each application service. If one responds that it has
// created the user, immediately return.
@@ -152,8 +137,7 @@ func (a *AppServiceQueryAPI) UserIDExists(
if err != nil {
return err
}
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", appservice.HSToken))
resp, err := appservice.HTTPClient.Do(req.WithContext(ctx))
resp, err := a.HTTPClient.Do(req.WithContext(ctx))
if resp != nil {
defer func() {
err = resp.Body.Close()
@@ -189,190 +173,42 @@ func (a *AppServiceQueryAPI) UserIDExists(
return nil
}
type thirdpartyResponses interface {
api.ASProtocolResponse | []api.ASUserResponse | []api.ASLocationResponse
// makeHTTPClient creates an HTTP client with certain options that will be used for all query requests to application services
func makeHTTPClient() *http.Client {
return &http.Client{
Timeout: time.Second * 30,
}
}
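// requestDo sends an authenticated GET request to the given application service
// URL and decodes the JSON response body into response.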
func requestDo[T thirdpartyResponses](as *config.ApplicationService, url string, response *T) error {
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
return err
}
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", as.HSToken))
resp, err := as.HTTPClient.Do(req)
if err != nil {
return err
}
defer resp.Body.Close() // nolint: errcheck
body, err := io.ReadAll(resp.Body)
if err != nil {
return err
}
return json.Unmarshal(body, &response)
}
func (a *AppServiceQueryAPI) Locations(
ctx context.Context,
req *api.LocationRequest,
resp *api.LocationResponse,
) error {
params, err := url.ParseQuery(req.Params)
if err != nil {
return err
}
path := api.ASLocationPath
if a.Cfg.LegacyPaths {
path = api.ASLocationLegacyPath
}
for _, as := range a.Cfg.Derived.ApplicationServices {
var asLocations []api.ASLocationResponse
if a.Cfg.LegacyAuth {
params.Set("access_token", as.HSToken)
}
url := as.RequestUrl() + path
if req.Protocol != "" {
url += "/" + req.Protocol
}
if err := requestDo[[]api.ASLocationResponse](&as, url+"?"+params.Encode(), &asLocations); err != nil {
log.WithError(err).WithField("application_service", as.ID).Error("unable to get 'locations' from application service")
continue
}
resp.Locations = append(resp.Locations, asLocations...)
}
if len(resp.Locations) == 0 {
resp.Exists = false
return nil
}
resp.Exists = true
return nil
}
func (a *AppServiceQueryAPI) User(
ctx context.Context,
req *api.UserRequest,
resp *api.UserResponse,
) error {
params, err := url.ParseQuery(req.Params)
if err != nil {
return err
}
path := api.ASUserPath
if a.Cfg.LegacyPaths {
path = api.ASUserLegacyPath
}
for _, as := range a.Cfg.Derived.ApplicationServices {
var asUsers []api.ASUserResponse
if a.Cfg.LegacyAuth {
params.Set("access_token", as.HSToken)
}
url := as.RequestUrl() + path
if req.Protocol != "" {
url += "/" + req.Protocol
}
if err := requestDo[[]api.ASUserResponse](&as, url+"?"+params.Encode(), &asUsers); err != nil {
log.WithError(err).WithField("application_service", as.ID).Error("unable to get 'user' from application service")
continue
}
resp.Users = append(resp.Users, asUsers...)
}
if len(resp.Users) == 0 {
resp.Exists = false
return nil
}
resp.Exists = true
return nil
}
func (a *AppServiceQueryAPI) Protocols(
ctx context.Context,
req *api.ProtocolRequest,
resp *api.ProtocolResponse,
) error {
protocolPath := api.ASProtocolPath
if a.Cfg.LegacyPaths {
protocolPath = api.ASProtocolLegacyPath
}
// get a single protocol response
if req.Protocol != "" {
a.CacheMu.Lock()
defer a.CacheMu.Unlock()
if proto, ok := a.ProtocolCache[req.Protocol]; ok {
resp.Exists = true
resp.Protocols = map[string]api.ASProtocolResponse{
req.Protocol: proto,
}
return nil
}
response := api.ASProtocolResponse{}
for _, as := range a.Cfg.Derived.ApplicationServices {
var proto api.ASProtocolResponse
if err := requestDo[api.ASProtocolResponse](&as, as.RequestUrl()+protocolPath+req.Protocol, &proto); err != nil {
log.WithError(err).WithField("application_service", as.ID).Error("unable to get 'protocol' from application service")
continue
}
if len(response.Instances) != 0 {
response.Instances = append(response.Instances, proto.Instances...)
} else {
response = proto
}
}
if len(response.Instances) == 0 {
resp.Exists = false
return nil
}
resp.Exists = true
resp.Protocols = map[string]api.ASProtocolResponse{
req.Protocol: response,
}
a.ProtocolCache[req.Protocol] = response
return nil
}
response := make(map[string]api.ASProtocolResponse, len(a.Cfg.Derived.ApplicationServices))
for _, as := range a.Cfg.Derived.ApplicationServices {
for _, p := range as.Protocols {
var proto api.ASProtocolResponse
if err := requestDo[api.ASProtocolResponse](&as, as.RequestUrl()+protocolPath+p, &proto); err != nil {
log.WithError(err).WithField("application_service", as.ID).Error("unable to get 'protocol' from application service")
continue
}
existing, ok := response[p]
if !ok {
response[p] = proto
continue
}
existing.Instances = append(existing.Instances, proto.Instances...)
response[p] = existing
}
}
if len(response) == 0 {
resp.Exists = false
return nil
}
a.CacheMu.Lock()
defer a.CacheMu.Unlock()
a.ProtocolCache = response
resp.Exists = true
resp.Protocols = response
return nil
// SetupHTTP adds the AppServiceQueryAPI handlers to the http.ServeMux. This
// handles and muxes incoming API requests to the internal AppServiceQueryAPI.
func (a *AppServiceQueryAPI) SetupHTTP(servMux *http.ServeMux) {
servMux.Handle(
api.AppServiceRoomAliasExistsPath,
common.MakeInternalAPI("appserviceRoomAliasExists", func(req *http.Request) util.JSONResponse {
var request api.RoomAliasExistsRequest
var response api.RoomAliasExistsResponse
if err := json.NewDecoder(req.Body).Decode(&request); err != nil {
return util.ErrorResponse(err)
}
if err := a.RoomAliasExists(req.Context(), &request, &response); err != nil {
return util.ErrorResponse(err)
}
return util.JSONResponse{Code: http.StatusOK, JSON: &response}
}),
)
servMux.Handle(
api.AppServiceUserIDExistsPath,
common.MakeInternalAPI("appserviceUserIDExists", func(req *http.Request) util.JSONResponse {
var request api.UserIDExistsRequest
var response api.UserIDExistsResponse
if err := json.NewDecoder(req.Body).Decode(&request); err != nil {
return util.ErrorResponse(err)
}
if err := a.UserIDExists(req.Context(), &request, &response); err != nil {
return util.ErrorResponse(err)
}
return util.JSONResponse{Code: http.StatusOK, JSON: &response}
}),
)
}

View file

@@ -0,0 +1,65 @@
// Copyright 2018 Vector Creations Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package routing
import (
"net/http"
"github.com/gorilla/mux"
"github.com/matrix-org/dendrite/clientapi/auth/storage/accounts"
"github.com/matrix-org/dendrite/common"
"github.com/matrix-org/dendrite/common/config"
"github.com/matrix-org/dendrite/common/transactions"
"github.com/matrix-org/dendrite/roomserver/api"
"github.com/matrix-org/gomatrixserverlib"
"github.com/matrix-org/util"
)
const pathPrefixApp = "/_matrix/app/v1"
// Setup registers HTTP handlers with the given ServeMux. It also supplies the given http.Client
// to clients which need to make outbound HTTP requests.
//
// Due to Setup being used to call many other functions, a gocyclo nolint is
// applied:
// nolint: gocyclo
func Setup(
apiMux *mux.Router, cfg config.Dendrite, // nolint: unparam
queryAPI api.RoomserverQueryAPI, aliasAPI api.RoomserverAliasAPI, // nolint: unparam
accountDB *accounts.Database, // nolint: unparam
federation *gomatrixserverlib.FederationClient, // nolint: unparam
transactionsCache *transactions.Cache, // nolint: unparam
) {
appMux := apiMux.PathPrefix(pathPrefixApp).Subrouter()
appMux.Handle("/alias",
common.MakeExternalAPI("alias", func(req *http.Request) util.JSONResponse {
// TODO: Implement
return util.JSONResponse{
Code: http.StatusOK,
JSON: nil,
}
}),
).Methods(http.MethodGet, http.MethodOptions)
appMux.Handle("/user",
common.MakeExternalAPI("user", func(req *http.Request) util.JSONResponse {
// TODO: Implement
return util.JSONResponse{
Code: http.StatusOK,
JSON: nil,
}
}),
).Methods(http.MethodGet, http.MethodOptions)
}

View file

@@ -0,0 +1,248 @@
// Copyright 2018 New Vector Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"context"
"database/sql"
"encoding/json"
"time"
"github.com/matrix-org/gomatrixserverlib"
log "github.com/sirupsen/logrus"
)
const appserviceEventsSchema = `
-- Stores events to be sent to application services
CREATE TABLE IF NOT EXISTS appservice_events (
-- An auto-incrementing id unique to each event in the table
id BIGSERIAL NOT NULL PRIMARY KEY,
-- The ID of the application service the event will be sent to
as_id TEXT NOT NULL,
-- JSON representation of the event
event_json TEXT NOT NULL,
-- The ID of the transaction that this event is a part of
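-- (-1 until the event has been batched into a transaction)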
txn_id BIGINT NOT NULL
);
CREATE INDEX IF NOT EXISTS appservice_events_as_id ON appservice_events(as_id);
`
const selectEventsByApplicationServiceIDSQL = "" +
"SELECT id, event_json, txn_id " +
"FROM appservice_events WHERE as_id = $1 ORDER BY txn_id DESC, id ASC"
const countEventsByApplicationServiceIDSQL = "" +
"SELECT COUNT(id) FROM appservice_events WHERE as_id = $1"
const insertEventSQL = "" +
"INSERT INTO appservice_events(as_id, event_json, txn_id) " +
"VALUES ($1, $2, $3)"
const updateTxnIDForEventsSQL = "" +
"UPDATE appservice_events SET txn_id = $1 WHERE as_id = $2 AND id <= $3"
const deleteEventsBeforeAndIncludingIDSQL = "" +
"DELETE FROM appservice_events WHERE as_id = $1 AND id <= $2"
const (
// A transaction ID number that no transaction should ever have. Used for
// checking against the default value.
invalidTxnID = -2
)
type eventsStatements struct {
selectEventsByApplicationServiceIDStmt *sql.Stmt
countEventsByApplicationServiceIDStmt *sql.Stmt
insertEventStmt *sql.Stmt
updateTxnIDForEventsStmt *sql.Stmt
deleteEventsBeforeAndIncludingIDStmt *sql.Stmt
}
func (s *eventsStatements) prepare(db *sql.DB) (err error) {
_, err = db.Exec(appserviceEventsSchema)
if err != nil {
return
}
if s.selectEventsByApplicationServiceIDStmt, err = db.Prepare(selectEventsByApplicationServiceIDSQL); err != nil {
return
}
if s.countEventsByApplicationServiceIDStmt, err = db.Prepare(countEventsByApplicationServiceIDSQL); err != nil {
return
}
if s.insertEventStmt, err = db.Prepare(insertEventSQL); err != nil {
return
}
if s.updateTxnIDForEventsStmt, err = db.Prepare(updateTxnIDForEventsSQL); err != nil {
return
}
if s.deleteEventsBeforeAndIncludingIDStmt, err = db.Prepare(deleteEventsBeforeAndIncludingIDSQL); err != nil {
return
}
return
}
// selectEventsByApplicationServiceID takes in an application service ID and
// returns a slice of events that need to be sent to that application service,
// as well as an int later used to remove these same events from the database
// once successfully sent to an application service.
func (s *eventsStatements) selectEventsByApplicationServiceID(
ctx context.Context,
applicationServiceID string,
limit int,
) (
txnID, maxID int,
events []gomatrixserverlib.Event,
eventsRemaining bool,
err error,
) {
// Retrieve events from the database. Unsuccessfully sent events first
eventRows, err := s.selectEventsByApplicationServiceIDStmt.QueryContext(ctx, applicationServiceID)
if err != nil {
return
}
defer func() {
err = eventRows.Close()
if err != nil {
log.WithFields(log.Fields{
"appservice": applicationServiceID,
}).WithError(err).Fatalf("appservice unable to select new events to send")
}
}()
events, maxID, txnID, eventsRemaining, err = retrieveEvents(eventRows, limit)
if err != nil {
return
}
return
}
func retrieveEvents(eventRows *sql.Rows, limit int) (events []gomatrixserverlib.Event, maxID, txnID int, eventsRemaining bool, err error) {
// Get current time for use in calculating event age
nowMilli := time.Now().UnixNano() / int64(time.Millisecond)
// Iterate through each row and store event contents
// If txn_id changes dramatically, we've switched from collecting old events to
// new ones. Send back those events first.
lastTxnID := invalidTxnID
for eventsProcessed := 0; eventRows.Next(); {
var event gomatrixserverlib.Event
var eventJSON []byte
var id int
err = eventRows.Scan(
&id,
&eventJSON,
&txnID,
)
if err != nil {
return nil, 0, 0, false, err
}
// Unmarshal eventJSON
if err = json.Unmarshal(eventJSON, &event); err != nil {
return nil, 0, 0, false, err
}
// If txnID has changed on this event from the previous event, then we've
// reached the end of a transaction's events. Return only those events.
if lastTxnID > invalidTxnID && lastTxnID != txnID {
return events, maxID, lastTxnID, true, nil
}
lastTxnID = txnID
// Limit events that aren't part of an old transaction
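// (a txn_id of -1 means the event has not yet been assigned to a transaction)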
if txnID == -1 {
// Return if we've hit the limit
if eventsProcessed++; eventsProcessed > limit {
return events, maxID, lastTxnID, true, nil
}
}
if id > maxID {
maxID = id
}
// Portion of the event that is unsigned due to rapid change
// TODO: Consider removing age as not many app services use it
if err = event.SetUnsignedField("age", nowMilli-int64(event.OriginServerTS())); err != nil {
return nil, 0, 0, false, err
}
events = append(events, event)
}
return
}
// countEventsByApplicationServiceID returns a count of the events in the db that
// are destined for a given application service.
func (s *eventsStatements) countEventsByApplicationServiceID(
ctx context.Context,
appServiceID string,
) (int, error) {
var count int
err := s.countEventsByApplicationServiceIDStmt.QueryRowContext(ctx, appServiceID).Scan(&count)
if err != nil && err != sql.ErrNoRows {
return 0, err
}
return count, nil
}
// insertEvent inserts an event mapped to its corresponding application service
// IDs into the db.
func (s *eventsStatements) insertEvent(
ctx context.Context,
appServiceID string,
event *gomatrixserverlib.Event,
) (err error) {
// Convert event to JSON before inserting
eventJSON, err := json.Marshal(event)
if err != nil {
return err
}
_, err = s.insertEventStmt.ExecContext(
ctx,
appServiceID,
eventJSON,
-1, // No transaction ID yet
)
return
}
// updateTxnIDForEvents sets the transactionID for a collection of events. Done
// before sending them to an AppService. Referenced before sending to make sure
// we aren't constructing multiple transactions with the same events.
func (s *eventsStatements) updateTxnIDForEvents(
ctx context.Context,
appserviceID string,
maxID, txnID int,
) (err error) {
_, err = s.updateTxnIDForEventsStmt.ExecContext(ctx, txnID, appserviceID, maxID)
return
}
// deleteEventsBeforeAndIncludingID removes all events for an application service
// with an ID less than or equal to the given ID from the database.
func (s *eventsStatements) deleteEventsBeforeAndIncludingID(
ctx context.Context,
appserviceID string,
eventTableID int,
) (err error) {
_, err = s.deleteEventsBeforeAndIncludingIDStmt.ExecContext(ctx, appserviceID, eventTableID)
return
}

View file

@@ -0,0 +1,110 @@
// Copyright 2018 New Vector Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"context"
"database/sql"
// Import postgres database driver
_ "github.com/lib/pq"
"github.com/matrix-org/gomatrixserverlib"
)
// Database stores events intended to be later sent to application services
type Database struct {
events eventsStatements
txnID txnStatements
db *sql.DB
}
// NewDatabase opens a new database
func NewDatabase(dataSourceName string) (*Database, error) {
var result Database
var err error
if result.db, err = sql.Open("postgres", dataSourceName); err != nil {
return nil, err
}
if err = result.prepare(); err != nil {
return nil, err
}
return &result, nil
}
func (d *Database) prepare() error {
if err := d.events.prepare(d.db); err != nil {
return err
}
return d.txnID.prepare(d.db)
}
// StoreEvent takes in a gomatrixserverlib.Event and stores it in the database
// for a transaction worker to pull and later send to an application service.
func (d *Database) StoreEvent(
ctx context.Context,
appServiceID string,
event *gomatrixserverlib.Event,
) error {
return d.events.insertEvent(ctx, appServiceID, event)
}
// GetEventsWithAppServiceID returns a slice of events and their IDs intended to
// be sent to an application service given its ID.
func (d *Database) GetEventsWithAppServiceID(
ctx context.Context,
appServiceID string,
limit int,
) (int, int, []gomatrixserverlib.Event, bool, error) {
return d.events.selectEventsByApplicationServiceID(ctx, appServiceID, limit)
}
// CountEventsWithAppServiceID returns the number of events destined for an
// application service given its ID.
func (d *Database) CountEventsWithAppServiceID(
ctx context.Context,
appServiceID string,
) (int, error) {
return d.events.countEventsByApplicationServiceID(ctx, appServiceID)
}
// UpdateTxnIDForEvents takes in an application service ID and a transaction ID
// and marks all of that application service's events up to and including maxID
// with the given transaction ID.
func (d *Database) UpdateTxnIDForEvents(
ctx context.Context,
appserviceID string,
maxID, txnID int,
) error {
return d.events.updateTxnIDForEvents(ctx, appserviceID, maxID, txnID)
}
// RemoveEventsBeforeAndIncludingID removes all events from the database that
// are less than or equal to a given maximum ID. IDs here are implemented as a
// serial, thus this should always delete events in chronological order.
func (d *Database) RemoveEventsBeforeAndIncludingID(
ctx context.Context,
appserviceID string,
eventTableID int,
) error {
return d.events.deleteEventsBeforeAndIncludingID(ctx, appserviceID, eventTableID)
}
// GetLatestTxnID returns the latest available transaction id
func (d *Database) GetLatestTxnID(
ctx context.Context,
) (int, error) {
return d.txnID.selectTxnID(ctx)
}

View file

@@ -0,0 +1,52 @@
// Copyright 2018 New Vector Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"context"
"database/sql"
)
const txnIDSchema = `
-- Keeps a count of the current transaction ID
CREATE SEQUENCE IF NOT EXISTS txn_id_counter START 1;
`
const selectTxnIDSQL = "SELECT nextval('txn_id_counter')"
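// Each call to nextval() hands out a fresh, monotonically increasing ID from the sequence.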
type txnStatements struct {
selectTxnIDStmt *sql.Stmt
}
func (s *txnStatements) prepare(db *sql.DB) (err error) {
_, err = db.Exec(txnIDSchema)
if err != nil {
return
}
if s.selectTxnIDStmt, err = db.Prepare(selectTxnIDSQL); err != nil {
return
}
return
}
// selectTxnID selects the next transaction ID from the counter sequence
func (s *txnStatements) selectTxnID(
ctx context.Context,
) (txnID int, err error) {
err = s.selectTxnIDStmt.QueryRowContext(ctx).Scan(&txnID)
return
}

64
appservice/types/types.go Normal file
View file

@@ -0,0 +1,64 @@
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package types
import (
"sync"
"github.com/matrix-org/dendrite/common/config"
)
const (
// AppServiceDeviceID is the AS dummy device ID
AppServiceDeviceID = "AS_Device"
)
// ApplicationServiceWorkerState is a type that couples an application service,
// a lockable condition as well as some other state variables, allowing the
// roomserver to notify appservice workers when there are events ready to send
// externally to application services.
type ApplicationServiceWorkerState struct {
AppService config.ApplicationService
Cond *sync.Cond
// Events ready to be sent
EventsReady bool
// Backoff exponent (2^x secs). Max 6, aka 64s.
Backoff int
}
// NotifyNewEvents wakes up all waiting goroutines, notifying that events remain
// in the event queue for this application service worker.
func (a *ApplicationServiceWorkerState) NotifyNewEvents() {
a.Cond.L.Lock()
a.EventsReady = true
a.Cond.Broadcast()
a.Cond.L.Unlock()
}
// FinishEventProcessing marks all events of this worker as being sent to the
// application service.
func (a *ApplicationServiceWorkerState) FinishEventProcessing() {
a.Cond.L.Lock()
a.EventsReady = false
a.Cond.L.Unlock()
}
// WaitForNewEvents causes the calling goroutine to wait on the worker state's
// condition for a broadcast or similar wakeup, if there are no events ready.
func (a *ApplicationServiceWorkerState) WaitForNewEvents() {
a.Cond.L.Lock()
if !a.EventsReady {
a.Cond.Wait()
}
a.Cond.L.Unlock()
}

View file

@@ -0,0 +1,227 @@
// Copyright 2018 Vector Creations Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package workers
import (
"bytes"
"context"
"encoding/json"
"fmt"
"math"
"net/http"
"time"
"github.com/matrix-org/dendrite/appservice/storage"
"github.com/matrix-org/dendrite/appservice/types"
"github.com/matrix-org/dendrite/common/config"
"github.com/matrix-org/gomatrixserverlib"
log "github.com/sirupsen/logrus"
)
var (
// Maximum size of events sent in each transaction.
transactionBatchSize = 50
// Timeout for sending a single transaction to an application service.
transactionTimeout = time.Second * 60
)
// SetupTransactionWorkers spawns a separate goroutine for each application
// service. Each of these "workers" handles taking all events intended for its
// app service, batching them up into a single transaction (up to a max transaction
// size), then sending that off to the AS's /transactions/{txnID} endpoint. It also
// handles exponentially backing off in case the AS isn't currently available.
func SetupTransactionWorkers(
appserviceDB *storage.Database,
workerStates []types.ApplicationServiceWorkerState,
) error {
// Create a worker that handles transmitting events to a single application service
for _, workerState := range workerStates {
// Don't create a worker if this AS doesn't want to receive events
if workerState.AppService.URL != "" {
go worker(appserviceDB, workerState)
}
}
return nil
}
// worker is a goroutine that sends any queued events to the application service
// it is given.
func worker(db *storage.Database, ws types.ApplicationServiceWorkerState) {
log.WithFields(log.Fields{
"appservice": ws.AppService.ID,
}).Info("starting application service")
ctx := context.Background()
// Create a HTTP client for sending requests to app services
client := &http.Client{
Timeout: transactionTimeout,
}
// Initial check for any leftover events to send from last time
eventCount, err := db.CountEventsWithAppServiceID(ctx, ws.AppService.ID)
if err != nil {
log.WithFields(log.Fields{
"appservice": ws.AppService.ID,
}).WithError(err).Fatal("appservice worker unable to read queued events from DB")
return
}
if eventCount > 0 {
ws.NotifyNewEvents()
}
// Loop forever and keep waiting for more events to send
for {
// Wait for more events if we've sent all the events in the database
ws.WaitForNewEvents()
// Batch events up into a transaction
transactionJSON, txnID, maxEventID, eventsRemaining, err := createTransaction(ctx, db, ws.AppService.ID)
if err != nil {
log.WithFields(log.Fields{
"appservice": ws.AppService.ID,
}).WithError(err).Fatal("appservice worker unable to create transaction")
return
}
// Send the events off to the application service
// Backoff if the application service does not respond
err = send(client, ws.AppService, txnID, transactionJSON)
if err != nil {
// Backoff
backoff(&ws, err)
continue
}
// We sent successfully, hooray!
ws.Backoff = 0
// Transactions have a maximum event size, so there may still be some events
// left over to send. Keep sending until none are left
if !eventsRemaining {
ws.FinishEventProcessing()
}
// Remove sent events from the DB
err = db.RemoveEventsBeforeAndIncludingID(ctx, ws.AppService.ID, maxEventID)
if err != nil {
log.WithFields(log.Fields{
"appservice": ws.AppService.ID,
}).WithError(err).Fatal("unable to remove appservice events from the database")
return
}
}
}
// backoff pauses the calling goroutine for 2^Backoff seconds, capping the exponent at 6 (64 seconds max)
func backoff(ws *types.ApplicationServiceWorkerState, err error) {
// Calculate how long to backoff for
backoffDuration := time.Duration(math.Pow(2, float64(ws.Backoff)))
backoffSeconds := time.Second * backoffDuration
log.WithFields(log.Fields{
"appservice": ws.AppService.ID,
}).WithError(err).Warnf("unable to send transactions successfully, backing off for %ds",
backoffDuration)
ws.Backoff++
if ws.Backoff > 6 {
ws.Backoff = 6
}
// Backoff
time.Sleep(backoffSeconds)
}
// createTransaction reads queued AS events from the database, batches them into
// an AS transaction, and JSON-encodes the result.
func createTransaction(
ctx context.Context,
db *storage.Database,
appserviceID string,
) (
transactionJSON []byte,
txnID, maxID int,
eventsRemaining bool,
err error,
) {
// Retrieve the latest events from the DB (will return old events if they weren't successfully sent)
txnID, maxID, events, eventsRemaining, err := db.GetEventsWithAppServiceID(ctx, appserviceID, transactionBatchSize)
if err != nil {
log.WithFields(log.Fields{
"appservice": appserviceID,
}).WithError(err).Fatalf("appservice worker unable to read queued events from DB")
return
}
// Check if these events do not already have a transaction ID
if txnID == -1 {
// If not, grab next available ID from the DB
txnID, err = db.GetLatestTxnID(ctx)
if err != nil {
return nil, 0, 0, false, err
}
// Mark new events with current transactionID
if err = db.UpdateTxnIDForEvents(ctx, appserviceID, maxID, txnID); err != nil {
return nil, 0, 0, false, err
}
}
// Create a transaction and store the events inside
transaction := gomatrixserverlib.ApplicationServiceTransaction{
Events: events,
}
transactionJSON, err = json.Marshal(transaction)
if err != nil {
return
}
return
}
// send sends events to an application service. Returns an error if an OK was not
// received back from the application service or the request timed out.
func send(
client *http.Client,
appservice config.ApplicationService,
txnID int,
transaction []byte,
) error {
// POST a transaction to our AS
address := fmt.Sprintf("%s/transactions/%d", appservice.URL, txnID)
resp, err := client.Post(address, "application/json", bytes.NewBuffer(transaction))
if err != nil {
return err
}
defer func() {
err := resp.Body.Close()
if err != nil {
log.WithFields(log.Fields{
"appservice": appservice.ID,
}).WithError(err).Error("unable to close response body from application service")
}
}()
// Check the AS received the events correctly
if resp.StatusCode != http.StatusOK {
// TODO: Handle non-200 error codes from application services
return fmt.Errorf("non-OK status code %d returned from AS", resp.StatusCode)
}
return nil
}

View file

@@ -1,955 +0,0 @@
reg GET /register yields a set of flows
reg POST /register can create a user
reg POST /register downcases capitals in usernames
reg POST /register returns the same device_id as that in the request
reg POST /register rejects registration of usernames with '!'
reg POST /register rejects registration of usernames with '"'
reg POST /register rejects registration of usernames with ':'
reg POST /register rejects registration of usernames with '?'
reg POST /register rejects registration of usernames with '\'
reg POST /register rejects registration of usernames with '@'
reg POST /register rejects registration of usernames with '['
reg POST /register rejects registration of usernames with ']'
reg POST /register rejects registration of usernames with '{'
reg POST /register rejects registration of usernames with '|'
reg POST /register rejects registration of usernames with '}'
reg POST /register rejects registration of usernames with '£'
reg POST /register rejects registration of usernames with 'é'
reg POST /register rejects registration of usernames with '\n'
reg POST /register rejects registration of usernames with '''
reg POST /register allows registration of usernames with 'q'
reg POST /register allows registration of usernames with '3'
reg POST /register allows registration of usernames with '.'
reg POST /register allows registration of usernames with '_'
reg POST /register allows registration of usernames with '='
reg POST /register allows registration of usernames with '-'
reg POST /register allows registration of usernames with '/'
reg POST /r0/admin/register with shared secret
reg POST /r0/admin/register admin with shared secret
reg POST /r0/admin/register with shared secret downcases capitals
reg POST /r0/admin/register with shared secret disallows symbols
reg POST rejects invalid utf-8 in JSON
log GET /login yields a set of flows
log POST /login can log in as a user
log POST /login returns the same device_id as that in the request
log POST /login can log in as a user with just the local part of the id
log POST /login as non-existing user is rejected
log POST /login wrong password is rejected
log Interactive authentication types include SSO
log Can perform interactive authentication with SSO
log The user must be consistent through an interactive authentication session with SSO
log The operation must be consistent through an interactive authentication session
v1s GET /events initially
v1s GET /initialSync initially
csa Version responds 200 OK with valid structure
pro PUT /profile/:user_id/displayname sets my name
pro GET /profile/:user_id/displayname publicly accessible
pro PUT /profile/:user_id/avatar_url sets my avatar
pro GET /profile/:user_id/avatar_url publicly accessible
dev GET /device/{deviceId}
dev GET /device/{deviceId} gives a 404 for unknown devices
dev GET /devices
dev PUT /device/{deviceId} updates device fields
dev PUT /device/{deviceId} gives a 404 for unknown devices
dev DELETE /device/{deviceId}
dev DELETE /device/{deviceId} requires UI auth user to match device owner
dev DELETE /device/{deviceId} with no body gives a 401
dev The deleted device must be consistent through an interactive auth session
dev Users receive device_list updates for their own devices
pre GET /presence/:user_id/status fetches initial status
pre PUT /presence/:user_id/status updates my presence
crm POST /createRoom makes a public room
crm POST /createRoom makes a private room
crm POST /createRoom makes a private room with invites
crm POST /createRoom makes a room with a name
crm POST /createRoom makes a room with a topic
syn Can /sync newly created room
crm POST /createRoom creates a room with the given version
crm POST /createRoom rejects attempts to create rooms with numeric versions
crm POST /createRoom rejects attempts to create rooms with unknown versions
crm POST /createRoom ignores attempts to set the room version via creation_content
mem GET /rooms/:room_id/state/m.room.member/:user_id fetches my membership
mem GET /rooms/:room_id/state/m.room.member/:user_id?format=event fetches my membership event
rst GET /rooms/:room_id/state/m.room.power_levels fetches powerlevels
mem GET /rooms/:room_id/joined_members fetches my membership
v1s GET /rooms/:room_id/initialSync fetches initial sync state
pub GET /publicRooms lists newly-created room
ali GET /directory/room/:room_alias yields room ID
mem GET /joined_rooms lists newly-created room
rst POST /rooms/:room_id/state/m.room.name sets name
rst GET /rooms/:room_id/state/m.room.name gets name
rst POST /rooms/:room_id/state/m.room.topic sets topic
rst GET /rooms/:room_id/state/m.room.topic gets topic
rst GET /rooms/:room_id/state fetches entire room state
crm POST /createRoom with creation content
ali PUT /directory/room/:room_alias creates alias
ali GET /rooms/:room_id/aliases lists aliases
jon POST /rooms/:room_id/join can join a room
jon POST /join/:room_alias can join a room
jon POST /join/:room_id can join a room
jon POST /join/:room_id can join a room with custom content
jon POST /join/:room_alias can join a room with custom content
lev POST /rooms/:room_id/leave can leave a room
inv POST /rooms/:room_id/invite can send an invite
ban POST /rooms/:room_id/ban can ban a user
snd POST /rooms/:room_id/send/:event_type sends a message
snd PUT /rooms/:room_id/send/:event_type/:txn_id sends a message
snd PUT /rooms/:room_id/send/:event_type/:txn_id deduplicates the same txn id
get GET /rooms/:room_id/messages returns a message
get GET /rooms/:room_id/messages lazy loads members correctly
typ PUT /rooms/:room_id/typing/:user_id sets typing notification
typ Typing notifications don't leak (3 subtests)
rst GET /rooms/:room_id/state/m.room.power_levels can fetch levels
rst PUT /rooms/:room_id/state/m.room.power_levels can set levels
rst PUT power_levels should not explode if the old power levels were empty
rst Users cannot set notifications powerlevel higher than their own (2 subtests)
rst Both GET and PUT work
rct POST /rooms/:room_id/receipt can create receipts
red POST /rooms/:room_id/read_markers can create read marker
med POST /media/v3/upload can create an upload
med POST /media/r0/upload can create an upload
med GET /media/v3/download can fetch the value again
med GET /media/r0/download can fetch the value again
cap GET /capabilities is present and well formed for registered user
cap GET /r0/capabilities is not public
cap GET /v3/capabilities is not public
reg Register with a recaptcha
reg registration is idempotent, without username specified
reg registration is idempotent, with username specified
reg registration remembers parameters
reg registration accepts non-ascii passwords
reg registration with inhibit_login inhibits login
reg User signups are forbidden from starting with '_'
reg Can register using an email address
log Can login with 3pid and password using m.login.password
log login types include SSO
log /login/cas/redirect redirects if the old m.login.cas login type is listed
log Can login with new user via CAS
lox Can logout current device
lox Can logout all devices
lox Request to logout with invalid an access token is rejected
lox Request to logout without an access token is rejected
log After changing password, can't log in with old password
log After changing password, can log in with new password
log After changing password, existing session still works
log After changing password, a different session no longer works by default
log After changing password, different sessions can optionally be kept
psh Pushers created with a different access token are deleted on password change
psh Pushers created with a the same access token are not deleted on password change
acc Can deactivate account
acc Can't deactivate account with wrong password
acc After deactivating account, can't log in with password
acc After deactivating account, can't log in with an email
v1s initialSync sees my presence status
pre Presence change reports an event to myself
pre Friends presence changes reports events
crm Room creation reports m.room.create to myself
crm Room creation reports m.room.member to myself
rst Setting room topic reports m.room.topic to myself
v1s Global initialSync
v1s Global initialSync with limit=0 gives no messages
v1s Room initialSync
v1s Room initialSync with limit=0 gives no messages
rst Setting state twice is idempotent
jon Joining room twice is idempotent
syn New room members see their own join event
v1s New room members see existing users' presence in room initialSync
syn Existing members see new members' join events
pre Existing members see new members' presence
v1s All room members see all room members' presence in global initialSync
f,jon Remote users can join room by alias
syn New room members see their own join event
v1s New room members see existing members' presence in room initialSync
syn Existing members see new members' join events
pre Existing members see new member's presence
v1s New room members see first user's profile information in global initialSync
v1s New room members see first user's profile information in per-room initialSync
f,jon Remote users may not join unfederated rooms
syn Local room members see posted message events
v1s Fetching eventstream a second time doesn't yield the message again
syn Local non-members don't see posted message events
get Local room members can get room messages
f,syn Remote room members also see posted message events
f,get Remote room members can get room messages
get Message history can be paginated
f,get Message history can be paginated over federation
eph Ephemeral messages received from clients are correctly expired
ali Room aliases can contain Unicode
f,ali Remote room alias queries can handle Unicode
ali Canonical alias can be set
ali Canonical alias can be set (3 subtests)
ali Canonical alias can include alt_aliases
ali Canonical alias can include alt_aliases (4 subtests)
ali Regular users can add and delete aliases in the default room configuration
ali Regular users can add and delete aliases when m.room.aliases is restricted
ali Deleting a non-existent alias should return a 404
ali Users can't delete other's aliases
ali Users with sufficient power-level can delete other's aliases
ali Can delete canonical alias
ali Alias creators can delete alias with no ops
ali Alias creators can delete canonical alias with no ops
msc Only room members can list aliases of a room
inv Can invite users to invite-only rooms
inv Uninvited users cannot join the room
inv Invited user can reject invite
f,inv Invited user can reject invite over federation
f,inv Invited user can reject invite over federation several times
inv Invited user can reject invite for empty room
f,inv Invited user can reject invite over federation for empty room
inv Invited user can reject local invite after originator leaves
inv Invited user can see room metadata
f,inv Remote invited user can see room metadata
inv Users cannot invite themselves to a room
inv Users cannot invite a user that is already in the room
ban Banned user is kicked and may not rejoin until unbanned
f,ban Remote banned user is kicked and may not rejoin until unbanned
ban 'ban' event respects room powerlevel
plv setting 'm.room.name' respects room powerlevel
plv setting 'm.room.power_levels' respects room powerlevel (2 subtests)
plv Unprivileged users can set m.room.topic if it only needs level 0
plv Users cannot set ban powerlevel higher than their own (2 subtests)
plv Users cannot set kick powerlevel higher than their own (2 subtests)
plv Users cannot set redact powerlevel higher than their own (2 subtests)
v1s Check that event streams started after a client joined a room work (SYT-1)
v1s Event stream catches up fully after many messages
xxx PUT /rooms/:room_id/redact/:event_id/:txn_id as power user redacts message
xxx PUT /rooms/:room_id/redact/:event_id/:txn_id as original message sender redacts message
xxx PUT /rooms/:room_id/redact/:event_id/:txn_id as random user does not redact message
xxx PUT /redact disallows redaction of event in different room
xxx Redaction of a redaction redacts the redaction reason
xxx PUT /rooms/:room_id/redact/:event_id/:txn_id is idempotent
v1s A departed room is still included in /initialSync (SPEC-216)
v1s Can get rooms/{roomId}/initialSync for a departed room (SPEC-216)
rst Can get rooms/{roomId}/state for a departed room (SPEC-216)
mem Can get rooms/{roomId}/members for a departed room (SPEC-216)
get Can get rooms/{roomId}/messages for a departed room (SPEC-216)
rst Can get 'm.room.name' state for a departed room (SPEC-216)
syn Getting messages going forward is limited for a departed room (SPEC-216)
3pd Can invite existing 3pid
3pd Can invite existing 3pid with no ops into a private room
3pd Can invite existing 3pid in createRoom
3pd Can invite unbound 3pid
f,3pd Can invite unbound 3pid over federation
3pd Can invite unbound 3pid with no ops into a private room
f,3pd Can invite unbound 3pid over federation with no ops into a private room
f,3pd Can invite unbound 3pid over federation with users from both servers
3pd Can accept unbound 3pid invite after inviter leaves
3pd Can accept third party invite with /join
3pd 3pid invite join with wrong but valid signature are rejected
3pd 3pid invite join valid signature but revoked keys are rejected
3pd 3pid invite join valid signature but unreachable ID server are rejected
gst Guest user cannot call /events globally
gst Guest users can join guest_access rooms
gst Guest users can send messages to guest_access rooms if joined
gst Guest user calling /events doesn't tightloop
gst Guest users are kicked from guest_access rooms on revocation of guest_access
gst Guest user can set display names
gst Guest users are kicked from guest_access rooms on revocation of guest_access over federation
gst Guest user can upgrade to fully featured user
gst Guest user cannot upgrade other users
pub GET /publicRooms lists rooms
pub GET /publicRooms includes avatar URLs
gst Guest users can accept invites to private rooms over federation
gst Guest users denied access over federation if guest access prohibited
mem Room members can override their displayname on a room-specific basis
mem Room members can join a room with an overridden displayname
mem Users cannot kick users from a room they are not in
mem Users cannot kick users who have already left a room
typ Typing notification sent to local room members
f,typ Typing notifications also sent to remote room members
typ Typing can be explicitly stopped
rct Read receipts are visible to /initialSync
rct Read receipts are sent as events
rct Receipts must be m.read
pro displayname updates affect room member events
pro avatar_url updates affect room member events
gst m.room.history_visibility == "world_readable" allows/forbids appropriately for Guest users
gst m.room.history_visibility == "shared" allows/forbids appropriately for Guest users
gst m.room.history_visibility == "invited" allows/forbids appropriately for Guest users
gst m.room.history_visibility == "joined" allows/forbids appropriately for Guest users
gst m.room.history_visibility == "default" allows/forbids appropriately for Guest users
gst Guest non-joined user cannot call /events on shared room
gst Guest non-joined user cannot call /events on invited room
gst Guest non-joined user cannot call /events on joined room
gst Guest non-joined user cannot call /events on default room
gst Guest non-joined user can call /events on world_readable room
gst Guest non-joined users can get state for world_readable rooms
gst Guest non-joined users can get individual state for world_readable rooms
gst Guest non-joined users cannot room initalSync for non-world_readable rooms
gst Guest non-joined users can room initialSync for world_readable rooms
gst Guest non-joined users can get individual state for world_readable rooms after leaving
gst Guest non-joined users cannot send messages to guest_access rooms if not joined
gst Guest users can sync from world_readable guest_access rooms if joined
gst Guest users can sync from shared guest_access rooms if joined
gst Guest users can sync from invited guest_access rooms if joined
gst Guest users can sync from joined guest_access rooms if joined
gst Guest users can sync from default guest_access rooms if joined
ath m.room.history_visibility == "world_readable" allows/forbids appropriately for Real users
ath m.room.history_visibility == "shared" allows/forbids appropriately for Real users
ath m.room.history_visibility == "invited" allows/forbids appropriately for Real users
ath m.room.history_visibility == "joined" allows/forbids appropriately for Real users
ath m.room.history_visibility == "default" allows/forbids appropriately for Real users
ath Real non-joined user cannot call /events on shared room
ath Real non-joined user cannot call /events on invited room
ath Real non-joined user cannot call /events on joined room
ath Real non-joined user cannot call /events on default room
ath Real non-joined user can call /events on world_readable room
ath Real non-joined users can get state for world_readable rooms
ath Real non-joined users can get individual state for world_readable rooms
ath Real non-joined users cannot room initalSync for non-world_readable rooms
ath Real non-joined users can room initialSync for world_readable rooms
ath Real non-joined users can get individual state for world_readable rooms after leaving
ath Real non-joined users cannot send messages to guest_access rooms if not joined
ath Real users can sync from world_readable guest_access rooms if joined
ath Real users can sync from shared guest_access rooms if joined
ath Real users can sync from invited guest_access rooms if joined
ath Real users can sync from joined guest_access rooms if joined
ath Real users can sync from default guest_access rooms if joined
ath Only see history_visibility changes on boundaries
f,ath Backfill works correctly with history visibility set to joined
fgt Forgotten room messages cannot be paginated
fgt Forgetting room does not show up in v2 /sync
fgt Can forget room you've been kicked from
fgt Can't forget room you're still in
fgt Can re-join room if re-invited
ath Only original members of the room can see messages from erased users
mem /joined_rooms returns only joined rooms
mem /joined_members return joined members
ctx /context/ on joined room works
ctx /context/ on non world readable room does not work
ctx /context/ returns correct number of events
ctx /context/ with lazy_load_members filter works
get /event/ on joined room works
get /event/ on non world readable room does not work
get /event/ does not allow access to events before the user joined
mem Can get rooms/{roomId}/members
mem Can get rooms/{roomId}/members at a given point
mem Can filter rooms/{roomId}/members
upg /upgrade creates a new room
upg /upgrade should preserve room visibility for public rooms
upg /upgrade should preserve room visibility for private rooms
upg /upgrade copies >100 power levels to the new room
upg /upgrade copies the power levels to the new room
upg /upgrade preserves the power level of the upgrading user in old and new rooms
upg /upgrade copies important state to the new room
upg /upgrade copies ban events to the new room
upg local user has push rules copied to upgraded room
f,upg remote user has push rules copied to upgraded room
upg /upgrade moves aliases to the new room
upg /upgrade moves remote aliases to the new room
upg /upgrade preserves direct room state
upg /upgrade preserves room federation ability
upg /upgrade restricts power levels in the old room
upg /upgrade restricts power levels in the old room when the old PLs are unusual
upg /upgrade to an unknown version is rejected
upg /upgrade is rejected if the user can't send state events
upg /upgrade of a bogus room fails gracefully
upg Cannot send tombstone event that points to the same room
f,upg Local and remote users' homeservers remove a room from their public directory on upgrade
rst Name/topic keys are correct
f,pub Can get remote public room list
pub Can paginate public room list
pub Can search public room list
syn Can create filter
syn Can download filter
syn Can sync
syn Can sync a joined room
syn Full state sync includes joined rooms
syn Newly joined room is included in an incremental sync
syn Newly joined room has correct timeline in incremental sync
pre Newly joined room includes presence in incremental sync
pre Get presence for newly joined members in incremental sync
syn Can sync a room with a single message
syn Can sync a room with a message with a transaction id
syn A message sent after an initial sync appears in the timeline of an incremental sync.
syn A filtered timeline reaches its limit
syn Syncing a new room with a large timeline limit isn't limited
syn A full_state incremental update returns only recent timeline
syn A prev_batch token can be used in the v1 messages API
syn A next_batch token can be used in the v1 messages API
syn A prev_batch token from incremental sync can be used in the v1 messages API
pre User sees their own presence in a sync
pre User is offline if they set_presence=offline in their sync
pre User sees updates to presence from other users in the incremental sync.
syn State is included in the timeline in the initial sync
f,syn State from remote users is included in the state in the initial sync
syn Changes to state are included in an incremental sync
syn Changes to state are included in an gapped incremental sync
f,syn State from remote users is included in the timeline in an incremental sync
syn A full_state incremental update returns all state
syn When user joins a room the state is included in the next sync
syn A change to displayname should not result in a full state sync
syn A change to displayname should appear in incremental /sync
syn When user joins a room the state is included in a gapped sync
syn When user joins and leaves a room in the same batch, the full state is still included in the next sync
syn Current state appears in timeline in private history
syn Current state appears in timeline in private history with many messages before
syn Current state appears in timeline in private history with many messages after
syn Rooms a user is invited to appear in an initial sync
syn Rooms a user is invited to appear in an incremental sync
syn Newly joined room is included in an incremental sync after invite
syn Sync can be polled for updates
syn Sync is woken up for leaves
syn Left rooms appear in the leave section of sync
syn Newly left rooms appear in the leave section of incremental sync
syn We should see our own leave event, even if history_visibility is restricted (SYN-662)
syn We should see our own leave event when rejecting an invite, even if history_visibility is restricted (riot-web/3462)
syn Newly left rooms appear in the leave section of gapped sync
syn Previously left rooms don't appear in the leave section of sync
syn Left rooms appear in the leave section of full state sync
syn Archived rooms only contain history from before the user left
syn Banned rooms appear in the leave section of sync
syn Newly banned rooms appear in the leave section of incremental sync
syn Newly banned rooms appear in the leave section of incremental sync
syn Typing events appear in initial sync
syn Typing events appear in incremental sync
syn Typing events appear in gapped sync
syn Read receipts appear in initial v2 /sync
syn New read receipts appear in incremental v2 /sync
syn Can pass a JSON filter as a query parameter
syn Can request federation format via the filter
syn Read markers appear in incremental v2 /sync
syn Read markers appear in initial v2 /sync
syn Read markers can be updated
syn Lazy loading parameters in the filter are strictly boolean
syn The only membership state included in an initial sync is for all the senders in the timeline
syn The only membership state included in an incremental sync is for senders in the timeline
syn The only membership state included in a gapped incremental sync is for senders in the timeline
syn Gapped incremental syncs include all state changes
syn Old leaves are present in gapped incremental syncs
syn Leaves are present in non-gapped incremental syncs
syn Old members are included in gappy incr LL sync if they start speaking
syn Members from the gap are included in gappy incr LL sync
syn We don't send redundant membership state across incremental syncs by default
syn We do send redundant membership state across incremental syncs if asked
syn Unnamed room comes with a name summary
syn Named room comes with just joined member count summary
syn Room summary only has 5 heroes
syn Room summary counts change when membership changes
rmv User can create and send/receive messages in a room with version 1
rmv User can create and send/receive messages in a room with version 1 (2 subtests)
rmv local user can join room with version 1
rmv User can invite local user to room with version 1
rmv remote user can join room with version 1
rmv User can invite remote user to room with version 1
rmv Remote user can backfill in a room with version 1
rmv Can reject invites over federation for rooms with version 1
rmv Can receive redactions from regular users over federation in room version 1
rmv User can create and send/receive messages in a room with version 2
rmv User can create and send/receive messages in a room with version 2 (2 subtests)
rmv local user can join room with version 2
rmv User can invite local user to room with version 2
rmv remote user can join room with version 2
rmv User can invite remote user to room with version 2
rmv Remote user can backfill in a room with version 2
rmv Can reject invites over federation for rooms with version 2
rmv Can receive redactions from regular users over federation in room version 2
rmv User can create and send/receive messages in a room with version 3
rmv User can create and send/receive messages in a room with version 3 (2 subtests)
rmv local user can join room with version 3
rmv User can invite local user to room with version 3
rmv remote user can join room with version 3
rmv User can invite remote user to room with version 3
rmv Remote user can backfill in a room with version 3
rmv Can reject invites over federation for rooms with version 3
rmv Can receive redactions from regular users over federation in room version 3
rmv User can create and send/receive messages in a room with version 4
rmv User can create and send/receive messages in a room with version 4 (2 subtests)
rmv local user can join room with version 4
rmv User can invite local user to room with version 4
rmv remote user can join room with version 4
rmv User can invite remote user to room with version 4
rmv Remote user can backfill in a room with version 4
rmv Can reject invites over federation for rooms with version 4
rmv Can receive redactions from regular users over federation in room version 4
rmv User can create and send/receive messages in a room with version 5
rmv User can create and send/receive messages in a room with version 5 (2 subtests)
rmv local user can join room with version 5
rmv User can invite local user to room with version 5
rmv remote user can join room with version 5
rmv User can invite remote user to room with version 5
rmv Remote user can backfill in a room with version 5
rmv Can reject invites over federation for rooms with version 5
rmv Can receive redactions from regular users over federation in room version 5
rmv User can create and send/receive messages in a room with version 6
rmv User can create and send/receive messages in a room with version 6 (2 subtests)
rmv local user can join room with version 6
rmv User can invite local user to room with version 6
rmv remote user can join room with version 6
rmv User can invite remote user to room with version 6
rmv Remote user can backfill in a room with version 6
rmv Can reject invites over federation for rooms with version 6
rmv Can receive redactions from regular users over federation in room version 6
rmv Inbound federation rejects invites which include invalid JSON for room version 6
rmv Outbound federation rejects invite response which include invalid JSON for room version 6
rmv Inbound federation rejects invite rejections which include invalid JSON for room version 6
rmv Server rejects invalid JSON in a version 6 room
rmv User can create and send/receive messages in a room with version 7 (2 subtests)
rmv local user can join room with version 7
rmv User can invite local user to room with version 7
rmv remote user can join room with version 7
rmv User can invite remote user to room with version 7
rmv Remote user can backfill in a room with version 7
rmv Can reject invites over federation for rooms with version 7
rmv Can receive redactions from regular users over federation in room version 7
rmv User can create and send/receive messages in a room with version 8 (2 subtests)
rmv local user can join room with version 8
rmv User can invite local user to room with version 8
rmv remote user can join room with version 8
rmv User can invite remote user to room with version 8
rmv Remote user can backfill in a room with version 8
rmv Can reject invites over federation for rooms with version 8
rmv Can receive redactions from regular users over federation in room version 8
rmv User can create and send/receive messages in a room with version 9 (2 subtests)
rmv local user can join room with version 9
rmv User can invite local user to room with version 9
rmv remote user can join room with version 9
rmv User can invite remote user to room with version 9
rmv Remote user can backfill in a room with version 9
rmv Can reject invites over federation for rooms with version 9
rmv Can receive redactions from regular users over federation in room version 9
pre Presence changes are reported to local room members
f,pre Presence changes are also reported to remote room members
pre Presence changes to UNAVAILABLE are reported to local room members
f,pre Presence changes to UNAVAILABLE are reported to remote room members
v1s Newly created users see their own presence in /initialSync (SYT-34)
dvk Can upload device keys
dvk Should reject keys claiming to belong to a different user
dvk Can query device keys using POST
dvk Can query specific device keys using POST
dvk query for user with no keys returns empty key dict
dvk Can claim one time key using POST
f,dvk Can query remote device keys using POST
f,dvk Can claim remote one time key using POST
dvk Local device key changes appear in v2 /sync
dvk Local new device changes appear in v2 /sync
dvk Local delete device changes appear in v2 /sync
dvk Local update device changes appear in v2 /sync
dvk Can query remote device keys using POST after notification
f,dev Device deletion propagates over federation
f,dev If remote user leaves room, changes device and rejoins we see update in sync
f,dev If remote user leaves room we no longer receive device updates
dvk Local device key changes appear in /keys/changes
dvk New users appear in /keys/changes
f,dvk If remote user leaves room, changes device and rejoins we see update in /keys/changes
dvk Get left notifs in sync and /keys/changes when other user leaves
dvk Get left notifs for other users in sync and /keys/changes when user leaves
f,dvk If user leaves room, remote user changes device and rejoins we see update in /sync and /keys/changes
dkb Can create backup version
dkb Can update backup version
dkb Responds correctly when backup is empty
dkb Can backup keys
dkb Can update keys with better versions
dkb Will not update keys with worse versions
dkb Will not back up to an old backup version
dkb Can delete backup
dkb Deleted & recreated backups are empty
dkb Can create more than 10 backup versions
xsk Can upload self-signing keys
xsk Fails to upload self-signing keys with no auth
xsk Fails to upload self-signing key without master key
xsk Changing master key notifies local users
xsk Changing user-signing key notifies local users
f,xsk can fetch self-signing keys over federation
f,xsk uploading self-signing key notifies over federation
f,xsk uploading signed devices gets propagated over federation
tag Can add tag
tag Can remove tag
tag Can list tags for a room
v1s Tags appear in the v1 /events stream
v1s Tags appear in the v1 /initalSync
v1s Tags appear in the v1 room initial sync
tag Tags appear in an initial v2 /sync
tag Newly updated tags appear in an incremental v2 /sync
tag Deleted tags appear in an incremental v2 /sync
tag local user has tags copied to the new room
f,tag remote user has tags copied to the new room
sch Can search for an event by body
sch Can get context around search results
sch Can back-paginate search results
sch Search works across an upgraded room and its predecessor
sch Search results with rank ordering do not include redacted events
sch Search results with recent ordering do not include redacted events
acc Can add account data
acc Can add account data to room
acc Can get account data without syncing
acc Can get room account data without syncing
v1s Latest account data comes down in /initialSync
v1s Latest account data comes down in room initialSync
v1s Account data appears in v1 /events stream
v1s Room account data appears in v1 /events stream
acc Latest account data appears in v2 /sync
acc New account data appears in incremental v2 /sync
oid Can generate a openid access_token that can be exchanged for information about a user
oid Invalid openid access tokens are rejected
oid Requests to userinfo without access tokens are rejected
std Can send a message directly to a device using PUT /sendToDevice
std Can recv a device message using /sync
std Can recv device messages until they are acknowledged
std Device messages with the same txn_id are deduplicated
std Device messages wake up /sync
std Can recv device messages over federation
fsd Device messages over federation wake up /sync
std Can send messages with a wildcard device id
std Can send messages with a wildcard device id to two devices
std Wildcard device messages wake up /sync
fsd Wildcard device messages over federation wake up /sync
adm /whois
nsp /purge_history
nsp /purge_history by ts
nsp Can backfill purged history
nsp Shutdown room
ign Ignore user in existing room
ign Ignore invite in full sync
ign Ignore invite in incremental sync
fky Checking local federation server
fky Federation key API allows unsigned requests for keys
fky Federation key API can act as a notary server via a GET request
fky Federation key API can act as a notary server via a POST request
fky Key notary server should return an expired key if it can't find any others
fky Key notary server must not overwrite a valid key with a spurious result from the origin server
fqu Non-numeric ports in server names are rejected
fqu Outbound federation can query profile data
fqu Inbound federation can query profile data
fqu Outbound federation can query room alias directory
fqu Inbound federation can query room alias directory
fsj Membership event with an invalid displayname in the send_join response should not cause room join to fail
fsj Outbound federation can query v1 /send_join
fsj Outbound federation can query v2 /send_join
fmj Outbound federation passes make_join failures through to the client
fsj Inbound federation can receive v1 /send_join
fsj Inbound federation can receive v2 /send_join
fmj Inbound /v1/make_join rejects remote attempts to join local users to rooms
fsj Inbound /v1/send_join rejects incorrectly-signed joins
fsj Inbound /v1/send_join rejects joins from other servers
fau Inbound federation rejects remote attempts to kick local users to rooms
frv Inbound federation rejects attempts to join v1 rooms from servers without v1 support
frv Inbound federation rejects attempts to join v2 rooms from servers lacking version support
frv Inbound federation rejects attempts to join v2 rooms from servers only supporting v1
frv Inbound federation accepts attempts to join v2 rooms from servers with support
frv Outbound federation correctly handles unsupported room versions
frv A pair of servers can establish a join in a v2 room
fsj Outbound federation rejects send_join responses with no m.room.create event
frv Outbound federation rejects m.room.create events with an unknown room version
fsj Event with an invalid signature in the send_join response should not cause room join to fail
fsj Inbound: send_join rejects invalid JSON for room version 6
fed Outbound federation can send events
fed Inbound federation can receive events
fed Inbound federation can receive redacted events
msc Ephemeral messages received from servers are correctly expired
fed Events whose auth_events are in the wrong room do not mess up the room state
fed Inbound federation can return events
fed Inbound federation redacts events from erased users
fme Outbound federation can request missing events
fme Inbound federation can return missing events for world_readable visibility
fme Inbound federation can return missing events for shared visibility
fme Inbound federation can return missing events for invited visibility
fme Inbound federation can return missing events for joined visibility
fme outliers whose auth_events are in a different room are correctly rejected
fbk Outbound federation can backfill events
fbk Inbound federation can backfill events
fbk Backfill checks the events requested belong to the room
fbk Backfilled events whose prev_events are in a different room do not allow cross-room back-pagination
fiv Outbound federation can send invites via v1 API
fiv Outbound federation can send invites via v2 API
fiv Inbound federation can receive invites via v1 API
fiv Inbound federation can receive invites via v2 API
fiv Inbound federation can receive invite and reject when remote replies with a 403
fiv Inbound federation can receive invite and reject when remote replies with a 500
fiv Inbound federation can receive invite and reject when remote is unreachable
fiv Inbound federation rejects invites which are not signed by the sender
fiv Inbound federation can receive invite rejections
fiv Inbound federation rejects incorrectly-signed invite rejections
fsl Inbound /v1/send_leave rejects leaves from other servers
fst Inbound federation can get state for a room
fst Inbound federation of state requires event_id as a mandatory paramater
fst Inbound federation can get state_ids for a room
fst Inbound federation of state_ids requires event_id as a mandatory paramater
fst Federation rejects inbound events where the prev_events cannot be found
fst Room state at a rejected message event is the same as its predecessor
fst Room state at a rejected state event is the same as its predecessor
fst Outbound federation requests missing prev_events and then asks for /state_ids and resolves the state
fst Federation handles empty auth_events in state_ids sanely
fst Getting state checks the events requested belong to the room
fst Getting state IDs checks the events requested belong to the room
fst Should not be able to take over the room by pretending there is no PL event
fpb Inbound federation can get public room list
fed Outbound federation sends receipts
fed Inbound federation rejects receipts from wrong remote
fed Inbound federation ignores redactions from invalid servers room > v3
fed An event which redacts an event in a different room should be ignored
fed An event which redacts itself should be ignored
fed A pair of events which redact each other should be ignored
fdk Local device key changes get to remote servers
fdk Server correctly handles incoming m.device_list_update
fdk Server correctly resyncs when client query keys and there is no remote cache
fdk Server correctly resyncs when server leaves and rejoins a room
fdk Local device key changes get to remote servers with correct prev_id
fdk Device list doesn't change if remote server is down
fdk If a device list update goes missing, the server resyncs on the next one
fst Name/topic keys are correct
fau Remote servers cannot set power levels in rooms without existing powerlevels
fau Remote servers should reject attempts by non-creators to set the power levels
fau Inbound federation rejects typing notifications from wrong remote
fau Users cannot set notifications powerlevel higher than their own
fed Forward extremities remain so even after the next events are populated as outliers
fau Banned servers cannot send events
fau Banned servers cannot /make_join
fau Banned servers cannot /send_join
fau Banned servers cannot /make_leave
fau Banned servers cannot /send_leave
fau Banned servers cannot /invite
fau Banned servers cannot get room state
fau Banned servers cannot get room state ids
fau Banned servers cannot backfill
fau Banned servers cannot /event_auth
fau Banned servers cannot get missing events
fau Server correctly handles transactions that break edu limits
fau Inbound federation correctly soft fails events
fau Inbound federation accepts a second soft-failed event
fau Inbound federation correctly handles soft failed events as extremities
med Can upload with Unicode file name
med Can download with Unicode file name locally
f,med Can download with Unicode file name over federation
med Alternative server names do not cause a routing loop
med Can download specifying a different Unicode file name
med Can upload without a file name
med Can download without a file name locally
f,med Can download without a file name over federation
med Can upload with ASCII file name
med Can download file 'ascii'
med Can download file 'name with spaces'
med Can download file 'name;with;semicolons'
med Can download specifying a different ASCII file name
med Can send image in room message
med Can fetch images in room
med POSTed media can be thumbnailed
f,med Remote media can be thumbnailed
med Test URL preview
med Can read configuration endpoint
nsp Can quarantine media in rooms
udr User appears in user directory
udr User in private room doesn't appear in user directory
udr User joining then leaving public room appears and dissappears from directory
udr Users appear/disappear from directory when join_rules are changed
udr Users appear/disappear from directory when history_visibility are changed
udr Users stay in directory when join_rules are changed but history_visibility is world_readable
f,udr User in remote room doesn't appear in user directory after server left room
udr User directory correctly update on display name change
udr User in shared private room does appear in user directory
udr User in shared private room does appear in user directory until leave
udr User in dir while user still shares private rooms
nsp Create group
nsp Add group rooms
nsp Remove group rooms
nsp Get local group profile
nsp Get local group users
nsp Add/remove local group rooms
nsp Get local group summary
nsp Get remote group profile
nsp Get remote group users
nsp Add/remove remote group rooms
nsp Get remote group summary
nsp Add local group users
nsp Remove self from local group
nsp Remove other from local group
nsp Add remote group users
nsp Remove self from remote group
nsp Listing invited users of a remote group when not a member returns a 403
nsp Add group category
nsp Remove group category
nsp Get group categories
nsp Add group role
nsp Remove group role
nsp Get group roles
nsp Add room to group summary
nsp Adding room to group summary keeps room_id when fetching rooms in group
nsp Adding multiple rooms to group summary have correct order
nsp Remove room from group summary
nsp Add room to group summary with category
nsp Remove room from group summary with category
nsp Add user to group summary
nsp Adding multiple users to group summary have correct order
nsp Remove user from group summary
nsp Add user to group summary with role
nsp Remove user from group summary with role
nsp Local group invites come down sync
nsp Group creator sees group in sync
nsp Group creator sees group in initial sync
nsp Get/set local group publicity
nsp Bulk get group publicity
nsp Joinability comes down summary
nsp Set group joinable and join it
nsp Group is not joinable by default
nsp Group is joinable over federation
nsp Room is transitioned on local and remote groups upon room upgrade
nsp POST /_synapse/admin/v1/register with shared secret
nsp POST /_synapse/admin/v1/register admin with shared secret
nsp POST /_synapse/admin/v1/register with shared secret downcases capitals
nsp POST /_synapse/admin/v1/register with shared secret disallows symbols
3pd Can bind 3PID via home server
3pd Can bind and unbind 3PID via homeserver
3pd Can unbind 3PID via homeserver when bound out of band
3pd 3PIDs are unbound after account deactivation
3pd Can bind and unbind 3PID via /unbind by specifying the identity server
3pd Can bind and unbind 3PID via /unbind without specifying the identity server
app AS can create a user
app AS can create a user with an underscore
app AS can create a user with inhibit_login
app AS cannot create users outside its own namespace
app Regular users cannot register within the AS namespace
app AS can make room aliases
app Regular users cannot create room aliases within the AS namespace
app AS-ghosted users can use rooms via AS
app AS-ghosted users can use rooms themselves
app AS-ghosted users can use rooms via AS (2 subtests)
app AS-ghosted users can use rooms themselves (3 subtests)
app Ghost user must register before joining room
app AS can set avatar for ghosted users
app AS can set displayname for ghosted users
app AS can't set displayname for random users
app Inviting an AS-hosted user asks the AS server
app Accesing an AS-hosted room alias asks the AS server
app Accesing an AS-hosted room alias asks the AS server (2 subtests)
app Events in rooms with AS-hosted room aliases are sent to AS server
app AS user (not ghost) can join room without registering
app AS user (not ghost) can join room without registering, with user_id query param
app HS provides query metadata
app HS can provide query metadata on a single protocol
app HS will proxy request for 3PU mapping
app HS will proxy request for 3PL mapping
app AS can publish rooms in their own list
app AS and main public room lists are separate
app AS can deactivate a user
psh Test that a message is pushed
psh Test that a message is pushed (6 subtests)
psh Test that rejected pushers are removed. (4 subtests)
psh Invites are pushed
psh Rooms with names are correctly named in pushed
psh Rooms with canonical alias are correctly named in pushed
psh Rooms with many users are correctly pushed
psh Don't get pushed for rooms you've muted
psh Rejected events are not pushed
psh Can add global push rule for room
psh Can add global push rule for sender
psh Can add global push rule for content
psh Can add global push rule for override
psh Can add global push rule for underride
psh Can add global push rule for content
psh New rules appear before old rules by default
psh Can add global push rule before an existing rule
psh Can add global push rule after an existing rule
psh Can delete a push rule
psh Can disable a push rule
psh Adding the same push rule twice is idempotent
psh Messages that notify from another user increment unread notification count
psh Messages that highlight from another user increment unread highlight count
psh Can change the actions of default rules
psh Changing the actions of an unknown default rule fails with 404
psh Can change the actions of a user specified rule
psh Changing the actions of an unknown rule fails with 404
psh Can fetch a user's pushers
psh Push rules come down in an initial /sync
psh Adding a push rule wakes up an incremental /sync
psh Disabling a push rule wakes up an incremental /sync
psh Enabling a push rule wakes up an incremental /sync
psh Setting actions for a push rule wakes up an incremental /sync
psh Can enable/disable default rules
psh Enabling an unknown default rule fails with 404
psh Test that rejected pushers are removed.
psh Notifications can be viewed with GET /notifications
psh Trying to add push rule with no scope fails with 400
psh Trying to add push rule with invalid scope fails with 400
psh Trying to add push rule with missing template fails with 400
psh Trying to add push rule with missing rule_id fails with 400
psh Trying to add push rule with empty rule_id fails with 400
psh Trying to add push rule with invalid template fails with 400
psh Trying to add push rule with rule_id with slashes fails with 400
psh Trying to add push rule with override rule without conditions fails with 400
psh Trying to add push rule with underride rule without conditions fails with 400
psh Trying to add push rule with condition without kind fails with 400
psh Trying to add push rule with content rule without pattern fails with 400
psh Trying to add push rule with no actions fails with 400
psh Trying to add push rule with invalid action fails with 400
psh Trying to add push rule with invalid attr fails with 400
psh Trying to add push rule with invalid value for enabled fails with 400
psh Trying to get push rules with no trailing slash fails with 400
psh Trying to get push rules with scope without trailing slash fails with 400
psh Trying to get push rules with template without tailing slash fails with 400
psh Trying to get push rules with unknown scope fails with 400
psh Trying to get push rules with unknown template fails with 400
psh Trying to get push rules with unknown attribute fails with 400
psh Trying to get push rules with unknown rule_id fails with 404
psh Rooms with names are correctly named in pushes
v1s GET /initialSync with non-numeric 'limit'
v1s GET /events with non-numeric 'limit'
v1s GET /events with negative 'limit'
v1s GET /events with non-numeric 'timeout'
ath Event size limits
syn Check creating invalid filters returns 4xx
f,pre New federated private chats get full presence information (SYN-115)
pre Left room members do not cause problems for presence
crm Rooms can be created with an initial invite list (SYN-205) (1 subtests)
typ Typing notifications don't leak
ban Non-present room members cannot ban others
ban Non-present room members cannot ban others (3 subtests)
psh Getting push rules doesn't corrupt the cache SYN-390
psh Getting push rules doesn't corrupt the cache SYN-390 (3 subtests)
inv Test that we can be reinvited to a room we created
syn Multiple calls to /sync should not cause 500 errors
syn Multiple calls to /sync should not cause 500 errors (6 subtests)
gst Guest user can call /events on another world_readable room (SYN-606)
gst Real user can call /events on another world_readable room (SYN-606)
gst Events come down the correct room
pub Asking for a remote rooms list, but supplying the local server's name, returns the local rooms list
std Can send a to-device message to two users which both receive it using /sync
fme Outbound federation will ignore a missing event with bad JSON for room version 6
fbk Outbound federation rejects backfill containing invalid JSON for events in room version 6
jso Invalid JSON integers
jso Invalid JSON floats
jso Invalid JSON special values
inv Can invite users to invite-only rooms (2 subtests)
plv setting 'm.room.name' respects room powerlevel (2 subtests)
psh Messages that notify from another user increment notification_count
msc Messages that org.matrix.msc2625.mark_unread from another user increment org.matrix.msc2625.unread_count
dvk Can claim one time key using POST (2 subtests)
fdk Can query remote device keys using POST (1 subtests)
fdk Can claim remote one time key using POST (2 subtests)
fmj Inbound /make_join rejects attempts to join rooms where all users have left
msc Local users can peek into world_readable rooms by room ID
msc We can't peek into rooms with shared history_visibility
msc We can't peek into rooms with invited history_visibility
msc We can't peek into rooms with joined history_visibility
msc Local users can peek by room alias
msc Peeked rooms only turn up in the sync for the device who peeked them
ban 'ban' event respects room powerlevel (2 subtests)
inv Test that we can be reinvited to a room we created (11 subtests)
fiv Rejecting invite over federation doesn't break incremental /sync
pre Presence can be set from sync
fst /state returns M_NOT_FOUND for an outlier
fst /state_ids returns M_NOT_FOUND for an outlier
fst /state returns M_NOT_FOUND for a rejected message event
fst /state_ids returns M_NOT_FOUND for a rejected message event
fst /state returns M_NOT_FOUND for a rejected state event
fst /state_ids returns M_NOT_FOUND for a rejected state event
fst Room state after a rejected message event is the same as before
fst Room state after a rejected state event is the same as before
fpb Federation publicRoom Name/topic keys are correct
fed New federated private chats get full presence information (SYN-115) (10 subtests)
dvk Rejects invalid device keys
rmv User can create and send/receive messages in a room with version 10
rmv local user can join room with version 10
rmv User can invite local user to room with version 10
rmv remote user can join room with version 10
rmv User can invite remote user to room with version 10
rmv Remote user can backfill in a room with version 10
rmv Can reject invites over federation for rooms with version 10
rmv Can receive redactions from regular users over federation in room version 10
rmv User can create and send/receive messages in a room with version 11
rmv local user can join room with version 11
rmv User can invite local user to room with version 11
rmv remote user can join room with version 11
rmv User can invite remote user to room with version 11
rmv Remote user can backfill in a room with version 11
rmv Can reject invites over federation for rooms with version 11
rmv Can receive redactions from regular users over federation in room version 11

View file

@ -1,275 +0,0 @@
#!/usr/bin/env python3
from __future__ import division
import argparse
import re
import os
# Usage: $ ./are-we-synapse-yet.py [-v] results.tap
# This script scans a results.tap file from Dendrite's CI process and spits out
# a rating of how close we are to Synapse parity, based purely on SyTests.
# The main complexity is grouping tests sensibly into features like 'Registration'
# and 'Federation'. Then it just checks the ones which are passing and calculates
# percentages for each group. Produces results like:
#
# Client-Server APIs: 29% (196/666 tests)
# -------------------
# Registration : 62% (20/32 tests)
# Login : 7% (1/15 tests)
# V1 CS APIs : 10% (3/30 tests)
# ...
#
# or in verbose mode:
#
# Client-Server APIs: 29% (196/666 tests)
# -------------------
# Registration : 62% (20/32 tests)
# ✓ GET /register yields a set of flows
# ✓ POST /register can create a user
# ✓ POST /register downcases capitals in usernames
# ...
#
# You can also tack `-v` on to see exactly which tests each category falls under.
test_mappings = {
"nsp": "Non-Spec API",
"unk": "Unknown API (no group specified)",
"app": "Application Services API",
"msc": "MSCs",
"f": "Federation", # flag to mark test involves federation
"federation_apis": {
"fky": "Key API",
"fsj": "send_join API",
"fmj": "make_join API",
"fsl": "send_leave API",
"fiv": "Invite API",
"fqu": "Query API",
"frv": "room versions",
"fau": "Auth",
"fbk": "Backfill API",
"fme": "get_missing_events API",
"fst": "State APIs",
"fpb": "Public Room API",
"fdk": "Device Key APIs",
"fed": "Federation API",
"fsd": "Send-to-Device APIs",
},
"client_apis": {
"reg": "Registration",
"log": "Login",
"lox": "Logout",
"v1s": "V1 CS APIs",
"csa": "Misc CS APIs",
"pro": "Profile",
"dev": "Devices",
"dvk": "Device Keys",
"dkb": "Device Key Backup",
"xsk": "Cross-signing Keys",
"pre": "Presence",
"crm": "Create Room",
"syn": "Sync API",
"rmv": "Room Versions",
"rst": "Room State APIs",
"pub": "Public Room APIs",
"mem": "Room Membership",
"ali": "Room Aliases",
"jon": "Joining Rooms",
"lev": "Leaving Rooms",
"inv": "Inviting users to Rooms",
"ban": "Banning users",
"snd": "Sending events",
"get": "Getting events for Rooms",
"rct": "Receipts",
"red": "Read markers",
"med": "Media APIs",
"cap": "Capabilities API",
"typ": "Typing API",
"psh": "Push APIs",
"acc": "Account APIs",
"eph": "Ephemeral Events",
"plv": "Power Levels",
"xxx": "Redaction",
"3pd": "Third-Party ID APIs",
"gst": "Guest APIs",
"ath": "Room Auth",
"fgt": "Forget APIs",
"ctx": "Context APIs",
"upg": "Room Upgrade APIs",
"tag": "Tagging APIs",
"sch": "Search APIs",
"oid": "OpenID API",
"std": "Send-to-Device APIs",
"adm": "Server Admin API",
"ign": "Ignore Users",
"udr": "User Directory APIs",
"jso": "Enforced canonical JSON",
},
}
# optional 'not ' with test number then anything but '#'
re_testname = re.compile(r"^(not )?ok [0-9]+ ([^#]+)")
# Parses lines like the following:
#
# SUCCESS: ok 3 POST /register downcases capitals in usernames
# FAIL: not ok 54 (expected fail) POST /createRoom creates a room with the given version
# SKIP: ok 821 Multiple calls to /sync should not cause 500 errors # skip lack of can_post_room_receipts
# EXPECT FAIL: not ok 822 (expected fail) Guest user can call /events on another world_readable room (SYN-606) # TODO expected fail
#
# Only SUCCESS lines are treated as success, the rest are not implemented.
#
# Returns a dict like:
# { name: "...", ok: True }
def parse_test_line(line):
if not line.startswith("ok ") and not line.startswith("not ok "):
return
re_match = re_testname.match(line)
test_name = re_match.groups()[1].replace("(expected fail) ", "").strip()
test_pass = False
if line.startswith("ok ") and not "# skip " in line:
test_pass = True
return {
"name": test_name,
"ok": test_pass,
}
# Prints the stats for a complete section.
# header_name => "Client-Server APIs"
# gid_to_tests => { gid: { <name>: True|False }}
# gid_to_name => { gid: "Group Name" }
# verbose => True|False
# Produces:
# Client-Server APIs: 29% (196/666 tests)
# -------------------
# Registration : 62% (20/32 tests)
# Login : 7% (1/15 tests)
# V1 CS APIs : 10% (3/30 tests)
# ...
# or in verbose mode:
# Client-Server APIs: 29% (196/666 tests)
# -------------------
# Registration : 62% (20/32 tests)
# ✓ GET /register yields a set of flows
# ✓ POST /register can create a user
# ✓ POST /register downcases capitals in usernames
# ...
def print_stats(header_name, gid_to_tests, gid_to_name, verbose):
ci = os.getenv("CI") # When running from GHA, this groups the subsections
subsections = [] # Registration: 100% (13/13 tests)
subsection_test_names = {} # 'subsection name': ["✓ Test 1", "✓ Test 2", "× Test 3"]
total_passing = 0
total_tests = 0
for gid, tests in gid_to_tests.items():
group_total = len(tests)
if group_total == 0:
continue
group_passing = 0
test_names_and_marks = []
for name, passing in tests.items():
if passing:
group_passing += 1
test_names_and_marks.append(f"{'' if passing else ''} {name}")
total_tests += group_total
total_passing += group_passing
pct = "{0:.0f}%".format(group_passing/group_total * 100)
line = "%s: %s (%d/%d tests)" % (gid_to_name[gid].ljust(25, ' '), pct.rjust(4, ' '), group_passing, group_total)
subsections.append(line)
subsection_test_names[line] = test_names_and_marks
# avoid errors when trying to divide by 0
if total_tests == 0:
return
pct = "{0:.0f}%".format(total_passing/total_tests * 100)
print("%s: %s (%d/%d tests)" % (header_name, pct, total_passing, total_tests))
print("-" * (len(header_name)+1))
for line in subsections:
print("%s%s" % ("::group::" if ci and verbose else "", line,))
if verbose:
for test_name_and_pass_mark in subsection_test_names[line]:
print(" %s" % (test_name_and_pass_mark,))
print("%s" % ("::endgroup::" if ci else ""))
print("")
def main(results_tap_path, verbose):
    # Load up test mappings
    test_name_to_group_id = {}
    fed_tests = set()
    client_tests = set()
    with open("./are-we-synapse-yet.list", "r") as f:
        for line in f.readlines():
            test_name = " ".join(line.split(" ")[1:]).strip()
            groups = line.split(" ")[0].split(",")
            for gid in groups:
                if gid == "f" or gid in test_mappings["federation_apis"]:
                    fed_tests.add(test_name)
                else:
                    client_tests.add(test_name)
                if gid == "f":
                    continue # we expect another group ID
                test_name_to_group_id[test_name] = gid
    # parse results.tap
    summary = {
        "client": {
            # gid: {
            #    test_name: OK
            # }
        },
        "federation": {
            # gid: {
            #    test_name: OK
            # }
        },
        "appservice": {
            "app": {},
        },
        "nonspec": {
            "nsp": {},
            "msc": {},
            "unk": {}
        },
    }
    with open(results_tap_path, "r") as f:
        for line in f.readlines():
            test_result = parse_test_line(line)
            if not test_result:
                continue
            name = test_result["name"]
            group_id = test_name_to_group_id.get(name)
            if not group_id:
                summary["nonspec"]["unk"][name] = test_result["ok"]
            if group_id == "nsp":
                summary["nonspec"]["nsp"][name] = test_result["ok"]
            elif group_id == "msc":
                summary["nonspec"]["msc"][name] = test_result["ok"]
            elif group_id == "app":
                summary["appservice"]["app"][name] = test_result["ok"]
            elif group_id in test_mappings["federation_apis"]:
                group = summary["federation"].get(group_id, {})
                group[name] = test_result["ok"]
                summary["federation"][group_id] = group
            elif group_id in test_mappings["client_apis"]:
                group = summary["client"].get(group_id, {})
                group[name] = test_result["ok"]
                summary["client"][group_id] = group
    print("Are We Synapse Yet?")
    print("===================")
    print("")
    print_stats("Non-Spec APIs", summary["nonspec"], test_mappings, verbose)
    print_stats("Client-Server APIs", summary["client"], test_mappings["client_apis"], verbose)
    print_stats("Federation APIs", summary["federation"], test_mappings["federation_apis"], verbose)
    print_stats("Application Services APIs", summary["appservice"], test_mappings, verbose)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("tap_file", help="path to results.tap")
parser.add_argument("-v", action="store_true", help="show individual test names in output")
args = parser.parse_args()
main(args.tap_file, args.v)

View file

@ -1,4 +0,0 @@
#!/bin/sh -eu
export GIT_COMMIT=$(git rev-list -1 HEAD) && \
GOOS=js GOARCH=wasm go build -ldflags "-X main.GitCommit=$GIT_COMMIT" -o bin/main.wasm ./cmd/dendritejs-pinecone

3
build.sh Executable file
View file

@ -0,0 +1,3 @@
#!/bin/sh
GOBIN=$PWD/`dirname $0`/bin go install -v $PWD/`dirname $0`/cmd/...

View file

@ -1,101 +0,0 @@
// Copyright 2020 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build wasm
// +build wasm
package main
import (
"bufio"
"fmt"
"net/http"
"net/http/httptest"
"strings"
"syscall/js"
)
// JSServer exposes an HTTP-like server interface which allows JS to 'send' requests to it.
type JSServer struct {
// The router which will service requests
Mux http.Handler
}
// OnRequestFromJS is the function that JS will invoke when there is a new request.
// The JS function signature is:
// function(reqString: string): Promise<{result: string, error: string}>
// Usage is like:
// const res = await global._go_js_server.fetch(reqString);
// if (res.error) {
// // handle error: this is a 'network' error, not a non-2xx error.
// }
// const rawHttpResponse = res.result;
func (h *JSServer) OnRequestFromJS(this js.Value, args []js.Value) interface{} {
// we HAVE to spawn a new goroutine and return immediately or else Go will deadlock
// if this request blocks at all e.g for /sync calls
httpStr := args[0].String()
promise := js.Global().Get("Promise").New(js.FuncOf(func(pthis js.Value, pargs []js.Value) interface{} {
// The initial callback code for new Promise() is also called on the critical path, which is why
// we need to put this in an immediately invoked goroutine.
go func() {
resolve := pargs[0]
resStr, err := h.handle(httpStr)
errStr := ""
if err != nil {
errStr = err.Error()
}
resolve.Invoke(map[string]interface{}{
"result": resStr,
"error": errStr,
})
}()
return nil
}))
return promise
}
// handle invokes the http.ServeMux for this request and returns the raw HTTP response.
func (h *JSServer) handle(httpStr string) (resStr string, err error) {
req, err := http.ReadRequest(bufio.NewReader(strings.NewReader(httpStr)))
if err != nil {
return
}
w := httptest.NewRecorder()
h.Mux.ServeHTTP(w, req)
res := w.Result()
var resBuffer strings.Builder
err = res.Write(&resBuffer)
return resBuffer.String(), err
}
// ListenAndServe registers a variable in JS-land with the given namespace. This variable is
// a function which JS-land can call to 'send' HTTP requests. The function is attached to
// a global object called "_go_js_server". See OnRequestFromJS for more info.
func (h *JSServer) ListenAndServe(namespace string) {
globalName := "_go_js_server"
// register a hook in JS-land for it to invoke stuff
server := js.Global().Get(globalName)
if !server.Truthy() {
server = js.Global().Get("Object").New()
js.Global().Set(globalName, server)
}
server.Set(namespace, js.FuncOf(h.OnRequestFromJS))
fmt.Printf("Listening for requests from JS on function %s.%s\n", globalName, namespace)
// Block forever to mimic http.ListenAndServe
select {}
}
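// A minimal usage sketch (not part of the original file), assuming a plain http.ServeMux;
// the handler name is hypothetical and any http.Handler works as the Mux:
//
//  mux := http.NewServeMux()
//  mux.HandleFunc("/_matrix/client/versions", versionsHandler) // versionsHandler is illustrative
//  srv := &JSServer{Mux: mux}
//  srv.ListenAndServe("fetch") // blocks, mirroring http.ListenAndServe
//  // JS-land then calls: const res = await global._go_js_server.fetch(rawHTTPRequestString)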

View file

@ -1,234 +0,0 @@
// Copyright 2020 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build wasm
// +build wasm
package main
import (
"crypto/ed25519"
"encoding/hex"
"fmt"
"syscall/js"
"github.com/gorilla/mux"
"github.com/matrix-org/dendrite/appservice"
"github.com/matrix-org/dendrite/cmd/dendrite-demo-pinecone/conn"
"github.com/matrix-org/dendrite/cmd/dendrite-demo-pinecone/rooms"
"github.com/matrix-org/dendrite/cmd/dendrite-demo-yggdrasil/signing"
"github.com/matrix-org/dendrite/federationapi"
"github.com/matrix-org/dendrite/internal/caching"
"github.com/matrix-org/dendrite/internal/httputil"
"github.com/matrix-org/dendrite/internal/sqlutil"
"github.com/matrix-org/dendrite/roomserver"
"github.com/matrix-org/dendrite/setup"
"github.com/matrix-org/dendrite/setup/config"
"github.com/matrix-org/dendrite/setup/jetstream"
"github.com/matrix-org/dendrite/setup/process"
"github.com/matrix-org/dendrite/userapi"
"github.com/matrix-org/gomatrixserverlib/spec"
"github.com/matrix-org/gomatrixserverlib"
"github.com/sirupsen/logrus"
_ "github.com/matrix-org/go-sqlite3-js"
pineconeConnections "github.com/matrix-org/pinecone/connections"
pineconeRouter "github.com/matrix-org/pinecone/router"
pineconeSessions "github.com/matrix-org/pinecone/sessions"
)
var GitCommit string
func init() {
fmt.Printf("[%s] dendrite.js starting...\n", GitCommit)
}
const publicPeer = "wss://pinecone.matrix.org/public"
const keyNameEd25519 = "_go_ed25519_key"
func readKeyFromLocalStorage() (key ed25519.PrivateKey, err error) {
localforage := js.Global().Get("localforage")
if !localforage.Truthy() {
err = fmt.Errorf("readKeyFromLocalStorage: no localforage")
return
}
// https://localforage.github.io/localForage/
item, ok := await(localforage.Call("getItem", keyNameEd25519))
if !ok || !item.Truthy() {
err = fmt.Errorf("readKeyFromLocalStorage: no key in localforage")
return
}
fmt.Println("Found key in localforage")
// extract []byte and make an ed25519 key
seed := make([]byte, 32, 32)
js.CopyBytesToGo(seed, item)
return ed25519.NewKeyFromSeed(seed), nil
}
func writeKeyToLocalStorage(key ed25519.PrivateKey) error {
localforage := js.Global().Get("localforage")
if !localforage.Truthy() {
return fmt.Errorf("writeKeyToLocalStorage: no localforage")
}
// make a Uint8Array from the key's seed
seed := key.Seed()
jsSeed := js.Global().Get("Uint8Array").New(len(seed))
js.CopyBytesToJS(jsSeed, seed)
// write it
localforage.Call("setItem", keyNameEd25519, jsSeed)
return nil
}
// taken from https://go-review.googlesource.com/c/go/+/150917
// await waits until the promise v has been resolved or rejected and returns the promise's result value.
// The boolean value ok is true if the promise has been resolved, false if it has been rejected.
// If v is not a promise, v itself is returned as the value and ok is true.
func await(v js.Value) (result js.Value, ok bool) {
if v.Type() != js.TypeObject || v.Get("then").Type() != js.TypeFunction {
return v, true
}
done := make(chan struct{})
onResolve := js.FuncOf(func(this js.Value, args []js.Value) interface{} {
result = args[0]
ok = true
close(done)
return nil
})
defer onResolve.Release()
onReject := js.FuncOf(func(this js.Value, args []js.Value) interface{} {
result = args[0]
ok = false
close(done)
return nil
})
defer onReject.Release()
v.Call("then", onResolve, onReject)
<-done
return
}
func generateKey() ed25519.PrivateKey {
// attempt to look for a seed in JS-land and if it exists use it.
priv, err := readKeyFromLocalStorage()
if err == nil {
fmt.Println("Read key from localStorage")
return priv
}
// generate a new key
fmt.Println(err, " : Generating new ed25519 key")
_, priv, err = ed25519.GenerateKey(nil)
if err != nil {
logrus.Fatalf("Failed to generate ed25519 key: %s", err)
}
if err := writeKeyToLocalStorage(priv); err != nil {
fmt.Println("failed to write key to localStorage: ", err)
// non-fatal, we'll just have amnesia for a while
}
return priv
}
func main() {
startup()
// We want to block forever to let the fetch and libp2p handler serve the APIs
select {}
}
func startup() {
sk := generateKey()
pk := sk.Public().(ed25519.PublicKey)
pRouter := pineconeRouter.NewRouter(logrus.WithField("pinecone", "router"), sk, false)
pSessions := pineconeSessions.NewSessions(logrus.WithField("pinecone", "sessions"), pRouter, []string{"matrix"})
pManager := pineconeConnections.NewConnectionManager(pRouter)
pManager.AddPeer("wss://pinecone.matrix.org/public")
cfg := &config.Dendrite{}
cfg.Defaults(config.DefaultOpts{Generate: true, SingleDatabase: false})
cfg.UserAPI.AccountDatabase.ConnectionString = "file:/idb/dendritejs_account.db"
cfg.FederationAPI.Database.ConnectionString = "file:/idb/dendritejs_fedsender.db"
cfg.MediaAPI.Database.ConnectionString = "file:/idb/dendritejs_mediaapi.db"
cfg.RoomServer.Database.ConnectionString = "file:/idb/dendritejs_roomserver.db"
cfg.SyncAPI.Database.ConnectionString = "file:/idb/dendritejs_syncapi.db"
cfg.KeyServer.Database.ConnectionString = "file:/idb/dendritejs_e2ekey.db"
cfg.Global.JetStream.StoragePath = "file:/idb/dendritejs/"
cfg.Global.TrustedIDServers = []string{}
cfg.Global.KeyID = gomatrixserverlib.KeyID(signing.KeyID)
cfg.Global.PrivateKey = sk
cfg.Global.ServerName = spec.ServerName(hex.EncodeToString(pk))
cfg.ClientAPI.RegistrationDisabled = false
cfg.ClientAPI.OpenRegistrationWithoutVerificationEnabled = true
if err := cfg.Derive(); err != nil {
logrus.Fatalf("Failed to derive values from config: %s", err)
}
natsInstance := jetstream.NATSInstance{}
processCtx := process.NewProcessContext()
cm := sqlutil.NewConnectionManager(processCtx, cfg.Global.DatabaseOptions)
routers := httputil.NewRouters()
caches := caching.NewRistrettoCache(cfg.Global.Cache.EstimatedMaxSize, cfg.Global.Cache.MaxAge, caching.EnableMetrics)
rsAPI := roomserver.NewInternalAPI(processCtx, cfg, cm, &natsInstance, caches, caching.EnableMetrics)
federation := conn.CreateFederationClient(cfg, pSessions)
serverKeyAPI := &signing.YggdrasilKeys{}
keyRing := serverKeyAPI.KeyRing()
fedSenderAPI := federationapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, federation, rsAPI, caches, keyRing, true)
userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, federation, caching.EnableMetrics, fedSenderAPI.IsBlacklistedOrBackingOff)
asQuery := appservice.NewInternalAPI(
processCtx, cfg, &natsInstance, userAPI, rsAPI,
)
rsAPI.SetAppserviceAPI(asQuery)
rsAPI.SetFederationAPI(fedSenderAPI, keyRing)
monolith := setup.Monolith{
Config: cfg,
Client: conn.CreateClient(pSessions),
FedClient: federation,
KeyRing: keyRing,
AppserviceAPI: asQuery,
FederationAPI: fedSenderAPI,
RoomserverAPI: rsAPI,
UserAPI: userAPI,
//ServerKeyAPI: serverKeyAPI,
ExtPublicRoomsProvider: rooms.NewPineconeRoomProvider(pRouter, pSessions, fedSenderAPI, federation),
}
monolith.AddAllPublicRoutes(processCtx, cfg, routers, cm, &natsInstance, caches, caching.EnableMetrics)
httpRouter := mux.NewRouter().SkipClean(true).UseEncodedPath()
httpRouter.PathPrefix(httputil.PublicClientPathPrefix).Handler(routers.Client)
httpRouter.PathPrefix(httputil.PublicMediaPathPrefix).Handler(routers.Media)
p2pRouter := pSessions.Protocol("matrix").HTTP().Mux()
p2pRouter.Handle(httputil.PublicFederationPathPrefix, routers.Federation)
p2pRouter.Handle(httputil.PublicMediaPathPrefix, routers.Media)
// Expose the matrix APIs via fetch - for local traffic
go func() {
logrus.Info("Listening for service-worker fetch traffic")
s := JSServer{
Mux: httpRouter,
}
s.ListenAndServe("fetch")
}()
}

View file

@ -1,111 +0,0 @@
# This dockerfile will build dendritejs and hook it up to riot-web, build that then dump the
# resulting HTML/JS onto an nginx container for hosting. It requires no specific build context
# as it pulls archives straight from github branches.
#
# $ docker build -t dendritejs -f DendriteJS.Dockerfile .
# $ docker run --rm -p 8888:80 dendritejs
# Then visit http://localhost:8888
FROM golang:1.14-alpine AS gobuild
# Download and build dendrite
WORKDIR /build
ADD https://github.com/matrix-org/dendrite/archive/main.tar.gz /build/main.tar.gz
RUN tar xvfz main.tar.gz
WORKDIR /build/dendrite-main
RUN GOOS=js GOARCH=wasm go build -o main.wasm ./cmd/dendritejs
FROM node:14-stretch AS jsbuild
# apparently some deps require python
RUN apt-get update && apt-get -y install python
# Download riot-web and libp2p repos
WORKDIR /build
ADD https://github.com/matrix-org/go-http-js-libp2p/archive/main.tar.gz /build/libp2p.tar.gz
RUN tar xvfz libp2p.tar.gz
ADD https://github.com/vector-im/element-web/archive/matthew/p2p.tar.gz /build/p2p.tar.gz
RUN tar xvfz p2p.tar.gz
# Install deps for element-web, symlink in libp2p repo and build that too
WORKDIR /build/element-web-matthew-p2p
RUN yarn install
RUN ln -s /build/go-http-js-libp2p-master /build/element-web-matthew-p2p/node_modules/go-http-js-libp2p
RUN (cd node_modules/go-http-js-libp2p && yarn install)
COPY --from=gobuild /build/dendrite-main/main.wasm ./src/vector/dendrite.wasm
# build it all
RUN yarn build:p2p
SHELL ["/bin/bash", "-c"]
RUN echo $'\
{ \n\
"default_server_config": { \n\
"m.homeserver": { \n\
"base_url": "https://p2p.riot.im", \n\
"server_name": "p2p.riot.im" \n\
}, \n\
"m.identity_server": { \n\
"base_url": "https://vector.im" \n\
} \n\
}, \n\
"disable_custom_urls": false, \n\
"disable_guests": true, \n\
"disable_login_language_selector": false, \n\
"disable_3pid_login": true, \n\
"brand": "Riot", \n\
"integrations_ui_url": "https://scalar.vector.im/", \n\
"integrations_rest_url": "https://scalar.vector.im/api", \n\
"integrations_widgets_urls": [ \n\
"https://scalar.vector.im/_matrix/integrations/v1", \n\
"https://scalar.vector.im/api", \n\
"https://scalar-staging.vector.im/_matrix/integrations/v1", \n\
"https://scalar-staging.vector.im/api", \n\
"https://scalar-staging.riot.im/scalar/api" \n\
], \n\
"integrations_jitsi_widget_url": "https://scalar.vector.im/api/widgets/jitsi.html", \n\
"bug_report_endpoint_url": "https://riot.im/bugreports/submit", \n\
"defaultCountryCode": "GB", \n\
"showLabsSettings": false, \n\
"features": { \n\
"feature_pinning": "labs", \n\
"feature_custom_status": "labs", \n\
"feature_custom_tags": "labs", \n\
"feature_state_counters": "labs" \n\
}, \n\
"default_federate": true, \n\
"default_theme": "light", \n\
"roomDirectory": { \n\
"servers": [ \n\
"matrix.org" \n\
] \n\
}, \n\
"welcomeUserId": "", \n\
"piwik": { \n\
"url": "https://piwik.riot.im/", \n\
"whitelistedHSUrls": ["https://matrix.org"], \n\
"whitelistedISUrls": ["https://vector.im", "https://matrix.org"], \n\
"siteId": 1 \n\
}, \n\
"enable_presence_by_hs_url": { \n\
"https://matrix.org": false, \n\
"https://matrix-client.matrix.org": false \n\
}, \n\
"settingDefaults": { \n\
"breadcrumbs": true \n\
} \n\
}' > webapp/config.json
FROM nginx
# Add "Service-Worker-Allowed: /" header so the worker can sniff traffic on this domain rather
# than just the path this gets hosted under. NB this newline echo syntax only works on bash.
SHELL ["/bin/bash", "-c"]
RUN echo $'\
server { \n\
listen 80; \n\
add_header \'Service-Worker-Allowed\' \'/\'; \n\
location / { \n\
root /usr/share/nginx/html; \n\
index index.html index.htm; \n\
} \n\
}' > /etc/nginx/conf.d/default.conf
RUN sed -i 's/}/ application\/wasm wasm;\n}/g' /etc/nginx/mime.types
COPY --from=jsbuild /build/element-web-matthew-p2p/webapp /usr/share/nginx/html

View file

@ -1,32 +0,0 @@
# Pinned to alpine3.18 until https://github.com/mattn/go-sqlite3/issues/1164 is solved
FROM docker.io/golang:1.21-alpine3.18 AS base
#
# Needs to be separate from the main Dockerfile for OpenShift,
# as --target is not supported there.
#
RUN apk --update --no-cache add bash build-base
WORKDIR /build
COPY . /build
RUN mkdir -p bin
RUN go build -trimpath -o bin/ ./cmd/dendrite-demo-pinecone
RUN go build -trimpath -o bin/ ./cmd/create-account
RUN go build -trimpath -o bin/ ./cmd/generate-keys
FROM alpine:latest
RUN apk --update --no-cache add curl
LABEL org.opencontainers.image.title="Dendrite (Pinecone demo)"
LABEL org.opencontainers.image.description="Next-generation Matrix homeserver written in Go"
LABEL org.opencontainers.image.source="https://github.com/matrix-org/dendrite"
LABEL org.opencontainers.image.licenses="Apache-2.0"
COPY --from=base /build/bin/* /usr/bin/
VOLUME /etc/dendrite
WORKDIR /etc/dendrite
ENTRYPOINT ["/usr/bin/dendrite-demo-pinecone"]

View file

@ -1,31 +0,0 @@
# Pinned to alpine3.18 until https://github.com/mattn/go-sqlite3/issues/1164 is solved
FROM docker.io/golang:1.21-alpine3.18 AS base
#
# Needs to be separate from the main Dockerfile for OpenShift,
# as --target is not supported there.
#
RUN apk --update --no-cache add bash build-base
WORKDIR /build
COPY . /build
RUN mkdir -p bin
RUN go build -trimpath -o bin/ ./cmd/dendrite-demo-yggdrasil
RUN go build -trimpath -o bin/ ./cmd/create-account
RUN go build -trimpath -o bin/ ./cmd/generate-keys
FROM alpine:latest
LABEL org.opencontainers.image.title="Dendrite (Yggdrasil demo)"
LABEL org.opencontainers.image.description="Next-generation Matrix homeserver written in Go"
LABEL org.opencontainers.image.source="https://github.com/matrix-org/dendrite"
LABEL org.opencontainers.image.licenses="Apache-2.0"
COPY --from=base /build/bin/* /usr/bin/
VOLUME /etc/dendrite
WORKDIR /etc/dendrite
ENTRYPOINT ["/usr/bin/dendrite-demo-yggdrasil"]

View file

@ -1,67 +0,0 @@
# Docker images
These are Docker images for Dendrite!
They can be found on Docker Hub:
- [matrixdotorg/dendrite-monolith](https://hub.docker.com/r/matrixdotorg/dendrite-monolith) for monolith deployments
## Dockerfile
The `Dockerfile` is a multistage file which can build Dendrite. From the root of the Dendrite
repository, run:
```
docker build . -t matrixdotorg/dendrite-monolith
```
## Compose file
There is one sample `docker-compose` file:
- `docker-compose.yml` which runs a Dendrite deployment with Postgres
## Configuration
The `docker-compose` file refers to the `/etc/dendrite` volume as where the
runtime config should come from. The mounted folder must contain:
- `dendrite.yaml` configuration file (based on one of the sample config files)
- `matrix_key.pem` server key, as generated using `cmd/generate-keys`
- `server.crt` certificate file
- `server.key` private key file for the above certificate
To generate keys:
```
docker run --rm --entrypoint="" \
-v $(pwd):/mnt \
matrixdotorg/dendrite-monolith:latest \
/usr/bin/generate-keys \
-private-key /mnt/matrix_key.pem \
-tls-cert /mnt/server.crt \
-tls-key /mnt/server.key
```
The key files will now exist in your current working directory, and can be mounted into place.
## Starting Dendrite
Create your config based on the [`dendrite-sample.yaml`](https://github.com/matrix-org/dendrite/blob/main/dendrite-sample.yaml) sample configuration file.
Then start the deployment:
```
docker-compose -f docker-compose.yml up
```
## Building the images
The `build/docker/images-build.sh` script will build the base image, followed by
all of the component images.
The `build/docker/images-push.sh` script will push them to Docker Hub (subject
to permissions).
If you wish to build and push your own images, rename `matrixdotorg/dendrite` to
the name of another Docker Hub repository in `images-build.sh` and `images-push.sh`.
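For example (the tag name below is purely illustrative), a custom tag can be passed as the
first argument to each script:

```
./build/docker/images-build.sh v1.2.3
./build/docker/images-push.sh v1.2.3
```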

View file

@ -1,52 +0,0 @@
version: "3.4"
services:
postgres:
hostname: postgres
image: postgres:15-alpine
restart: always
volumes:
# This will create a docker volume to persist the database files in.
# If you prefer those files to be outside of docker, you'll need to change this.
- dendrite_postgres_data:/var/lib/postgresql/data
environment:
POSTGRES_PASSWORD: itsasecret
POSTGRES_USER: dendrite
POSTGRES_DATABASE: dendrite
healthcheck:
test: ["CMD-SHELL", "pg_isready -U dendrite"]
interval: 5s
timeout: 5s
retries: 5
networks:
- internal
monolith:
hostname: monolith
image: matrixdotorg/dendrite-monolith:latest
ports:
- 8008:8008
- 8448:8448
volumes:
- ./config:/etc/dendrite
# The following volumes use docker volumes, change this
# if you prefer to have those files outside of docker.
- dendrite_media:/var/dendrite/media
- dendrite_jetstream:/var/dendrite/jetstream
- dendrite_search_index:/var/dendrite/searchindex
depends_on:
postgres:
condition: service_healthy
networks:
- internal
restart: unless-stopped
networks:
internal:
attachable: true
volumes:
dendrite_postgres_data:
dendrite_media:
dendrite_jetstream:
dendrite_search_index:

View file

@ -1,11 +0,0 @@
#!/usr/bin/env bash
cd $(git rev-parse --show-toplevel)
TAG=${1:-latest}
echo "Building tag '${TAG}'"
docker build . --target monolith -t matrixdotorg/dendrite-monolith:${TAG}
docker build . --target demo-pinecone -t matrixdotorg/dendrite-demo-pinecone:${TAG}
docker build . --target demo-yggdrasil -t matrixdotorg/dendrite-demo-yggdrasil:${TAG}

View file

@ -1,7 +0,0 @@
#!/usr/bin/env bash
TAG=${1:-latest}
echo "Pulling tag '${TAG}'"
docker pull matrixdotorg/dendrite-monolith:${TAG}

View file

@ -1,7 +0,0 @@
#!/usr/bin/env bash
TAG=${1:-latest}
echo "Pushing tag '${TAG}'"
docker push matrixdotorg/dendrite-monolith:${TAG}

View file

@ -1,13 +0,0 @@
#!/bin/sh
TARGET=""
while getopts "ai" option
do
case "$option"
in
a) gomobile bind -v -target android -trimpath -ldflags="-s -w" github.com/matrix-org/dendrite/build/gobind-pinecone ;;
i) gomobile bind -v -target ios -trimpath -ldflags="" -o ~/DendriteBindings/Gobind.xcframework . ;;
*) echo "No target specified, specify -a or -i"; exit 1 ;;
esac
done

View file

@ -1,366 +0,0 @@
// Copyright 2022 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gobind
import (
"context"
"crypto/ed25519"
"crypto/rand"
"encoding/hex"
"fmt"
"net"
"path/filepath"
"strings"
"github.com/matrix-org/dendrite/clientapi/userutil"
"github.com/matrix-org/dendrite/cmd/dendrite-demo-pinecone/conduit"
"github.com/matrix-org/dendrite/cmd/dendrite-demo-pinecone/monolith"
"github.com/matrix-org/dendrite/cmd/dendrite-demo-pinecone/relay"
"github.com/matrix-org/dendrite/cmd/dendrite-demo-yggdrasil/signing"
"github.com/matrix-org/dendrite/federationapi/api"
"github.com/matrix-org/dendrite/internal/httputil"
"github.com/matrix-org/dendrite/internal/sqlutil"
"github.com/matrix-org/dendrite/setup/process"
userapiAPI "github.com/matrix-org/dendrite/userapi/api"
"github.com/matrix-org/gomatrixserverlib"
"github.com/matrix-org/gomatrixserverlib/spec"
"github.com/matrix-org/pinecone/types"
"github.com/sirupsen/logrus"
pineconeMulticast "github.com/matrix-org/pinecone/multicast"
pineconeRouter "github.com/matrix-org/pinecone/router"
_ "golang.org/x/mobile/bind"
)
const (
PeerTypeRemote = pineconeRouter.PeerTypeRemote
PeerTypeMulticast = pineconeRouter.PeerTypeMulticast
PeerTypeBluetooth = pineconeRouter.PeerTypeBluetooth
PeerTypeBonjour = pineconeRouter.PeerTypeBonjour
MaxFrameSize = types.MaxFrameSize
)
// Re-export Conduit in this package for bindings.
type Conduit struct {
conduit.Conduit
}
type DendriteMonolith struct {
logger logrus.Logger
p2pMonolith monolith.P2PMonolith
StorageDirectory string
CacheDirectory string
listener net.Listener
}
func (m *DendriteMonolith) PublicKey() string {
return m.p2pMonolith.Router.PublicKey().String()
}
func (m *DendriteMonolith) BaseURL() string {
return fmt.Sprintf("http://%s", m.p2pMonolith.Addr())
}
func (m *DendriteMonolith) PeerCount(peertype int) int {
return m.p2pMonolith.Router.PeerCount(peertype)
}
func (m *DendriteMonolith) SessionCount() int {
return len(m.p2pMonolith.Sessions.Protocol(monolith.SessionProtocol).Sessions())
}
type InterfaceInfo struct {
Name string
Index int
Mtu int
Up bool
Broadcast bool
Loopback bool
PointToPoint bool
Multicast bool
Addrs string
}
type InterfaceRetriever interface {
CacheCurrentInterfaces() int
GetCachedInterface(index int) *InterfaceInfo
}
func (m *DendriteMonolith) RegisterNetworkCallback(intfCallback InterfaceRetriever) {
callback := func() []pineconeMulticast.InterfaceInfo {
count := intfCallback.CacheCurrentInterfaces()
intfs := []pineconeMulticast.InterfaceInfo{}
for i := 0; i < count; i++ {
iface := intfCallback.GetCachedInterface(i)
if iface != nil {
intfs = append(intfs, pineconeMulticast.InterfaceInfo{
Name: iface.Name,
Index: iface.Index,
Mtu: iface.Mtu,
Up: iface.Up,
Broadcast: iface.Broadcast,
Loopback: iface.Loopback,
PointToPoint: iface.PointToPoint,
Multicast: iface.Multicast,
Addrs: iface.Addrs,
})
}
}
return intfs
}
m.p2pMonolith.Multicast.RegisterNetworkCallback(callback)
}
func (m *DendriteMonolith) SetMulticastEnabled(enabled bool) {
if enabled {
m.p2pMonolith.Multicast.Start()
} else {
m.p2pMonolith.Multicast.Stop()
m.DisconnectType(int(pineconeRouter.PeerTypeMulticast))
}
}
func (m *DendriteMonolith) SetStaticPeer(uri string) {
m.p2pMonolith.ConnManager.RemovePeers()
for _, uri := range strings.Split(uri, ",") {
m.p2pMonolith.ConnManager.AddPeer(strings.TrimSpace(uri))
}
}
func getServerKeyFromString(nodeID string) (spec.ServerName, error) {
var nodeKey spec.ServerName
if userID, err := spec.NewUserID(nodeID, false); err == nil {
hexKey, decodeErr := hex.DecodeString(string(userID.Domain()))
if decodeErr != nil || len(hexKey) != ed25519.PublicKeySize {
return "", fmt.Errorf("UserID domain is not a valid ed25519 public key: %v", userID.Domain())
} else {
nodeKey = userID.Domain()
}
} else {
hexKey, decodeErr := hex.DecodeString(nodeID)
if decodeErr != nil || len(hexKey) != ed25519.PublicKeySize {
return "", fmt.Errorf("Relay server uri is not a valid ed25519 public key: %v", nodeID)
} else {
nodeKey = spec.ServerName(nodeID)
}
}
return nodeKey, nil
}
func (m *DendriteMonolith) SetRelayServers(nodeID string, uris string) {
relays := []spec.ServerName{}
for _, uri := range strings.Split(uris, ",") {
uri = strings.TrimSpace(uri)
if len(uri) == 0 {
continue
}
nodeKey, err := getServerKeyFromString(uri)
if err != nil {
logrus.Errorf(err.Error())
continue
}
relays = append(relays, nodeKey)
}
nodeKey, err := getServerKeyFromString(nodeID)
if err != nil {
logrus.Errorf(err.Error())
return
}
if string(nodeKey) == m.PublicKey() {
logrus.Infof("Setting own relay servers to: %v", relays)
m.p2pMonolith.RelayRetriever.SetRelayServers(relays)
} else {
relay.UpdateNodeRelayServers(
spec.ServerName(nodeKey),
relays,
m.p2pMonolith.ProcessCtx.Context(),
m.p2pMonolith.GetFederationAPI(),
)
}
}
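// Illustrative inputs (comment not in the original file; the keys below are the placeholder
// values used by the tests rather than real nodes):
//
//  m.SetRelayServers(
//      "@valid:abcdef123456abcdef123456abcdef123456abcdef123456abcdef123456abcd", // node ID: a user ID or raw hex ed25519 key
//      "123456123456abcdef123456abcdef123456abcdef123456abcdef123456abcd",        // comma-separated relay server keys
//  )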
func (m *DendriteMonolith) GetRelayServers(nodeID string) string {
nodeKey, err := getServerKeyFromString(nodeID)
if err != nil {
logrus.Errorf(err.Error())
return ""
}
relaysString := ""
if string(nodeKey) == m.PublicKey() {
relays := m.p2pMonolith.RelayRetriever.GetRelayServers()
for i, relay := range relays {
if i != 0 {
// Append a comma to the previous entry if there is one.
relaysString += ","
}
relaysString += string(relay)
}
} else {
request := api.P2PQueryRelayServersRequest{Server: spec.ServerName(nodeKey)}
response := api.P2PQueryRelayServersResponse{}
err := m.p2pMonolith.GetFederationAPI().P2PQueryRelayServers(m.p2pMonolith.ProcessCtx.Context(), &request, &response)
if err != nil {
logrus.Warnf("Failed obtaining list of this node's relay servers: %s", err.Error())
return ""
}
for i, relay := range response.RelayServers {
if i != 0 {
// Append a comma to the previous entry if there is one.
relaysString += ","
}
relaysString += string(relay)
}
}
return relaysString
}
func (m *DendriteMonolith) RelayingEnabled() bool {
return m.p2pMonolith.GetRelayAPI().RelayingEnabled()
}
func (m *DendriteMonolith) SetRelayingEnabled(enabled bool) {
m.p2pMonolith.GetRelayAPI().SetRelayingEnabled(enabled)
}
func (m *DendriteMonolith) DisconnectType(peertype int) {
for _, p := range m.p2pMonolith.Router.Peers() {
if int(peertype) == p.PeerType {
m.p2pMonolith.Router.Disconnect(types.SwitchPortID(p.Port), nil)
}
}
}
func (m *DendriteMonolith) DisconnectZone(zone string) {
for _, p := range m.p2pMonolith.Router.Peers() {
if zone == p.Zone {
m.p2pMonolith.Router.Disconnect(types.SwitchPortID(p.Port), nil)
}
}
}
func (m *DendriteMonolith) DisconnectPort(port int) {
m.p2pMonolith.Router.Disconnect(types.SwitchPortID(port), nil)
}
func (m *DendriteMonolith) Conduit(zone string, peertype int) (*Conduit, error) {
l, r := net.Pipe()
newConduit := Conduit{conduit.NewConduit(r, 0)}
go func() {
logrus.Errorf("Attempting authenticated connect")
var port types.SwitchPortID
var err error
if port, err = m.p2pMonolith.Router.Connect(
l,
pineconeRouter.ConnectionZone(zone),
pineconeRouter.ConnectionPeerType(peertype),
); err != nil {
logrus.Errorf("Authenticated connect failed: %s", err)
_ = l.Close()
_ = r.Close()
_ = newConduit.Close()
return
}
newConduit.SetPort(port)
logrus.Infof("Authenticated connect succeeded (port %d)", newConduit.Port())
}()
return &newConduit, nil
}
func (m *DendriteMonolith) RegisterUser(localpart, password string) (string, error) {
pubkey := m.p2pMonolith.Router.PublicKey()
userID := userutil.MakeUserID(
localpart,
spec.ServerName(hex.EncodeToString(pubkey[:])),
)
userReq := &userapiAPI.PerformAccountCreationRequest{
AccountType: userapiAPI.AccountTypeUser,
Localpart: localpart,
Password: password,
}
userRes := &userapiAPI.PerformAccountCreationResponse{}
if err := m.p2pMonolith.GetUserAPI().PerformAccountCreation(context.Background(), userReq, userRes); err != nil {
return userID, fmt.Errorf("userAPI.PerformAccountCreation: %w", err)
}
return userID, nil
}
func (m *DendriteMonolith) RegisterDevice(localpart, deviceID string) (string, error) {
accessTokenBytes := make([]byte, 16)
n, err := rand.Read(accessTokenBytes)
if err != nil {
return "", fmt.Errorf("rand.Read: %w", err)
}
loginReq := &userapiAPI.PerformDeviceCreationRequest{
Localpart: localpart,
DeviceID: &deviceID,
AccessToken: hex.EncodeToString(accessTokenBytes[:n]),
}
loginRes := &userapiAPI.PerformDeviceCreationResponse{}
if err := m.p2pMonolith.GetUserAPI().PerformDeviceCreation(context.Background(), loginReq, loginRes); err != nil {
return "", fmt.Errorf("userAPI.PerformDeviceCreation: %w", err)
}
if !loginRes.DeviceCreated {
return "", fmt.Errorf("device was not created")
}
return loginRes.Device.AccessToken, nil
}
func (m *DendriteMonolith) Start() {
keyfile := filepath.Join(m.StorageDirectory, "p2p.pem")
oldKeyfile := filepath.Join(m.StorageDirectory, "p2p.key")
sk, pk := monolith.GetOrCreateKey(keyfile, oldKeyfile)
m.logger = logrus.Logger{
Out: BindLogger{},
}
m.logger.SetOutput(BindLogger{})
logrus.SetOutput(BindLogger{})
m.p2pMonolith = monolith.P2PMonolith{}
m.p2pMonolith.SetupPinecone(sk)
prefix := hex.EncodeToString(pk)
cfg := monolith.GenerateDefaultConfig(sk, m.StorageDirectory, m.CacheDirectory, prefix)
cfg.Global.ServerName = spec.ServerName(hex.EncodeToString(pk))
cfg.Global.KeyID = gomatrixserverlib.KeyID(signing.KeyID)
cfg.Global.JetStream.InMemory = false
// NOTE : disabled for now since there is a 64 bit alignment panic on 32 bit systems
// This isn't actually fixed: https://github.com/blevesearch/zapx/pull/147
cfg.SyncAPI.Fulltext.Enabled = false
processCtx := process.NewProcessContext()
cm := sqlutil.NewConnectionManager(processCtx, cfg.Global.DatabaseOptions)
routers := httputil.NewRouters()
enableRelaying := false
enableMetrics := false
enableWebsockets := false
m.p2pMonolith.SetupDendrite(processCtx, cfg, cm, routers, 65432, enableRelaying, enableMetrics, enableWebsockets)
m.p2pMonolith.StartMonolith()
}
func (m *DendriteMonolith) Stop() {
m.p2pMonolith.Stop()
}
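// A rough lifecycle sketch (not part of this file), mirroring how the tests below drive
// the monolith; the directory paths and credentials are placeholders:
//
//  m := &DendriteMonolith{StorageDirectory: "/tmp/dendrite", CacheDirectory: "/tmp/dendrite-cache"}
//  m.Start()
//  userID, err := m.RegisterUser("alice", "some-password")  // localpart + password
//  token, err := m.RegisterDevice("alice", "ALICEPHONE")    // returns an access token
//  m.Stop()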

View file

@ -1,158 +0,0 @@
// Copyright 2022 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gobind
import (
"strings"
"testing"
"github.com/matrix-org/gomatrixserverlib/spec"
)
func TestMonolithStarts(t *testing.T) {
monolith := DendriteMonolith{
StorageDirectory: t.TempDir(),
CacheDirectory: t.TempDir(),
}
monolith.Start()
monolith.PublicKey()
monolith.Stop()
}
func TestMonolithSetRelayServers(t *testing.T) {
testCases := []struct {
name string
nodeID string
relays string
expectedRelays string
expectSelf bool
}{
{
name: "assorted valid, invalid, empty & self keys",
nodeID: "@valid:abcdef123456abcdef123456abcdef123456abcdef123456abcdef123456abcd",
relays: "@valid:123456123456abcdef123456abcdef123456abcdef123456abcdef123456abcd,@invalid:notakey,,",
expectedRelays: "123456123456abcdef123456abcdef123456abcdef123456abcdef123456abcd",
expectSelf: true,
},
{
name: "invalid node key",
nodeID: "@invalid:notakey",
relays: "@valid:123456123456abcdef123456abcdef123456abcdef123456abcdef123456abcd,@invalid:notakey,,",
expectedRelays: "",
expectSelf: false,
},
{
name: "node is self",
nodeID: "self",
relays: "@valid:123456123456abcdef123456abcdef123456abcdef123456abcdef123456abcd,@invalid:notakey,,",
expectedRelays: "123456123456abcdef123456abcdef123456abcdef123456abcdef123456abcd",
expectSelf: false,
},
}
for _, tc := range testCases {
monolith := DendriteMonolith{
StorageDirectory: t.TempDir(),
CacheDirectory: t.TempDir(),
}
monolith.Start()
inputRelays := tc.relays
expectedRelays := tc.expectedRelays
if tc.expectSelf {
inputRelays += "," + monolith.PublicKey()
expectedRelays += "," + monolith.PublicKey()
}
nodeID := tc.nodeID
if nodeID == "self" {
nodeID = monolith.PublicKey()
}
monolith.SetRelayServers(nodeID, inputRelays)
relays := monolith.GetRelayServers(nodeID)
monolith.Stop()
if !containSameKeys(strings.Split(relays, ","), strings.Split(expectedRelays, ",")) {
t.Fatalf("%s: expected %s got %s", tc.name, expectedRelays, relays)
}
}
}
func containSameKeys(expected []string, actual []string) bool {
if len(expected) != len(actual) {
return false
}
for _, expectedKey := range expected {
hasMatch := false
for _, actualKey := range actual {
if actualKey == expectedKey {
hasMatch = true
}
}
if !hasMatch {
return false
}
}
return true
}
func TestParseServerKey(t *testing.T) {
testCases := []struct {
name string
serverKey string
expectedErr bool
expectedKey spec.ServerName
}{
{
name: "valid userid as key",
serverKey: "@valid:abcdef123456abcdef123456abcdef123456abcdef123456abcdef123456abcd",
expectedErr: false,
expectedKey: "abcdef123456abcdef123456abcdef123456abcdef123456abcdef123456abcd",
},
{
name: "valid key",
serverKey: "abcdef123456abcdef123456abcdef123456abcdef123456abcdef123456abcd",
expectedErr: false,
expectedKey: "abcdef123456abcdef123456abcdef123456abcdef123456abcdef123456abcd",
},
{
name: "invalid userid key",
serverKey: "@invalid:notakey",
expectedErr: true,
expectedKey: "",
},
{
name: "invalid key",
serverKey: "@invalid:notakey",
expectedErr: true,
expectedKey: "",
},
}
for _, tc := range testCases {
key, err := getServerKeyFromString(tc.serverKey)
if tc.expectedErr && err == nil {
t.Fatalf("%s: expected an error", tc.name)
} else if !tc.expectedErr && err != nil {
t.Fatalf("%s: didn't expect an error: %s", tc.name, err.Error())
}
if tc.expectedKey != key {
t.Fatalf("%s: keys not equal. expected: %s got: %s", tc.name, tc.expectedKey, key)
}
}
}

View file

@ -1,27 +0,0 @@
// Copyright 2022 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !ios
// +build !ios
package gobind
import "log"
type BindLogger struct{}
func (nsl BindLogger) Write(p []byte) (n int, err error) {
log.Println(string(p))
return len(p), nil
}

View file

@ -1,25 +0,0 @@
#!/bin/sh
TARGET=""
while getopts "ai" option
do
case "$option"
in
a) TARGET="android";;
i) TARGET="ios";;
esac
done
if [ "$TARGET" = "" ];
then
echo "No target specified, specify -a or -i"
exit 1
fi
gomobile bind -v \
-target $TARGET \
-ldflags "-X github.com/yggdrasil-network/yggdrasil-go/src/version.buildName=dendrite" \
github.com/matrix-org/dendrite/build/gobind-pinecone

View file

@ -1,293 +0,0 @@
package gobind
import (
"context"
"crypto/ed25519"
"crypto/tls"
"encoding/hex"
"fmt"
"net"
"net/http"
"os"
"path/filepath"
"time"
"github.com/getsentry/sentry-go"
"github.com/gorilla/mux"
"github.com/matrix-org/dendrite/appservice"
"github.com/matrix-org/dendrite/cmd/dendrite-demo-yggdrasil/signing"
"github.com/matrix-org/dendrite/cmd/dendrite-demo-yggdrasil/yggconn"
"github.com/matrix-org/dendrite/cmd/dendrite-demo-yggdrasil/yggrooms"
"github.com/matrix-org/dendrite/federationapi"
"github.com/matrix-org/dendrite/federationapi/api"
"github.com/matrix-org/dendrite/internal"
"github.com/matrix-org/dendrite/internal/caching"
"github.com/matrix-org/dendrite/internal/httputil"
"github.com/matrix-org/dendrite/internal/sqlutil"
"github.com/matrix-org/dendrite/roomserver"
"github.com/matrix-org/dendrite/setup"
basepkg "github.com/matrix-org/dendrite/setup/base"
"github.com/matrix-org/dendrite/setup/config"
"github.com/matrix-org/dendrite/setup/jetstream"
"github.com/matrix-org/dendrite/setup/process"
"github.com/matrix-org/dendrite/test"
"github.com/matrix-org/dendrite/userapi"
"github.com/matrix-org/gomatrixserverlib"
"github.com/matrix-org/gomatrixserverlib/spec"
"github.com/sirupsen/logrus"
_ "golang.org/x/mobile/bind"
)
type DendriteMonolith struct {
logger logrus.Logger
YggdrasilNode *yggconn.Node
StorageDirectory string
listener net.Listener
httpServer *http.Server
processContext *process.ProcessContext
}
func (m *DendriteMonolith) BaseURL() string {
return fmt.Sprintf("http://%s", m.listener.Addr().String())
}
func (m *DendriteMonolith) PeerCount() int {
return m.YggdrasilNode.PeerCount()
}
func (m *DendriteMonolith) SetMulticastEnabled(enabled bool) {
m.YggdrasilNode.SetMulticastEnabled(enabled)
}
func (m *DendriteMonolith) SetStaticPeer(uri string) error {
return m.YggdrasilNode.SetStaticPeer(uri)
}
func (m *DendriteMonolith) DisconnectNonMulticastPeers() {
m.YggdrasilNode.DisconnectNonMulticastPeers()
}
func (m *DendriteMonolith) DisconnectMulticastPeers() {
m.YggdrasilNode.DisconnectMulticastPeers()
}
func (m *DendriteMonolith) Start() {
var pk ed25519.PublicKey
var sk ed25519.PrivateKey
m.logger = logrus.Logger{
Out: BindLogger{},
}
m.logger.SetOutput(BindLogger{})
logrus.SetOutput(BindLogger{})
keyfile := filepath.Join(m.StorageDirectory, "p2p.pem")
if _, err := os.Stat(keyfile); os.IsNotExist(err) {
oldkeyfile := filepath.Join(m.StorageDirectory, "p2p.key")
if _, err = os.Stat(oldkeyfile); os.IsNotExist(err) {
if err = test.NewMatrixKey(keyfile); err != nil {
panic("failed to generate a new PEM key: " + err.Error())
}
if _, sk, err = config.LoadMatrixKey(keyfile, os.ReadFile); err != nil {
panic("failed to load PEM key: " + err.Error())
}
if len(sk) != ed25519.PrivateKeySize {
panic("the private key is not long enough")
}
} else {
if sk, err = os.ReadFile(oldkeyfile); err != nil {
panic("failed to read the old private key: " + err.Error())
}
if len(sk) != ed25519.PrivateKeySize {
panic("the private key is not long enough")
}
if err := test.SaveMatrixKey(keyfile, sk); err != nil {
panic("failed to convert the private key to PEM format: " + err.Error())
}
}
} else {
var err error
if _, sk, err = config.LoadMatrixKey(keyfile, os.ReadFile); err != nil {
panic("failed to load PEM key: " + err.Error())
}
if len(sk) != ed25519.PrivateKeySize {
panic("the private key is not long enough")
}
}
pk = sk.Public().(ed25519.PublicKey)
var err error
m.listener, err = net.Listen("tcp", "localhost:65432")
if err != nil {
panic(err)
}
ygg, err := yggconn.Setup(sk, "dendrite", m.StorageDirectory, "", "")
if err != nil {
panic(err)
}
m.YggdrasilNode = ygg
cfg := &config.Dendrite{}
cfg.Defaults(config.DefaultOpts{
Generate: true,
SingleDatabase: true,
})
cfg.Global.ServerName = spec.ServerName(hex.EncodeToString(pk))
cfg.Global.PrivateKey = sk
cfg.Global.KeyID = gomatrixserverlib.KeyID(signing.KeyID)
cfg.Global.JetStream.StoragePath = config.Path(fmt.Sprintf("%s/", m.StorageDirectory))
cfg.Global.JetStream.InMemory = true
cfg.UserAPI.AccountDatabase.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/dendrite-p2p-account.db", m.StorageDirectory))
cfg.MediaAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/dendrite-p2p-mediaapi.db", m.StorageDirectory))
cfg.SyncAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/dendrite-p2p-syncapi.db", m.StorageDirectory))
cfg.RoomServer.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/dendrite-p2p-roomserver.db", m.StorageDirectory))
cfg.KeyServer.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/dendrite-p2p-keyserver.db", m.StorageDirectory))
cfg.FederationAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/dendrite-p2p-federationsender.db", m.StorageDirectory))
cfg.MediaAPI.BasePath = config.Path(fmt.Sprintf("%s/tmp", m.StorageDirectory))
cfg.MediaAPI.AbsBasePath = config.Path(fmt.Sprintf("%s/tmp", m.StorageDirectory))
cfg.ClientAPI.RegistrationDisabled = false
cfg.ClientAPI.OpenRegistrationWithoutVerificationEnabled = true
if err = cfg.Derive(); err != nil {
panic(err)
}
configErrors := &config.ConfigErrors{}
cfg.Verify(configErrors)
if len(*configErrors) > 0 {
for _, err := range *configErrors {
logrus.Errorf("Configuration error: %s", err)
}
logrus.Fatalf("Failed to start due to configuration errors")
}
internal.SetupStdLogging()
internal.SetupHookLogging(cfg.Logging)
internal.SetupPprof()
logrus.Infof("Dendrite version %s", internal.VersionString())
if !cfg.ClientAPI.RegistrationDisabled && cfg.ClientAPI.OpenRegistrationWithoutVerificationEnabled {
logrus.Warn("Open registration is enabled")
}
closer, err := cfg.SetupTracing()
if err != nil {
logrus.WithError(err).Panicf("failed to start opentracing")
}
defer closer.Close()
if cfg.Global.Sentry.Enabled {
logrus.Info("Setting up Sentry for debugging...")
err = sentry.Init(sentry.ClientOptions{
Dsn: cfg.Global.Sentry.DSN,
Environment: cfg.Global.Sentry.Environment,
Debug: true,
ServerName: string(cfg.Global.ServerName),
Release: "dendrite@" + internal.VersionString(),
AttachStacktrace: true,
})
if err != nil {
logrus.WithError(err).Panic("failed to start Sentry")
}
}
processCtx := process.NewProcessContext()
cm := sqlutil.NewConnectionManager(processCtx, cfg.Global.DatabaseOptions)
routers := httputil.NewRouters()
basepkg.ConfigureAdminEndpoints(processCtx, routers)
m.processContext = processCtx
defer func() {
processCtx.ShutdownDendrite()
processCtx.WaitForShutdown()
}() // nolint: errcheck
federation := ygg.CreateFederationClient(cfg)
serverKeyAPI := &signing.YggdrasilKeys{}
keyRing := serverKeyAPI.KeyRing()
caches := caching.NewRistrettoCache(cfg.Global.Cache.EstimatedMaxSize, cfg.Global.Cache.MaxAge, caching.EnableMetrics)
natsInstance := jetstream.NATSInstance{}
rsAPI := roomserver.NewInternalAPI(processCtx, cfg, cm, &natsInstance, caches, caching.EnableMetrics)
fsAPI := federationapi.NewInternalAPI(
processCtx, cfg, cm, &natsInstance, federation, rsAPI, caches, keyRing, true,
)
userAPI := userapi.NewInternalAPI(processCtx, cfg, cm, &natsInstance, rsAPI, federation, caching.EnableMetrics, fsAPI.IsBlacklistedOrBackingOff)
asAPI := appservice.NewInternalAPI(processCtx, cfg, &natsInstance, userAPI, rsAPI)
rsAPI.SetAppserviceAPI(asAPI)
// The underlying roomserver implementation needs to be able to call the fedsender.
// This is different to rsAPI which can be the http client which doesn't need this dependency
rsAPI.SetFederationAPI(fsAPI, keyRing)
monolith := setup.Monolith{
Config: cfg,
Client: ygg.CreateClient(),
FedClient: federation,
KeyRing: keyRing,
AppserviceAPI: asAPI,
FederationAPI: fsAPI,
RoomserverAPI: rsAPI,
UserAPI: userAPI,
ExtPublicRoomsProvider: yggrooms.NewYggdrasilRoomProvider(
ygg, fsAPI, federation,
),
}
monolith.AddAllPublicRoutes(processCtx, cfg, routers, cm, &natsInstance, caches, caching.EnableMetrics)
httpRouter := mux.NewRouter()
httpRouter.PathPrefix(httputil.PublicClientPathPrefix).Handler(routers.Client)
httpRouter.PathPrefix(httputil.PublicMediaPathPrefix).Handler(routers.Media)
httpRouter.PathPrefix(httputil.DendriteAdminPathPrefix).Handler(routers.DendriteAdmin)
httpRouter.PathPrefix(httputil.SynapseAdminPathPrefix).Handler(routers.SynapseAdmin)
yggRouter := mux.NewRouter()
yggRouter.PathPrefix(httputil.PublicFederationPathPrefix).Handler(routers.Federation)
yggRouter.PathPrefix(httputil.PublicMediaPathPrefix).Handler(routers.Media)
// Build both ends of a HTTP multiplex.
m.httpServer = &http.Server{
Addr: ":0",
TLSNextProto: map[string]func(*http.Server, *tls.Conn, http.Handler){},
ReadTimeout: 10 * time.Second,
WriteTimeout: 10 * time.Second,
IdleTimeout: 30 * time.Second,
BaseContext: func(_ net.Listener) context.Context {
return context.Background()
},
Handler: yggRouter,
}
go func() {
m.logger.Info("Listening on ", ygg.DerivedServerName())
m.logger.Error(m.httpServer.Serve(ygg))
}()
go func() {
logrus.Info("Listening on ", m.listener.Addr())
logrus.Error(http.Serve(m.listener, httpRouter))
}()
go func() {
logrus.Info("Sending wake-up message to known nodes")
req := &api.PerformBroadcastEDURequest{}
res := &api.PerformBroadcastEDUResponse{}
if err := fsAPI.PerformBroadcastEDU(context.TODO(), req, res); err != nil {
logrus.WithError(err).Error("Failed to send wake-up message to known nodes")
}
}()
}
func (m *DendriteMonolith) Stop() {
if err := m.httpServer.Close(); err != nil {
m.logger.Warn("Error stopping HTTP server:", err)
}
if m.processContext != nil {
m.processContext.ShutdownDendrite()
m.processContext.WaitForComponentsToFinish()
}
}

View file

@ -1,26 +0,0 @@
//go:build ios
// +build ios
package gobind
/*
#cgo CFLAGS: -x objective-c
#cgo LDFLAGS: -framework Foundation
#import <Foundation/Foundation.h>
void Log(const char *text) {
NSString *nss = [NSString stringWithUTF8String:text];
NSLog(@"%@", nss);
}
*/
import "C"
import "unsafe"
type BindLogger struct {
}
func (nsl BindLogger) Write(p []byte) (n int, err error) {
p = append(p, 0)
cstr := (*C.char)(unsafe.Pointer(&p[0]))
C.Log(cstr)
return len(p), nil
}

View file

@ -1,13 +0,0 @@
//go:build !ios
// +build !ios
package gobind
import "log"
type BindLogger struct{}
func (nsl BindLogger) Write(p []byte) (n int, err error) {
log.Println(string(p))
return len(p), nil
}

View file

@ -1,36 +0,0 @@
#syntax=docker/dockerfile:1.2
FROM golang:1.20-bullseye as build
RUN apt-get update && apt-get install -y sqlite3
WORKDIR /build
# we will dump the binaries and config file to this location to ensure any local untracked files
# that come from the build context don't contaminate the build
RUN mkdir /dendrite
# Utilise Docker caching when downloading dependencies, this stops us needlessly
# downloading dependencies every time.
ARG CGO
RUN --mount=target=. \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=cache,target=/root/.cache/go-build \
CGO_ENABLED=${CGO} go build -o /dendrite ./cmd/generate-config && \
CGO_ENABLED=${CGO} go build -o /dendrite ./cmd/generate-keys && \
CGO_ENABLED=${CGO} go build -o /dendrite/dendrite ./cmd/dendrite && \
CGO_ENABLED=${CGO} go build -cover -covermode=atomic -o /dendrite/dendrite-cover -coverpkg "github.com/matrix-org/..." ./cmd/dendrite && \
cp build/scripts/complement-cmd.sh /complement-cmd.sh
WORKDIR /dendrite
RUN ./generate-keys --private-key matrix_key.pem
ENV SERVER_NAME=localhost
ENV API=0
ENV COVER=0
EXPOSE 8008 8448
# At runtime, generate TLS cert based on the CA now mounted at /ca
# At runtime, replace the SERVER_NAME with what we are told
CMD ./generate-keys -keysize 1024 --server $SERVER_NAME --tls-cert server.crt --tls-key server.key --tls-authority-cert /complement/ca/ca.crt --tls-authority-key /complement/ca/ca.key && \
./generate-config -server $SERVER_NAME --ci > dendrite.yaml && \
cp /complement/ca/ca.crt /usr/local/share/ca-certificates/ && update-ca-certificates && \
exec /complement-cmd.sh

View file

@ -1,55 +0,0 @@
#syntax=docker/dockerfile:1.2
# A local development Complement dockerfile, to be used with host mounts
# /cache -> Contains the entire dendrite code at Dockerfile build time. Builds binaries but only keeps the generate-* ones. Pre-compilation saves time.
# /dendrite -> Host-mounted sources
# /runtime -> Binaries and config go here and are run at runtime
# At runtime, dendrite is built from /dendrite and run in /runtime.
#
# Use these mounts to make use of this dockerfile:
# COMPLEMENT_HOST_MOUNTS='/your/local/dendrite:/dendrite:ro;/your/go/path:/go:ro'
FROM golang:1.18-stretch
RUN apt-get update && apt-get install -y sqlite3
ENV SERVER_NAME=localhost
ENV COVER=0
EXPOSE 8008 8448
WORKDIR /runtime
# This script compiles Dendrite for us.
RUN echo '\
#!/bin/bash -eux \n\
if test -f "/runtime/dendrite" && test -f "/runtime/dendrite-cover"; then \n\
echo "Skipping compilation; binaries exist" \n\
exit 0 \n\
fi \n\
cd /dendrite \n\
go build -v -o /runtime /dendrite/cmd/dendrite \n\
go test -c -cover -covermode=atomic -o /runtime/dendrite-cover -coverpkg "github.com/matrix-org/..." /dendrite/cmd/dendrite \n\
' > compile.sh && chmod +x compile.sh
# This script runs Dendrite for us. Must be run in the /runtime directory.
RUN echo '\
#!/bin/bash -eu \n\
./generate-keys --private-key matrix_key.pem \n\
./generate-keys -keysize 1024 --server $SERVER_NAME --tls-cert server.crt --tls-key server.key --tls-authority-cert /complement/ca/ca.crt --tls-authority-key /complement/ca/ca.key \n\
./generate-config -server $SERVER_NAME --ci > dendrite.yaml \n\
cp /complement/ca/ca.crt /usr/local/share/ca-certificates/ && update-ca-certificates \n\
[ ${COVER} -eq 1 ] && exec ./dendrite-cover --test.coverprofile=integrationcover.log --really-enable-open-registration --tls-cert server.crt --tls-key server.key --config dendrite.yaml \n\
exec ./dendrite --really-enable-open-registration --tls-cert server.crt --tls-key server.key --config dendrite.yaml \n\
' > run.sh && chmod +x run.sh
WORKDIR /cache
# Build the monolith in /cache - we won't actually use this but will rely on build artifacts to speed
# up the real compilation. Build the generate-* binaries in the true /runtime locations.
# If the generate-* source is changed, this dockerfile needs re-running.
RUN --mount=target=. \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=cache,target=/root/.cache/go-build \
go build -o /runtime ./cmd/generate-config && \
go build -o /runtime ./cmd/generate-keys
WORKDIR /runtime
CMD /runtime/compile.sh && exec /runtime/run.sh

View file

@ -1,57 +0,0 @@
#syntax=docker/dockerfile:1.2
FROM golang:1.20-bullseye as build
RUN apt-get update && apt-get install -y postgresql
WORKDIR /build
# No password when connecting over localhost
RUN sed -i "s%127.0.0.1/32 md5%127.0.0.1/32 trust%g" /etc/postgresql/13/main/pg_hba.conf && \
# Bump up max conns for moar concurrency
sed -i 's/max_connections = 100/max_connections = 2000/g' /etc/postgresql/13/main/postgresql.conf
# This entry script starts postgres, waits for it to be up then starts dendrite
RUN echo '\
#!/bin/bash -eu \n\
pg_lsclusters \n\
pg_ctlcluster 13 main start \n\
\n\
until pg_isready \n\
do \n\
echo "Waiting for postgres"; \n\
sleep 1; \n\
done \n\
' > run_postgres.sh && chmod +x run_postgres.sh
# we will dump the binaries and config file to this location to ensure any local untracked files
# that come from the build context don't contaminate the build
RUN mkdir /dendrite
# Utilise Docker caching when downloading dependencies, this stops us needlessly
# downloading dependencies every time.
ARG CGO
RUN --mount=target=. \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=cache,target=/root/.cache/go-build \
CGO_ENABLED=${CGO} go build -o /dendrite ./cmd/generate-config && \
CGO_ENABLED=${CGO} go build -o /dendrite ./cmd/generate-keys && \
CGO_ENABLED=${CGO} go build -o /dendrite/dendrite ./cmd/dendrite && \
CGO_ENABLED=${CGO} go build -cover -covermode=atomic -o /dendrite/dendrite-cover -coverpkg "github.com/matrix-org/..." ./cmd/dendrite && \
cp build/scripts/complement-cmd.sh /complement-cmd.sh
WORKDIR /dendrite
RUN ./generate-keys --private-key matrix_key.pem
ENV SERVER_NAME=localhost
ENV API=0
ENV COVER=0
EXPOSE 8008 8448
# At runtime, generate TLS cert based on the CA now mounted at /ca
# At runtime, replace the SERVER_NAME with what we are told
CMD /build/run_postgres.sh && ./generate-keys --keysize 1024 --server $SERVER_NAME --tls-cert server.crt --tls-key server.key --tls-authority-cert /complement/ca/ca.crt --tls-authority-key /complement/ca/ca.key && \
./generate-config -server $SERVER_NAME --ci --db postgresql://postgres@localhost/postgres?sslmode=disable > dendrite.yaml && \
# Bump max_open_conns up here in the global database config
sed -i 's/max_open_conns:.*$/max_open_conns: 1990/g' dendrite.yaml && \
cp /complement/ca/ca.crt /usr/local/share/ca-certificates/ && update-ca-certificates && \
exec /complement-cmd.sh

View file

@ -1,21 +0,0 @@
#!/bin/bash -e
# This script is intended to be used inside a docker container for Complement
export GOCOVERDIR=/tmp/covdatafiles
mkdir -p "${GOCOVERDIR}"
if [[ "${COVER}" -eq 1 ]]; then
echo "Running with coverage"
exec /dendrite/dendrite-cover \
--really-enable-open-registration \
--tls-cert server.crt \
--tls-key server.key \
--config dendrite.yaml
else
echo "Not running with coverage"
exec /dendrite/dendrite \
--really-enable-open-registration \
--tls-cert server.crt \
--tls-key server.key \
--config dendrite.yaml
fi

View file

@ -1,19 +0,0 @@
#! /bin/bash -eu
# This script is designed for developers who want to test their Dendrite code
# against Complement.
#
# It makes a Dendrite image which represents the current checkout,
# then downloads Complement and runs it with that image.
# Make image
cd `dirname $0`/../..
docker build -t complement-dendrite -f build/scripts/Complement.Dockerfile .
# Download Complement
wget -N https://github.com/matrix-org/complement/archive/master.tar.gz
tar -xzf master.tar.gz
# Run the tests!
cd complement-master
COMPLEMENT_BASE_IMAGE=complement-dendrite:latest go test -v -count=1 ./tests ./tests/csapi

File diff suppressed because it is too large

View file

@ -1,31 +0,0 @@
// Copyright 2020 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package api
import "github.com/matrix-org/gomatrixserverlib/fclient"
// ExtraPublicRoomsProvider provides a way to inject extra published rooms into /publicRooms requests.
type ExtraPublicRoomsProvider interface {
// Rooms returns the extra rooms. This is called on-demand by clients, so cache appropriately.
Rooms() []fclient.PublicRoom
}
type RegistrationToken struct {
Token *string `json:"token"`
UsesAllowed *int32 `json:"uses_allowed"`
Pending *int32 `json:"pending"`
Completed *int32 `json:"completed"`
ExpiryTime *int64 `json:"expiry_time"`
}
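For context, the ExtraPublicRoomsProvider interface above is satisfied by any type with a Rooms() method. A minimal, hypothetical implementation serving a fixed slice might look like the sketch below; the staticRoomsProvider name and the example package are assumptions, not part of the diff.

package example

import "github.com/matrix-org/gomatrixserverlib/fclient"

// staticRoomsProvider serves a fixed, precomputed list of rooms. Because the
// interface is called on-demand by clients, keeping the slice precomputed is
// the simplest way to honour the "cache appropriately" note above.
type staticRoomsProvider struct {
	rooms []fclient.PublicRoom
}

// Rooms implements ExtraPublicRoomsProvider.
func (p *staticRoomsProvider) Rooms() []fclient.PublicRoom {
	return p.rooms
}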

View file

@ -18,13 +18,18 @@ package auth
import (
"context"
"crypto/rand"
"database/sql"
"encoding/base64"
"fmt"
"net/http"
"strings"
"github.com/matrix-org/dendrite/userapi/api"
"github.com/matrix-org/gomatrixserverlib/spec"
"github.com/matrix-org/dendrite/appservice/types"
"github.com/matrix-org/dendrite/clientapi/auth/authtypes"
"github.com/matrix-org/dendrite/clientapi/httputil"
"github.com/matrix-org/dendrite/clientapi/jsonerror"
"github.com/matrix-org/dendrite/clientapi/userutil"
"github.com/matrix-org/dendrite/common/config"
"github.com/matrix-org/util"
)
@ -35,14 +40,21 @@ var tokenByteLength = 32
// DeviceDatabase represents a device database.
type DeviceDatabase interface {
// Look up the device matching the given access token.
GetDeviceByAccessToken(ctx context.Context, token string) (*api.Device, error)
GetDeviceByAccessToken(ctx context.Context, token string) (*authtypes.Device, error)
}
// AccountDatabase represents an account database.
type AccountDatabase interface {
// Look up the account matching the given localpart.
GetAccountByLocalpart(ctx context.Context, localpart string) (*api.Account, error)
GetAccountByPassword(ctx context.Context, localpart, password string) (*api.Account, error)
GetAccountByLocalpart(ctx context.Context, localpart string) (*authtypes.Account, error)
}
// Data contains information required to authenticate a request.
type Data struct {
AccountDB AccountDatabase
DeviceDB DeviceDatabase
// AppServices is the list of all registered AS
AppServices []config.ApplicationService
}
// VerifyUserFromRequest authenticates the HTTP request,
@ -51,43 +63,114 @@ type AccountDatabase interface {
// Note: For an AS user, AS dummy device is returned.
// On failure returns an JSON error response which can be sent to the client.
func VerifyUserFromRequest(
req *http.Request, userAPI api.QueryAcccessTokenAPI,
) (*api.Device, *util.JSONResponse) {
req *http.Request, data Data,
) (*authtypes.Device, *util.JSONResponse) {
// Try to find the Application Service user
token, err := ExtractAccessToken(req)
if err != nil {
return nil, &util.JSONResponse{
Code: http.StatusUnauthorized,
JSON: spec.MissingToken(err.Error()),
JSON: jsonerror.MissingToken(err.Error()),
}
}
var res api.QueryAccessTokenResponse
err = userAPI.QueryAccessToken(req.Context(), &api.QueryAccessTokenRequest{
AccessToken: token,
AppServiceUserID: req.URL.Query().Get("user_id"),
}, &res)
if err != nil {
util.GetLogger(req.Context()).WithError(err).Error("userAPI.QueryAccessToken failed")
return nil, &util.JSONResponse{
Code: http.StatusInternalServerError,
JSON: spec.InternalServerError{},
// Search for app service with given access_token
var appService *config.ApplicationService
for _, as := range data.AppServices {
if as.ASToken == token {
appService = &as
break
}
}
if res.Err != "" {
if strings.HasPrefix(strings.ToLower(res.Err), "forbidden:") { // TODO: use actual error and no string comparison
if appService != nil {
// Create a dummy device for AS user
dev := authtypes.Device{
// Use AS dummy device ID
ID: types.AppServiceDeviceID,
// AS dummy device has AS's token.
AccessToken: token,
}
userID := req.URL.Query().Get("user_id")
localpart, err := userutil.ParseUsernameParam(userID, nil)
if err != nil {
return nil, &util.JSONResponse{
Code: http.StatusForbidden,
JSON: spec.Forbidden(res.Err),
Code: http.StatusBadRequest,
JSON: jsonerror.InvalidUsername(err.Error()),
}
}
if localpart != "" { // AS is masquerading as another user
// Verify that the user is registered
account, err := data.AccountDB.GetAccountByLocalpart(req.Context(), localpart)
// Verify that account exists & appServiceID matches
if err == nil && account.AppServiceID == appService.ID {
// Set the userID of dummy device
dev.UserID = userID
return &dev, nil
}
return nil, &util.JSONResponse{
Code: http.StatusForbidden,
JSON: jsonerror.Forbidden("Application service has not registered this user"),
}
}
// AS is not masquerading as any user, so use AS's sender_localpart
dev.UserID = appService.SenderLocalpart
return &dev, nil
}
if res.Device == nil {
return nil, &util.JSONResponse{
Code: http.StatusUnauthorized,
JSON: spec.UnknownToken("Unknown token"),
// Try to find local user from device database
dev, devErr := verifyAccessToken(req, data.DeviceDB)
if devErr == nil {
return dev, verifyUserParameters(req)
}
return nil, &util.JSONResponse{
Code: http.StatusUnauthorized,
JSON: jsonerror.UnknownToken("Unrecognized access token"), // nolint: misspell
}
}
// verifyUserParameters ensures that a request coming from a regular user is not
// using any query parameters reserved for an application service
func verifyUserParameters(req *http.Request) *util.JSONResponse {
if req.URL.Query().Get("ts") != "" {
return &util.JSONResponse{
Code: http.StatusBadRequest,
JSON: jsonerror.Unknown("parameter 'ts' not allowed without valid parameter 'access_token'"),
}
}
return res.Device, nil
return nil
}
// verifyAccessToken verifies that an access token was supplied in the given HTTP request
// and returns the device it corresponds to. Returns resErr (an error response which can be
// sent to the client) if the token is invalid or there was a problem querying the database.
func verifyAccessToken(req *http.Request, deviceDB DeviceDatabase) (device *authtypes.Device, resErr *util.JSONResponse) {
token, err := ExtractAccessToken(req)
if err != nil {
resErr = &util.JSONResponse{
Code: http.StatusUnauthorized,
JSON: jsonerror.MissingToken(err.Error()),
}
return
}
device, err = deviceDB.GetDeviceByAccessToken(req.Context(), token)
if err != nil {
if err == sql.ErrNoRows {
resErr = &util.JSONResponse{
Code: http.StatusUnauthorized,
JSON: jsonerror.UnknownToken("Unknown token"),
}
} else {
jsonErr := httputil.LogThenError(req, err)
resErr = &jsonErr
}
}
return
}
// GenerateAccessToken creates a new access token. Returns an error if failed to generate
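As a usage sketch for the variant of VerifyUserFromRequest that takes a Data struct (shown above): a wrapper along these lines could guard an endpoint. The protectedHandler name and the next callback are hypothetical and only illustrate the call pattern.

package example

import (
	"encoding/json"
	"net/http"

	"github.com/matrix-org/dendrite/clientapi/auth"
	"github.com/matrix-org/dendrite/clientapi/auth/authtypes"
	"github.com/matrix-org/util"
)

// protectedHandler only invokes next once the caller has been resolved to a
// device, relaying the ready-made JSON error response otherwise.
func protectedHandler(data auth.Data, next func(*http.Request, *authtypes.Device) util.JSONResponse) http.HandlerFunc {
	return func(w http.ResponseWriter, req *http.Request) {
		device, errRes := auth.VerifyUserFromRequest(req, data)
		if errRes != nil {
			w.Header().Set("Content-Type", "application/json")
			w.WriteHeader(errRes.Code)
			_ = json.NewEncoder(w).Encode(errRes.JSON)
			return
		}
		res := next(req, device)
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(res.Code)
		_ = json.NewEncoder(w).Encode(res.JSON)
	}
}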

View file

@ -1,4 +1,4 @@
// Copyright 2021 The Matrix.org Foundation C.I.C.
// Copyright 2017 Vector Creations Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -12,25 +12,20 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package deltas
package authtypes
import (
"context"
"database/sql"
"fmt"
"github.com/matrix-org/gomatrixserverlib"
)
func UpRemoveRoomsTable(ctx context.Context, tx *sql.Tx) error {
_, err := tx.ExecContext(ctx, `
DROP TABLE IF EXISTS federationsender_rooms;
`)
if err != nil {
return fmt.Errorf("failed to execute upgrade: %w", err)
}
return nil
}
func DownRemoveRoomsTable(tx *sql.Tx) error {
// We can't reverse this.
return nil
// Account represents a Matrix account on this home server.
type Account struct {
UserID string
Localpart string
ServerName gomatrixserverlib.ServerName
Profile *Profile
AppServiceID string
// TODO: Other flags like IsAdmin, IsGuest
// TODO: Devices
// TODO: Associations (e.g. with application services)
}

View file

@ -1,4 +1,4 @@
// Copyright 2022 The Matrix.org Foundation C.I.C.
// Copyright 2017 Vector Creations Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -12,29 +12,18 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build ios
// +build ios
package authtypes
package gobind
/*
#cgo CFLAGS: -x objective-c
#cgo LDFLAGS: -framework Foundation
#import <Foundation/Foundation.h>
void Log(const char *text) {
NSString *nss = [NSString stringWithUTF8String:text];
NSLog(@"%@", nss);
}
*/
import "C"
import "unsafe"
type BindLogger struct {
}
func (nsl BindLogger) Write(p []byte) (n int, err error) {
p = append(p, 0)
cstr := (*C.char)(unsafe.Pointer(&p[0]))
C.Log(cstr)
return len(p), nil
// Device represents a client's device (mobile, web, etc)
type Device struct {
ID string
UserID string
// The access_token granted to this device.
// This uniquely identifies the device from all other devices and clients.
AccessToken string
// The unique ID of the session identified by the access token.
// Can be used as a secure substitution in places where data needs to be
// associated with access tokens.
SessionID int64
// TODO: display name, last used timestamp, keys, etc
}

View file

@ -5,10 +5,8 @@ type LoginType string
// The relevant login types implemented in Dendrite
const (
LoginTypePassword = "m.login.password"
LoginTypeDummy = "m.login.dummy"
LoginTypeSharedSecret = "org.matrix.login.shared_secret"
LoginTypeRecaptcha = "m.login.recaptcha"
LoginTypeApplicationService = "m.login.application_service"
LoginTypeToken = "m.login.token"
)

View file

@ -16,15 +16,7 @@ package authtypes
// Profile represents the profile for a Matrix account.
type Profile struct {
Localpart string `json:"local_part"`
ServerName string `json:"server_name,omitempty"` // NOTSPEC: only set by Pinecone user provider
DisplayName string `json:"display_name"`
AvatarURL string `json:"avatar_url"`
}
// FullyQualifiedProfile represents the profile for a Matrix account.
type FullyQualifiedProfile struct {
UserID string `json:"user_id"`
DisplayName string `json:"display_name"`
AvatarURL string `json:"avatar_url"`
Localpart string
DisplayName string
AvatarURL string
}

View file

@ -16,8 +16,6 @@ package authtypes
// ThreePID represents a third-party identifier
type ThreePID struct {
Address string `json:"address"`
Medium string `json:"medium"`
AddedAt int64 `json:"added_at"`
ValidatedAt int64 `json:"validated_at"`
Address string `json:"address"`
Medium string `json:"medium"`
}

View file

@ -1,100 +0,0 @@
// Copyright 2021 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package auth
import (
"encoding/json"
"io"
"net/http"
"github.com/matrix-org/dendrite/clientapi/auth/authtypes"
"github.com/matrix-org/dendrite/setup/config"
uapi "github.com/matrix-org/dendrite/userapi/api"
"github.com/matrix-org/gomatrixserverlib/spec"
"github.com/matrix-org/util"
)
// LoginFromJSONReader performs authentication given a login request body reader and
// some context. It returns the basic login information and a cleanup function to be
// called after authorization has completed, with the result of the authorization.
// If the final return value is non-nil, an error occurred and the cleanup function
// is nil.
func LoginFromJSONReader(
req *http.Request,
useraccountAPI uapi.UserLoginAPI,
userAPI UserInternalAPIForLogin,
cfg *config.ClientAPI,
) (*Login, LoginCleanupFunc, *util.JSONResponse) {
reqBytes, err := io.ReadAll(req.Body)
if err != nil {
err := &util.JSONResponse{
Code: http.StatusBadRequest,
JSON: spec.BadJSON("Reading request body failed: " + err.Error()),
}
return nil, nil, err
}
var header struct {
Type string `json:"type"`
}
if err := json.Unmarshal(reqBytes, &header); err != nil {
err := &util.JSONResponse{
Code: http.StatusBadRequest,
JSON: spec.BadJSON("Reading request body failed: " + err.Error()),
}
return nil, nil, err
}
var typ Type
switch header.Type {
case authtypes.LoginTypePassword:
typ = &LoginTypePassword{
UserAPI: useraccountAPI,
Config: cfg,
}
case authtypes.LoginTypeToken:
typ = &LoginTypeToken{
UserAPI: userAPI,
Config: cfg,
}
case authtypes.LoginTypeApplicationService:
token, err := ExtractAccessToken(req)
if err != nil {
err := &util.JSONResponse{
Code: http.StatusForbidden,
JSON: spec.MissingToken(err.Error()),
}
return nil, nil, err
}
typ = &LoginTypeApplicationService{
Config: cfg,
Token: token,
}
default:
err := util.JSONResponse{
Code: http.StatusBadRequest,
JSON: spec.InvalidParam("unhandled login type: " + header.Type),
}
return nil, nil, &err
}
return typ.LoginFromJSON(req.Context(), reqBytes)
}
// UserInternalAPIForLogin contains the aspects of UserAPI required for logging in.
type UserInternalAPIForLogin interface {
uapi.LoginTokenInternalAPI
}
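The LoginFromJSONReader shown above hands back a cleanup function that must be told the final result, so that, for example, a consumed m.login.token is only deleted when the login actually succeeded. A hedged sketch of a caller follows; handleLogin and the response body are hypothetical, and device/token creation is omitted.

package example

import (
	"net/http"

	"github.com/matrix-org/dendrite/clientapi/auth"
	"github.com/matrix-org/dendrite/setup/config"
	uapi "github.com/matrix-org/dendrite/userapi/api"
	"github.com/matrix-org/util"
)

func handleLogin(req *http.Request, accountAPI uapi.UserLoginAPI, userAPI auth.UserInternalAPIForLogin, cfg *config.ClientAPI) util.JSONResponse {
	login, cleanup, errRes := auth.LoginFromJSONReader(req, accountAPI, userAPI, cfg)
	if errRes != nil {
		return *errRes
	}
	// ... create a device and access token for login.Username() here ...
	res := util.JSONResponse{
		Code: http.StatusOK,
		JSON: map[string]interface{}{"user_id": login.Username()},
	}
	// Report the outcome so the login type can run its cleanup
	// (e.g. delete a used login token).
	cleanup(req.Context(), &res)
	return res
}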

View file

@ -1,55 +0,0 @@
// Copyright 2023 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package auth
import (
"context"
"github.com/matrix-org/dendrite/clientapi/auth/authtypes"
"github.com/matrix-org/dendrite/clientapi/httputil"
"github.com/matrix-org/dendrite/internal"
"github.com/matrix-org/dendrite/setup/config"
"github.com/matrix-org/util"
)
// LoginTypeApplicationService describes how to authenticate as an
// application service
type LoginTypeApplicationService struct {
Config *config.ClientAPI
Token string
}
// Name implements Type
func (t *LoginTypeApplicationService) Name() string {
return authtypes.LoginTypeApplicationService
}
// LoginFromJSON implements Type
func (t *LoginTypeApplicationService) LoginFromJSON(
ctx context.Context, reqBytes []byte,
) (*Login, LoginCleanupFunc, *util.JSONResponse) {
var r Login
if err := httputil.UnmarshalJSON(reqBytes, &r); err != nil {
return nil, nil, err
}
_, err := internal.ValidateApplicationServiceRequest(t.Config, r.Identifier.User, t.Token)
if err != nil {
return nil, nil, err
}
cleanup := func(ctx context.Context, j *util.JSONResponse) {}
return &r, cleanup, nil
}

View file

@ -1,319 +0,0 @@
// Copyright 2021 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package auth
import (
"context"
"net/http"
"net/http/httptest"
"reflect"
"regexp"
"strings"
"testing"
"github.com/matrix-org/dendrite/clientapi/userutil"
"github.com/matrix-org/dendrite/setup/config"
uapi "github.com/matrix-org/dendrite/userapi/api"
"github.com/matrix-org/gomatrixserverlib/fclient"
"github.com/matrix-org/gomatrixserverlib/spec"
"github.com/matrix-org/util"
)
func TestLoginFromJSONReader(t *testing.T) {
ctx := context.Background()
tsts := []struct {
Name string
Body string
Token string
WantUsername string
WantDeviceID string
WantDeletedTokens []string
}{
{
Name: "passwordWorks",
Body: `{
"type": "m.login.password",
"identifier": { "type": "m.id.user", "user": "alice" },
"password": "herpassword",
"device_id": "adevice"
}`,
WantUsername: "@alice:example.com",
WantDeviceID: "adevice",
},
{
Name: "tokenWorks",
Body: `{
"type": "m.login.token",
"token": "atoken",
"device_id": "adevice"
}`,
WantUsername: "@auser:example.com",
WantDeviceID: "adevice",
WantDeletedTokens: []string{"atoken"},
},
{
Name: "appServiceWorksUserID",
Body: `{
"type": "m.login.application_service",
"identifier": { "type": "m.id.user", "user": "@alice:example.com" },
"device_id": "adevice"
}`,
Token: "astoken",
WantUsername: "@alice:example.com",
WantDeviceID: "adevice",
},
{
Name: "appServiceWorksLocalpart",
Body: `{
"type": "m.login.application_service",
"identifier": { "type": "m.id.user", "user": "alice" },
"device_id": "adevice"
}`,
Token: "astoken",
WantUsername: "alice",
WantDeviceID: "adevice",
},
}
for _, tst := range tsts {
t.Run(tst.Name, func(t *testing.T) {
var userAPI fakeUserInternalAPI
cfg := &config.ClientAPI{
Matrix: &config.Global{
SigningIdentity: fclient.SigningIdentity{
ServerName: serverName,
},
},
Derived: &config.Derived{
ApplicationServices: []config.ApplicationService{
{
ID: "anapplicationservice",
ASToken: "astoken",
NamespaceMap: map[string][]config.ApplicationServiceNamespace{
"users": {
{
Exclusive: true,
Regex: "@alice:example.com",
RegexpObject: regexp.MustCompile("@alice:example.com"),
},
},
},
},
},
},
}
req := httptest.NewRequest(http.MethodPost, "/", strings.NewReader(tst.Body))
if tst.Token != "" {
req.Header.Add("Authorization", "Bearer "+tst.Token)
}
login, cleanup, jsonErr := LoginFromJSONReader(req, &userAPI, &userAPI, cfg)
if jsonErr != nil {
t.Fatalf("LoginFromJSONReader failed: %+v", jsonErr)
}
cleanup(ctx, &util.JSONResponse{Code: http.StatusOK})
if login.Username() != tst.WantUsername {
t.Errorf("Username: got %q, want %q", login.Username(), tst.WantUsername)
}
if login.DeviceID == nil {
if tst.WantDeviceID != "" {
t.Errorf("DeviceID: got %v, want %q", login.DeviceID, tst.WantDeviceID)
}
} else {
if *login.DeviceID != tst.WantDeviceID {
t.Errorf("DeviceID: got %q, want %q", *login.DeviceID, tst.WantDeviceID)
}
}
if !reflect.DeepEqual(userAPI.DeletedTokens, tst.WantDeletedTokens) {
t.Errorf("DeletedTokens: got %+v, want %+v", userAPI.DeletedTokens, tst.WantDeletedTokens)
}
})
}
}
func TestBadLoginFromJSONReader(t *testing.T) {
ctx := context.Background()
tsts := []struct {
Name string
Body string
Token string
WantErrCode spec.MatrixErrorCode
}{
{Name: "empty", WantErrCode: spec.ErrorBadJSON},
{
Name: "badUnmarshal",
Body: `badsyntaxJSON`,
WantErrCode: spec.ErrorBadJSON,
},
{
Name: "badPassword",
Body: `{
"type": "m.login.password",
"identifier": { "type": "m.id.user", "user": "alice" },
"password": "invalidpassword",
"device_id": "adevice"
}`,
WantErrCode: spec.ErrorForbidden,
},
{
Name: "badToken",
Body: `{
"type": "m.login.token",
"token": "invalidtoken",
"device_id": "adevice"
}`,
WantErrCode: spec.ErrorForbidden,
},
{
Name: "badType",
Body: `{
"type": "m.login.invalid",
"device_id": "adevice"
}`,
WantErrCode: spec.ErrorInvalidParam,
},
{
Name: "noASToken",
Body: `{
"type": "m.login.application_service",
"identifier": { "type": "m.id.user", "user": "@alice:example.com" },
"device_id": "adevice"
}`,
WantErrCode: "M_MISSING_TOKEN",
},
{
Name: "badASToken",
Token: "badastoken",
Body: `{
"type": "m.login.application_service",
"identifier": { "type": "m.id.user", "user": "@alice:example.com" },
"device_id": "adevice"
}`,
WantErrCode: "M_UNKNOWN_TOKEN",
},
{
Name: "badASNamespace",
Token: "astoken",
Body: `{
"type": "m.login.application_service",
"identifier": { "type": "m.id.user", "user": "@bob:example.com" },
"device_id": "adevice"
}`,
WantErrCode: "M_EXCLUSIVE",
},
{
Name: "badASUserID",
Token: "astoken",
Body: `{
"type": "m.login.application_service",
"identifier": { "type": "m.id.user", "user": "@alice:wrong.example.com" },
"device_id": "adevice"
}`,
WantErrCode: "M_INVALID_USERNAME",
},
}
for _, tst := range tsts {
t.Run(tst.Name, func(t *testing.T) {
var userAPI fakeUserInternalAPI
cfg := &config.ClientAPI{
Matrix: &config.Global{
SigningIdentity: fclient.SigningIdentity{
ServerName: serverName,
},
},
Derived: &config.Derived{
ApplicationServices: []config.ApplicationService{
{
ID: "anapplicationservice",
ASToken: "astoken",
NamespaceMap: map[string][]config.ApplicationServiceNamespace{
"users": {
{
Exclusive: true,
Regex: "@alice:example.com",
RegexpObject: regexp.MustCompile("@alice:example.com"),
},
},
},
},
},
},
}
req := httptest.NewRequest(http.MethodPost, "/", strings.NewReader(tst.Body))
if tst.Token != "" {
req.Header.Add("Authorization", "Bearer "+tst.Token)
}
_, cleanup, errRes := LoginFromJSONReader(req, &userAPI, &userAPI, cfg)
if errRes == nil {
cleanup(ctx, nil)
t.Fatalf("LoginFromJSONReader err: got %+v, want code %q", errRes, tst.WantErrCode)
} else if merr, ok := errRes.JSON.(spec.MatrixError); ok && merr.ErrCode != tst.WantErrCode {
t.Fatalf("LoginFromJSONReader err: got %+v, want code %q", errRes, tst.WantErrCode)
}
})
}
}
type fakeUserInternalAPI struct {
UserInternalAPIForLogin
DeletedTokens []string
}
func (ua *fakeUserInternalAPI) QueryAccountByPassword(ctx context.Context, req *uapi.QueryAccountByPasswordRequest, res *uapi.QueryAccountByPasswordResponse) error {
if req.PlaintextPassword == "invalidpassword" {
res.Account = nil
return nil
}
res.Exists = true
res.Account = &uapi.Account{UserID: userutil.MakeUserID(req.Localpart, req.ServerName)}
return nil
}
func (ua *fakeUserInternalAPI) QueryAccountByLocalpart(ctx context.Context, req *uapi.QueryAccountByLocalpartRequest, res *uapi.QueryAccountByLocalpartResponse) error {
return nil
}
func (ua *fakeUserInternalAPI) PerformAccountCreation(ctx context.Context, req *uapi.PerformAccountCreationRequest, res *uapi.PerformAccountCreationResponse) error {
return nil
}
func (ua *fakeUserInternalAPI) PerformLoginTokenDeletion(ctx context.Context, req *uapi.PerformLoginTokenDeletionRequest, res *uapi.PerformLoginTokenDeletionResponse) error {
ua.DeletedTokens = append(ua.DeletedTokens, req.Token)
return nil
}
func (ua *fakeUserInternalAPI) PerformLoginTokenCreation(ctx context.Context, req *uapi.PerformLoginTokenCreationRequest, res *uapi.PerformLoginTokenCreationResponse) error {
return nil
}
func (*fakeUserInternalAPI) QueryLoginToken(ctx context.Context, req *uapi.QueryLoginTokenRequest, res *uapi.QueryLoginTokenResponse) error {
if req.Token == "invalidtoken" {
return nil
}
res.Data = &uapi.LoginTokenData{UserID: "@auser:example.com"}
return nil
}

View file

@ -1,85 +0,0 @@
// Copyright 2021 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package auth
import (
"context"
"net/http"
"github.com/matrix-org/dendrite/clientapi/auth/authtypes"
"github.com/matrix-org/dendrite/clientapi/httputil"
"github.com/matrix-org/dendrite/setup/config"
uapi "github.com/matrix-org/dendrite/userapi/api"
"github.com/matrix-org/gomatrixserverlib/spec"
"github.com/matrix-org/util"
)
// LoginTypeToken describes how to authenticate with a login token.
type LoginTypeToken struct {
UserAPI uapi.LoginTokenInternalAPI
Config *config.ClientAPI
}
// Name implements Type.
func (t *LoginTypeToken) Name() string {
return authtypes.LoginTypeToken
}
// LoginFromJSON implements Type. The cleanup function deletes the token from
// the database on success.
func (t *LoginTypeToken) LoginFromJSON(ctx context.Context, reqBytes []byte) (*Login, LoginCleanupFunc, *util.JSONResponse) {
var r loginTokenRequest
if err := httputil.UnmarshalJSON(reqBytes, &r); err != nil {
return nil, nil, err
}
var res uapi.QueryLoginTokenResponse
if err := t.UserAPI.QueryLoginToken(ctx, &uapi.QueryLoginTokenRequest{Token: r.Token}, &res); err != nil {
util.GetLogger(ctx).WithError(err).Error("UserAPI.QueryLoginToken failed")
return nil, nil, &util.JSONResponse{
Code: http.StatusInternalServerError,
JSON: spec.InternalServerError{},
}
}
if res.Data == nil {
return nil, nil, &util.JSONResponse{
Code: http.StatusForbidden,
JSON: spec.Forbidden("invalid login token"),
}
}
r.Login.Identifier.Type = "m.id.user"
r.Login.Identifier.User = res.Data.UserID
cleanup := func(ctx context.Context, authRes *util.JSONResponse) {
if authRes == nil {
util.GetLogger(ctx).Error("No JSONResponse provided to LoginTokenType cleanup function")
return
}
if authRes.Code == http.StatusOK {
var res uapi.PerformLoginTokenDeletionResponse
if err := t.UserAPI.PerformLoginTokenDeletion(ctx, &uapi.PerformLoginTokenDeletionRequest{Token: r.Token}, &res); err != nil {
util.GetLogger(ctx).WithError(err).Error("UserAPI.PerformLoginTokenDeletion failed")
}
}
}
return &r.Login, cleanup, nil
}
// loginTokenRequest struct to hold the possible parameters from an HTTP request.
type loginTokenRequest struct {
Login
Token string `json:"token"`
}

View file

@ -1,286 +0,0 @@
// Copyright 2020 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package auth
import (
"context"
"database/sql"
"github.com/go-ldap/ldap/v3"
"github.com/google/uuid"
"net/http"
"strings"
"github.com/matrix-org/dendrite/clientapi/auth/authtypes"
"github.com/matrix-org/dendrite/clientapi/httputil"
"github.com/matrix-org/dendrite/clientapi/userutil"
"github.com/matrix-org/dendrite/setup/config"
"github.com/matrix-org/dendrite/userapi/api"
"github.com/matrix-org/gomatrixserverlib/spec"
"github.com/matrix-org/util"
)
type PasswordRequest struct {
Login
Password string `json:"password"`
}
// LoginTypePassword implements https://matrix.org/docs/spec/client_server/r0.6.1#password-based
type LoginTypePassword struct {
Config *config.ClientAPI
UserAPI api.UserLoginAPI
}
func (t *LoginTypePassword) Name() string {
return authtypes.LoginTypePassword
}
func (t *LoginTypePassword) LoginFromJSON(ctx context.Context, reqBytes []byte) (*Login, LoginCleanupFunc, *util.JSONResponse) {
var r PasswordRequest
if err := httputil.UnmarshalJSON(reqBytes, &r); err != nil {
return nil, nil, err
}
login, err := t.Login(ctx, &r)
if err != nil {
return nil, nil, err
}
return login, func(context.Context, *util.JSONResponse) {}, nil
}
func (t *LoginTypePassword) Login(ctx context.Context, request *PasswordRequest) (*Login, *util.JSONResponse) {
fullUsername := request.Username()
if fullUsername == "" {
return nil, &util.JSONResponse{
Code: http.StatusUnauthorized,
JSON: spec.BadJSON("A username must be supplied."),
}
}
if len(request.Password) == 0 {
return nil, &util.JSONResponse{
Code: http.StatusUnauthorized,
JSON: spec.BadJSON("A password must be supplied."),
}
}
username, domain, err := userutil.ParseUsernameParam(fullUsername, t.Config.Matrix)
if err != nil {
return nil, &util.JSONResponse{
Code: http.StatusUnauthorized,
JSON: spec.InvalidUsername(err.Error()),
}
}
if !t.Config.Matrix.IsLocalServerName(domain) {
return nil, &util.JSONResponse{
Code: http.StatusUnauthorized,
JSON: spec.InvalidUsername("The server name is not known."),
}
}
var account *api.Account
if t.Config.Ldap.Enabled {
isAdmin, err := t.authenticateLdap(username, request.Password)
if err != nil {
return nil, err
}
acc, err := t.getOrCreateAccount(ctx, username, domain, isAdmin)
if err != nil {
return nil, err
}
account = acc
} else {
acc, err := t.authenticateDb(ctx, username, domain, request.Password)
if err != nil {
return nil, err
}
account = acc
}
// Set the user, so login.Username() can do the right thing
request.Identifier.User = account.UserID
request.User = account.UserID
return &request.Login, nil
}
func (t *LoginTypePassword) authenticateDb(ctx context.Context, username string, domain spec.ServerName, password string) (*api.Account, *util.JSONResponse) {
res := &api.QueryAccountByPasswordResponse{}
err := t.UserAPI.QueryAccountByPassword(ctx, &api.QueryAccountByPasswordRequest{
Localpart: strings.ToLower(username),
ServerName: domain,
PlaintextPassword: password,
}, res)
if err != nil {
return nil, &util.JSONResponse{
Code: http.StatusInternalServerError,
JSON: spec.Unknown("Unable to fetch account by password."),
}
}
if !res.Exists {
err = t.UserAPI.QueryAccountByPassword(ctx, &api.QueryAccountByPasswordRequest{
Localpart: username,
ServerName: domain,
PlaintextPassword: password,
}, res)
if err != nil {
return nil, &util.JSONResponse{
Code: http.StatusInternalServerError,
JSON: spec.Unknown("Unable to fetch account by password."),
}
}
if !res.Exists {
return nil, &util.JSONResponse{
Code: http.StatusForbidden,
JSON: spec.Forbidden("The username or password was incorrect or the account does not exist."),
}
}
}
return res.Account, nil
}
func (t *LoginTypePassword) authenticateLdap(username, password string) (bool, *util.JSONResponse) {
var conn *ldap.Conn
conn, err := ldap.DialURL(t.Config.Ldap.Uri)
if err != nil {
return false, &util.JSONResponse{
Code: http.StatusInternalServerError,
JSON: spec.Unknown("unable to connect to ldap: " + err.Error()),
}
}
defer conn.Close()
if t.Config.Ldap.AdminBindEnabled {
err = conn.Bind(t.Config.Ldap.AdminBindDn, t.Config.Ldap.AdminBindPassword)
if err != nil {
return false, &util.JSONResponse{
Code: http.StatusInternalServerError,
JSON: spec.Unknown("unable to bind to ldap: " + err.Error()),
}
}
filter := strings.ReplaceAll(t.Config.Ldap.SearchFilter, "{username}", username)
searchRequest := ldap.NewSearchRequest(
t.Config.Ldap.BaseDn, ldap.ScopeWholeSubtree, ldap.NeverDerefAliases,
0, 0, false, filter, []string{t.Config.Ldap.SearchAttribute}, nil,
)
result, err := conn.Search(searchRequest)
if err != nil {
return false, &util.JSONResponse{
Code: http.StatusInternalServerError,
JSON: spec.Unknown("unable to bind to search ldap: " + err.Error()),
}
}
if len(result.Entries) > 1 {
return false, &util.JSONResponse{
Code: http.StatusUnauthorized,
JSON: spec.BadJSON("'user' must not be duplicated."),
}
}
if len(result.Entries) < 1 {
return false, &util.JSONResponse{
Code: http.StatusUnauthorized,
JSON: spec.BadJSON("'user' not found."),
}
}
userDN := result.Entries[0].DN
err = conn.Bind(userDN, password)
if err != nil {
return false, &util.JSONResponse{
Code: http.StatusUnauthorized,
JSON: spec.InvalidUsername(err.Error()),
}
}
} else {
bindDn := strings.ReplaceAll(t.Config.Ldap.UserBindDn, "{username}", username)
err = conn.Bind(bindDn, password)
if err != nil {
return false, &util.JSONResponse{
Code: http.StatusUnauthorized,
JSON: spec.InvalidUsername(err.Error()),
}
}
}
isAdmin, err := t.isLdapAdmin(conn, username)
if err != nil {
return false, &util.JSONResponse{
Code: http.StatusUnauthorized,
JSON: spec.InvalidUsername(err.Error()),
}
}
return isAdmin, nil
}
func (t *LoginTypePassword) isLdapAdmin(conn *ldap.Conn, username string) (bool, error) {
searchRequest := ldap.NewSearchRequest(
t.Config.Ldap.AdminGroupDn,
ldap.ScopeWholeSubtree, ldap.DerefAlways, 0, 0, false,
strings.ReplaceAll(t.Config.Ldap.AdminGroupFilter, "{username}", username),
[]string{t.Config.Ldap.AdminGroupAttribute},
nil)
sr, err := conn.Search(searchRequest)
if err != nil {
return false, err
}
if len(sr.Entries) < 1 {
return false, nil
}
return true, nil
}
func (t *LoginTypePassword) getOrCreateAccount(ctx context.Context, username string, domain spec.ServerName, admin bool) (*api.Account, *util.JSONResponse) {
var existing api.QueryAccountByLocalpartResponse
err := t.UserAPI.QueryAccountByLocalpart(ctx, &api.QueryAccountByLocalpartRequest{
Localpart: username,
ServerName: domain,
}, &existing)
if err == nil {
return existing.Account, nil
}
if err != sql.ErrNoRows {
return nil, &util.JSONResponse{
Code: http.StatusUnauthorized,
JSON: spec.InvalidUsername(err.Error()),
}
}
accountType := api.AccountTypeUser
if admin {
accountType = api.AccountTypeAdmin
}
var created api.PerformAccountCreationResponse
err = t.UserAPI.PerformAccountCreation(ctx, &api.PerformAccountCreationRequest{
AppServiceID: "ldap",
Localpart: username,
Password: uuid.New().String(),
AccountType: accountType,
OnConflict: api.ConflictAbort,
}, &created)
if err != nil {
if _, ok := err.(*api.ErrorConflict); ok {
return nil, &util.JSONResponse{
Code: http.StatusBadRequest,
JSON: spec.UserInUse("Desired user ID is already taken."),
}
}
return nil, &util.JSONResponse{
Code: http.StatusInternalServerError,
JSON: spec.Unknown("failed to create account: " + err.Error()),
}
}
return created.Account, nil
}
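The LDAP branch above pulls its settings from t.Config.Ldap. Purely as an illustration of the shape those references imply: the field names below are taken from the code, while the types, grouping, and comments are assumptions.

package example

// ldapConfig mirrors the fields referenced by LoginTypePassword above.
// Everything except the field names is an assumption.
type ldapConfig struct {
	Enabled bool   // switch between database and LDAP authentication
	Uri     string // e.g. ldap://host:389, passed to ldap.DialURL

	// Used when AdminBindEnabled is true: bind as an admin, then search for the user.
	AdminBindEnabled  bool
	AdminBindDn       string
	AdminBindPassword string
	BaseDn            string
	SearchFilter      string // "{username}" is substituted before searching
	SearchAttribute   string

	// Used when AdminBindEnabled is false: bind directly as the user.
	UserBindDn string // "{username}" is substituted

	// Admin group lookup, used to decide whether the account is created as an admin.
	AdminGroupDn        string
	AdminGroupFilter    string // "{username}" is substituted
	AdminGroupAttribute string
}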

View file

@ -0,0 +1,141 @@
// Copyright 2017 Vector Creations Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package accounts
import (
"context"
"database/sql"
"github.com/matrix-org/gomatrixserverlib"
)
const accountDataSchema = `
-- Stores account data.
CREATE TABLE IF NOT EXISTS account_data (
-- The Matrix user ID localpart for this account
localpart TEXT NOT NULL,
-- The room ID for this data (empty string if not specific to a room)
room_id TEXT,
-- The account data type
type TEXT NOT NULL,
-- The account data content
content TEXT NOT NULL,
PRIMARY KEY(localpart, room_id, type)
);
`
const insertAccountDataSQL = `
INSERT INTO account_data(localpart, room_id, type, content) VALUES($1, $2, $3, $4)
ON CONFLICT (localpart, room_id, type) DO UPDATE SET content = EXCLUDED.content
`
const selectAccountDataSQL = "" +
"SELECT room_id, type, content FROM account_data WHERE localpart = $1"
const selectAccountDataByTypeSQL = "" +
"SELECT content FROM account_data WHERE localpart = $1 AND room_id = $2 AND type = $3"
type accountDataStatements struct {
insertAccountDataStmt *sql.Stmt
selectAccountDataStmt *sql.Stmt
selectAccountDataByTypeStmt *sql.Stmt
}
func (s *accountDataStatements) prepare(db *sql.DB) (err error) {
_, err = db.Exec(accountDataSchema)
if err != nil {
return
}
if s.insertAccountDataStmt, err = db.Prepare(insertAccountDataSQL); err != nil {
return
}
if s.selectAccountDataStmt, err = db.Prepare(selectAccountDataSQL); err != nil {
return
}
if s.selectAccountDataByTypeStmt, err = db.Prepare(selectAccountDataByTypeSQL); err != nil {
return
}
return
}
func (s *accountDataStatements) insertAccountData(
ctx context.Context, localpart, roomID, dataType, content string,
) (err error) {
stmt := s.insertAccountDataStmt
_, err = stmt.ExecContext(ctx, localpart, roomID, dataType, content)
return
}
func (s *accountDataStatements) selectAccountData(
ctx context.Context, localpart string,
) (
global []gomatrixserverlib.ClientEvent,
rooms map[string][]gomatrixserverlib.ClientEvent,
err error,
) {
rows, err := s.selectAccountDataStmt.QueryContext(ctx, localpart)
if err != nil {
return
}
global = []gomatrixserverlib.ClientEvent{}
rooms = make(map[string][]gomatrixserverlib.ClientEvent)
for rows.Next() {
var roomID string
var dataType string
var content []byte
if err = rows.Scan(&roomID, &dataType, &content); err != nil {
return
}
ac := gomatrixserverlib.ClientEvent{
Type: dataType,
Content: content,
}
if len(roomID) > 0 {
rooms[roomID] = append(rooms[roomID], ac)
} else {
global = append(global, ac)
}
}
return
}
func (s *accountDataStatements) selectAccountDataByType(
ctx context.Context, localpart, roomID, dataType string,
) (data *gomatrixserverlib.ClientEvent, err error) {
stmt := s.selectAccountDataByTypeStmt
var content []byte
if err = stmt.QueryRowContext(ctx, localpart, roomID, dataType).Scan(&content); err != nil {
if err == sql.ErrNoRows {
return nil, nil
}
return
}
data = &gomatrixserverlib.ClientEvent{
Type: dataType,
Content: content,
}
return
}
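A small usage sketch for the statements above, assuming it lives in the same accounts package with a Postgres *sql.DB already open; because of the ON CONFLICT clause, the second write is an update and the select returns the latest content. The upsertAndReadAccountData name and example values are illustrative only.

package accounts

import (
	"context"
	"database/sql"
	"fmt"
)

func upsertAndReadAccountData(ctx context.Context, db *sql.DB) error {
	var s accountDataStatements
	if err := s.prepare(db); err != nil {
		return err
	}
	// Two writes to the same (localpart, room_id, type) leave a single row behind.
	if err := s.insertAccountData(ctx, "alice", "", "m.push_rules", `{"global":{}}`); err != nil {
		return err
	}
	if err := s.insertAccountData(ctx, "alice", "", "m.push_rules", `{"global":{"override":[]}}`); err != nil {
		return err
	}
	ev, err := s.selectAccountDataByType(ctx, "alice", "", "m.push_rules")
	if err != nil || ev == nil {
		return err
	}
	fmt.Printf("latest %s content: %s\n", ev.Type, string(ev.Content))
	return nil
}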

View file

@ -0,0 +1,153 @@
// Copyright 2017 Vector Creations Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package accounts
import (
"context"
"database/sql"
"time"
"github.com/matrix-org/dendrite/clientapi/auth/authtypes"
"github.com/matrix-org/dendrite/clientapi/userutil"
"github.com/matrix-org/gomatrixserverlib"
log "github.com/sirupsen/logrus"
)
const accountsSchema = `
-- Stores data about accounts.
CREATE TABLE IF NOT EXISTS account_accounts (
-- The Matrix user ID localpart for this account
localpart TEXT NOT NULL PRIMARY KEY,
-- When this account was first created, as a unix timestamp (ms resolution).
created_ts BIGINT NOT NULL,
-- The password hash for this account. Can be NULL if this is a passwordless account.
password_hash TEXT,
-- Identifies which application service this account belongs to, if any.
appservice_id TEXT
-- TODO:
-- is_guest, is_admin, upgraded_ts, devices, any email reset stuff?
);
-- Create sequence for autogenerated numeric usernames
CREATE SEQUENCE IF NOT EXISTS numeric_username_seq START 1;
`
const insertAccountSQL = "" +
"INSERT INTO account_accounts(localpart, created_ts, password_hash, appservice_id) VALUES ($1, $2, $3, $4)"
const selectAccountByLocalpartSQL = "" +
"SELECT localpart, appservice_id FROM account_accounts WHERE localpart = $1"
const selectPasswordHashSQL = "" +
"SELECT password_hash FROM account_accounts WHERE localpart = $1"
const selectNewNumericLocalpartSQL = "" +
"SELECT nextval('numeric_username_seq')"
// TODO: Update password
type accountsStatements struct {
insertAccountStmt *sql.Stmt
selectAccountByLocalpartStmt *sql.Stmt
selectPasswordHashStmt *sql.Stmt
selectNewNumericLocalpartStmt *sql.Stmt
serverName gomatrixserverlib.ServerName
}
func (s *accountsStatements) prepare(db *sql.DB, server gomatrixserverlib.ServerName) (err error) {
_, err = db.Exec(accountsSchema)
if err != nil {
return
}
if s.insertAccountStmt, err = db.Prepare(insertAccountSQL); err != nil {
return
}
if s.selectAccountByLocalpartStmt, err = db.Prepare(selectAccountByLocalpartSQL); err != nil {
return
}
if s.selectPasswordHashStmt, err = db.Prepare(selectPasswordHashSQL); err != nil {
return
}
if s.selectNewNumericLocalpartStmt, err = db.Prepare(selectNewNumericLocalpartSQL); err != nil {
return
}
s.serverName = server
return
}
// insertAccount creates a new account. 'hash' should be the password hash for this account. If it is missing,
// this account will be passwordless. Returns an error if this account already exists. Returns the account
// on success.
func (s *accountsStatements) insertAccount(
ctx context.Context, localpart, hash, appserviceID string,
) (*authtypes.Account, error) {
createdTimeMS := time.Now().UnixNano() / 1000000
stmt := s.insertAccountStmt
var err error
if appserviceID == "" {
_, err = stmt.ExecContext(ctx, localpart, createdTimeMS, hash, nil)
} else {
_, err = stmt.ExecContext(ctx, localpart, createdTimeMS, hash, appserviceID)
}
if err != nil {
return nil, err
}
return &authtypes.Account{
Localpart: localpart,
UserID: userutil.MakeUserID(localpart, s.serverName),
ServerName: s.serverName,
AppServiceID: appserviceID,
}, nil
}
func (s *accountsStatements) selectPasswordHash(
ctx context.Context, localpart string,
) (hash string, err error) {
err = s.selectPasswordHashStmt.QueryRowContext(ctx, localpart).Scan(&hash)
return
}
func (s *accountsStatements) selectAccountByLocalpart(
ctx context.Context, localpart string,
) (*authtypes.Account, error) {
var appserviceIDPtr sql.NullString
var acc authtypes.Account
stmt := s.selectAccountByLocalpartStmt
err := stmt.QueryRowContext(ctx, localpart).Scan(&acc.Localpart, &appserviceIDPtr)
if err != nil {
if err != sql.ErrNoRows {
log.WithError(err).Error("Unable to retrieve user from the db")
}
return nil, err
}
if appserviceIDPtr.Valid {
acc.AppServiceID = appserviceIDPtr.String
}
acc.UserID = userutil.MakeUserID(localpart, s.serverName)
acc.ServerName = s.serverName
return &acc, nil
}
func (s *accountsStatements) selectNewNumericLocalpart(
ctx context.Context,
) (id int64, err error) {
err = s.selectNewNumericLocalpartStmt.QueryRowContext(ctx).Scan(&id)
return
}
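A sketch of how the numeric_username_seq sequence is meant to combine with insertAccount for autogenerated numeric usernames, assuming it sits in the same accounts package; the createNumericAccount name is illustrative.

package accounts

import (
	"context"
	"database/sql"
	"strconv"

	"github.com/matrix-org/dendrite/clientapi/auth/authtypes"
	"github.com/matrix-org/gomatrixserverlib"
)

func createNumericAccount(ctx context.Context, db *sql.DB, server gomatrixserverlib.ServerName) (*authtypes.Account, error) {
	var s accountsStatements
	if err := s.prepare(db, server); err != nil {
		return nil, err
	}
	// Draw the next value from numeric_username_seq and use it as the localpart.
	id, err := s.selectNewNumericLocalpart(ctx)
	if err != nil {
		return nil, err
	}
	localpart := strconv.FormatInt(id, 10)
	// An empty hash makes this a passwordless account; an empty appservice ID
	// means it does not belong to an application service.
	return s.insertAccount(ctx, localpart, "", "")
}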

View file

@ -12,22 +12,19 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package postgres
package accounts
import (
"context"
"database/sql"
"encoding/json"
"github.com/matrix-org/dendrite/internal/sqlutil"
"github.com/matrix-org/dendrite/syncapi/storage/tables"
"github.com/matrix-org/dendrite/syncapi/synctypes"
"github.com/matrix-org/gomatrixserverlib"
)
const filterSchema = `
-- Stores data about filters
CREATE TABLE IF NOT EXISTS syncapi_filter (
CREATE TABLE IF NOT EXISTS account_filter (
-- The filter
filter TEXT NOT NULL,
-- The ID
@ -38,17 +35,17 @@ CREATE TABLE IF NOT EXISTS syncapi_filter (
PRIMARY KEY(id, localpart)
);
CREATE INDEX IF NOT EXISTS syncapi_filter_localpart ON syncapi_filter(localpart);
CREATE INDEX IF NOT EXISTS account_filter_localpart ON account_filter(localpart);
`
const selectFilterSQL = "" +
"SELECT filter FROM syncapi_filter WHERE localpart = $1 AND id = $2"
"SELECT filter FROM account_filter WHERE localpart = $1 AND id = $2"
const selectFilterIDByContentSQL = "" +
"SELECT id FROM syncapi_filter WHERE localpart = $1 AND filter = $2"
"SELECT id FROM account_filter WHERE localpart = $1 AND filter = $2"
const insertFilterSQL = "" +
"INSERT INTO syncapi_filter (filter, id, localpart) VALUES ($1, DEFAULT, $2) RETURNING id"
"INSERT INTO account_filter (filter, id, localpart) VALUES ($1, DEFAULT, $2) RETURNING id"
type filterStatements struct {
selectFilterStmt *sql.Stmt
@ -56,38 +53,43 @@ type filterStatements struct {
insertFilterStmt *sql.Stmt
}
func NewPostgresFilterTable(db *sql.DB) (tables.Filter, error) {
_, err := db.Exec(filterSchema)
func (s *filterStatements) prepare(db *sql.DB) (err error) {
_, err = db.Exec(filterSchema)
if err != nil {
return
}
if s.selectFilterStmt, err = db.Prepare(selectFilterSQL); err != nil {
return
}
if s.selectFilterIDByContentStmt, err = db.Prepare(selectFilterIDByContentSQL); err != nil {
return
}
if s.insertFilterStmt, err = db.Prepare(insertFilterSQL); err != nil {
return
}
return
}
func (s *filterStatements) selectFilter(
ctx context.Context, localpart string, filterID string,
) (*gomatrixserverlib.Filter, error) {
// Retrieve filter from database (stored as canonical JSON)
var filterData []byte
err := s.selectFilterStmt.QueryRowContext(ctx, localpart, filterID).Scan(&filterData)
if err != nil {
return nil, err
}
s := &filterStatements{}
return s, sqlutil.StatementList{
{&s.selectFilterStmt, selectFilterSQL},
{&s.selectFilterIDByContentStmt, selectFilterIDByContentSQL},
{&s.insertFilterStmt, insertFilterSQL},
}.Prepare(db)
}
func (s *filterStatements) SelectFilter(
ctx context.Context, txn *sql.Tx, target *synctypes.Filter, localpart string, filterID string,
) error {
// Retrieve filter from database (stored as canonical JSON)
var filterData []byte
err := sqlutil.TxStmt(txn, s.selectFilterStmt).QueryRowContext(ctx, localpart, filterID).Scan(&filterData)
if err != nil {
return err
}
// Unmarshal JSON into Filter struct
if err = json.Unmarshal(filterData, &target); err != nil {
return err
var filter gomatrixserverlib.Filter
if err = json.Unmarshal(filterData, &filter); err != nil {
return nil, err
}
return nil
return &filter, nil
}
func (s *filterStatements) InsertFilter(
ctx context.Context, txn *sql.Tx, filter *synctypes.Filter, localpart string,
func (s *filterStatements) insertFilter(
ctx context.Context, filter *gomatrixserverlib.Filter, localpart string,
) (filterID string, err error) {
var existingFilterID string
@ -108,9 +110,8 @@ func (s *filterStatements) InsertFilter(
// This can result in a race condition when two clients try to insert the
// same filter and localpart at the same time, however this is not a
// problem as both calls will result in the same filterID
err = sqlutil.TxStmt(txn, s.selectFilterIDByContentStmt).QueryRowContext(
ctx, localpart, filterJSON,
).Scan(&existingFilterID)
err = s.selectFilterIDByContentStmt.QueryRowContext(ctx,
localpart, filterJSON).Scan(&existingFilterID)
if err != nil && err != sql.ErrNoRows {
return "", err
}
@ -120,7 +121,7 @@ func (s *filterStatements) InsertFilter(
}
// Otherwise insert the filter and return the new ID
err = sqlutil.TxStmt(txn, s.insertFilterStmt).QueryRowContext(ctx, filterJSON, localpart).
err = s.insertFilterStmt.QueryRowContext(ctx, filterJSON, localpart).
Scan(&filterID)
return
}
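For the account_filter variant above, inserting a byte-identical filter twice is intended to hand back the same ID rather than create a duplicate row. A sketch under the assumption that it runs inside the accounts package with a prepared database; the demoFilterDedup name is made up.

package accounts

import (
	"context"
	"database/sql"
	"fmt"

	"github.com/matrix-org/gomatrixserverlib"
)

func demoFilterDedup(ctx context.Context, db *sql.DB) error {
	var s filterStatements
	if err := s.prepare(db); err != nil {
		return err
	}
	filter := &gomatrixserverlib.Filter{}
	first, err := s.insertFilter(ctx, filter, "alice")
	if err != nil {
		return err
	}
	// The second insert finds the existing row by content and returns its ID.
	second, err := s.insertFilter(ctx, filter, "alice")
	if err != nil {
		return err
	}
	fmt.Println("same filter ID:", first == second)
	return nil
}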

View file

@ -0,0 +1,132 @@
// Copyright 2017 Vector Creations Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package accounts
import (
"context"
"database/sql"
"github.com/lib/pq"
"github.com/matrix-org/dendrite/clientapi/auth/authtypes"
)
const membershipSchema = `
-- Stores data about users' memberships to rooms.
CREATE TABLE IF NOT EXISTS account_memberships (
-- The Matrix user ID localpart for the member
localpart TEXT NOT NULL,
-- The room this user is a member of
room_id TEXT NOT NULL,
-- The ID of the join membership event
event_id TEXT NOT NULL,
-- A user can only be member of a room once
PRIMARY KEY (localpart, room_id)
);
-- Use index to process deletion by ID more efficiently
CREATE UNIQUE INDEX IF NOT EXISTS account_membership_event_id ON account_memberships(event_id);
`
const insertMembershipSQL = `
INSERT INTO account_memberships(localpart, room_id, event_id) VALUES ($1, $2, $3)
ON CONFLICT (localpart, room_id) DO UPDATE SET event_id = EXCLUDED.event_id
`
const selectMembershipsByLocalpartSQL = "" +
"SELECT room_id, event_id FROM account_memberships WHERE localpart = $1"
const selectMembershipInRoomByLocalpartSQL = "" +
"SELECT event_id FROM account_memberships WHERE localpart = $1 AND room_id = $2"
const deleteMembershipsByEventIDsSQL = "" +
"DELETE FROM account_memberships WHERE event_id = ANY($1)"
type membershipStatements struct {
deleteMembershipsByEventIDsStmt *sql.Stmt
insertMembershipStmt *sql.Stmt
selectMembershipInRoomByLocalpartStmt *sql.Stmt
selectMembershipsByLocalpartStmt *sql.Stmt
}
func (s *membershipStatements) prepare(db *sql.DB) (err error) {
_, err = db.Exec(membershipSchema)
if err != nil {
return
}
if s.deleteMembershipsByEventIDsStmt, err = db.Prepare(deleteMembershipsByEventIDsSQL); err != nil {
return
}
if s.insertMembershipStmt, err = db.Prepare(insertMembershipSQL); err != nil {
return
}
if s.selectMembershipInRoomByLocalpartStmt, err = db.Prepare(selectMembershipInRoomByLocalpartSQL); err != nil {
return
}
if s.selectMembershipsByLocalpartStmt, err = db.Prepare(selectMembershipsByLocalpartSQL); err != nil {
return
}
return
}
func (s *membershipStatements) insertMembership(
ctx context.Context, txn *sql.Tx, localpart, roomID, eventID string,
) (err error) {
stmt := txn.Stmt(s.insertMembershipStmt)
_, err = stmt.ExecContext(ctx, localpart, roomID, eventID)
return
}
func (s *membershipStatements) deleteMembershipsByEventIDs(
ctx context.Context, txn *sql.Tx, eventIDs []string,
) (err error) {
stmt := txn.Stmt(s.deleteMembershipsByEventIDsStmt)
_, err = stmt.ExecContext(ctx, pq.StringArray(eventIDs))
return
}
func (s *membershipStatements) selectMembershipInRoomByLocalpart(
ctx context.Context, localpart, roomID string,
) (authtypes.Membership, error) {
membership := authtypes.Membership{Localpart: localpart, RoomID: roomID}
stmt := s.selectMembershipInRoomByLocalpartStmt
err := stmt.QueryRowContext(ctx, localpart, roomID).Scan(&membership.EventID)
return membership, err
}
func (s *membershipStatements) selectMembershipsByLocalpart(
ctx context.Context, localpart string,
) (memberships []authtypes.Membership, err error) {
stmt := s.selectMembershipsByLocalpartStmt
rows, err := stmt.QueryContext(ctx, localpart)
if err != nil {
return
}
memberships = []authtypes.Membership{}
defer rows.Close() // nolint: errcheck
for rows.Next() {
var m authtypes.Membership
m.Localpart = localpart
if err := rows.Scan(&m.RoomID, &m.EventID); err != nil {
return nil, err
}
memberships = append(memberships, m)
}
return
}
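A usage sketch for the membership statements above, assuming the accounts package and an open database; note that insertMembership goes through txn.Stmt, so it must be given a live transaction. The recordJoin name and the example room and event IDs are illustrative.

package accounts

import (
	"context"
	"database/sql"
	"fmt"
)

func recordJoin(ctx context.Context, db *sql.DB) error {
	var s membershipStatements
	if err := s.prepare(db); err != nil {
		return err
	}
	txn, err := db.BeginTx(ctx, nil)
	if err != nil {
		return err
	}
	// insertMembership runs against txn.Stmt, so it needs the transaction.
	if err = s.insertMembership(ctx, txn, "alice", "!room:example.com", "$joinevent"); err != nil {
		_ = txn.Rollback()
		return err
	}
	if err = txn.Commit(); err != nil {
		return err
	}
	memberships, err := s.selectMembershipsByLocalpart(ctx, "alice")
	if err != nil {
		return err
	}
	fmt.Printf("alice is a member of %d room(s)\n", len(memberships))
	return nil
}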

View file

@ -0,0 +1,107 @@
// Copyright 2017 Vector Creations Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package accounts
import (
"context"
"database/sql"
"github.com/matrix-org/dendrite/clientapi/auth/authtypes"
)
const profilesSchema = `
-- Stores data about account profiles.
CREATE TABLE IF NOT EXISTS account_profiles (
-- The Matrix user ID localpart for this account
localpart TEXT NOT NULL PRIMARY KEY,
-- The display name for this account
display_name TEXT,
-- The URL of the avatar for this account
avatar_url TEXT
);
`
const insertProfileSQL = "" +
"INSERT INTO account_profiles(localpart, display_name, avatar_url) VALUES ($1, $2, $3)"
const selectProfileByLocalpartSQL = "" +
"SELECT localpart, display_name, avatar_url FROM account_profiles WHERE localpart = $1"
const setAvatarURLSQL = "" +
"UPDATE account_profiles SET avatar_url = $1 WHERE localpart = $2"
const setDisplayNameSQL = "" +
"UPDATE account_profiles SET display_name = $1 WHERE localpart = $2"
type profilesStatements struct {
insertProfileStmt *sql.Stmt
selectProfileByLocalpartStmt *sql.Stmt
setAvatarURLStmt *sql.Stmt
setDisplayNameStmt *sql.Stmt
}
func (s *profilesStatements) prepare(db *sql.DB) (err error) {
_, err = db.Exec(profilesSchema)
if err != nil {
return
}
if s.insertProfileStmt, err = db.Prepare(insertProfileSQL); err != nil {
return
}
if s.selectProfileByLocalpartStmt, err = db.Prepare(selectProfileByLocalpartSQL); err != nil {
return
}
if s.setAvatarURLStmt, err = db.Prepare(setAvatarURLSQL); err != nil {
return
}
if s.setDisplayNameStmt, err = db.Prepare(setDisplayNameSQL); err != nil {
return
}
return
}
func (s *profilesStatements) insertProfile(
ctx context.Context, localpart string,
) (err error) {
_, err = s.insertProfileStmt.ExecContext(ctx, localpart, "", "")
return
}
func (s *profilesStatements) selectProfileByLocalpart(
ctx context.Context, localpart string,
) (*authtypes.Profile, error) {
var profile authtypes.Profile
err := s.selectProfileByLocalpartStmt.QueryRowContext(ctx, localpart).Scan(
&profile.Localpart, &profile.DisplayName, &profile.AvatarURL,
)
if err != nil {
return nil, err
}
return &profile, nil
}
func (s *profilesStatements) setAvatarURL(
ctx context.Context, localpart string, avatarURL string,
) (err error) {
_, err = s.setAvatarURLStmt.ExecContext(ctx, avatarURL, localpart)
return
}
func (s *profilesStatements) setDisplayName(
ctx context.Context, localpart string, displayName string,
) (err error) {
_, err = s.setDisplayNameStmt.ExecContext(ctx, displayName, localpart)
return
}
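A matching sketch for the profile statements above: a profile row is inserted empty and then filled in field by field. It assumes the accounts package and an open database; the initProfile name and example values are illustrative.

package accounts

import (
	"context"
	"database/sql"

	"github.com/matrix-org/dendrite/clientapi/auth/authtypes"
)

func initProfile(ctx context.Context, db *sql.DB) (*authtypes.Profile, error) {
	var s profilesStatements
	if err := s.prepare(db); err != nil {
		return nil, err
	}
	// insertProfile creates the row with an empty display name and avatar URL.
	if err := s.insertProfile(ctx, "alice"); err != nil {
		return nil, err
	}
	if err := s.setDisplayName(ctx, "alice", "Alice"); err != nil {
		return nil, err
	}
	if err := s.setAvatarURL(ctx, "alice", "mxc://example.com/avatar"); err != nil {
		return nil, err
	}
	return s.selectProfileByLocalpart(ctx, "alice")
}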

View file

@ -0,0 +1,381 @@
// Copyright 2017 Vector Creations Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package accounts
import (
"context"
"database/sql"
"errors"
"github.com/matrix-org/dendrite/clientapi/auth/authtypes"
"github.com/matrix-org/dendrite/common"
"github.com/matrix-org/gomatrixserverlib"
"golang.org/x/crypto/bcrypt"
// Import the postgres database driver.
_ "github.com/lib/pq"
)
// Database represents an account database
type Database struct {
db *sql.DB
common.PartitionOffsetStatements
accounts accountsStatements
profiles profilesStatements
memberships membershipStatements
accountDatas accountDataStatements
threepids threepidStatements
filter filterStatements
serverName gomatrixserverlib.ServerName
}
// NewDatabase creates a new accounts and profiles database
func NewDatabase(dataSourceName string, serverName gomatrixserverlib.ServerName) (*Database, error) {
var db *sql.DB
var err error
if db, err = sql.Open("postgres", dataSourceName); err != nil {
return nil, err
}
partitions := common.PartitionOffsetStatements{}
if err = partitions.Prepare(db, "account"); err != nil {
return nil, err
}
a := accountsStatements{}
if err = a.prepare(db, serverName); err != nil {
return nil, err
}
p := profilesStatements{}
if err = p.prepare(db); err != nil {
return nil, err
}
m := membershipStatements{}
if err = m.prepare(db); err != nil {
return nil, err
}
ac := accountDataStatements{}
if err = ac.prepare(db); err != nil {
return nil, err
}
t := threepidStatements{}
if err = t.prepare(db); err != nil {
return nil, err
}
f := filterStatements{}
if err = f.prepare(db); err != nil {
return nil, err
}
return &Database{db, partitions, a, p, m, ac, t, f, serverName}, nil
}
// GetAccountByPassword returns the account associated with the given localpart and password.
// Returns sql.ErrNoRows if no account exists which matches the given localpart.
func (d *Database) GetAccountByPassword(
ctx context.Context, localpart, plaintextPassword string,
) (*authtypes.Account, error) {
hash, err := d.accounts.selectPasswordHash(ctx, localpart)
if err != nil {
return nil, err
}
if err := bcrypt.CompareHashAndPassword([]byte(hash), []byte(plaintextPassword)); err != nil {
return nil, err
}
return d.accounts.selectAccountByLocalpart(ctx, localpart)
}
// GetProfileByLocalpart returns the profile associated with the given localpart.
// Returns sql.ErrNoRows if no profile exists which matches the given localpart.
func (d *Database) GetProfileByLocalpart(
ctx context.Context, localpart string,
) (*authtypes.Profile, error) {
return d.profiles.selectProfileByLocalpart(ctx, localpart)
}
// SetAvatarURL updates the avatar URL of the profile associated with the given
// localpart. Returns an error if something went wrong with the SQL query
func (d *Database) SetAvatarURL(
ctx context.Context, localpart string, avatarURL string,
) error {
return d.profiles.setAvatarURL(ctx, localpart, avatarURL)
}
// SetDisplayName updates the display name of the profile associated with the given
// localpart. Returns an error if something went wrong with the SQL query
func (d *Database) SetDisplayName(
ctx context.Context, localpart string, displayName string,
) error {
return d.profiles.setDisplayName(ctx, localpart, displayName)
}
// CreateAccount makes a new account with the given login name and password, and creates an empty profile
// for this account. If no password is supplied, the account will be a passwordless account. If the
// account already exists, it will return nil, nil.
func (d *Database) CreateAccount(
ctx context.Context, localpart, plaintextPassword, appserviceID string,
) (*authtypes.Account, error) {
var err error
// Generate a password hash if this is not a password-less user
hash := ""
if plaintextPassword != "" {
hash, err = hashPassword(plaintextPassword)
if err != nil {
return nil, err
}
}
if err := d.profiles.insertProfile(ctx, localpart); err != nil {
if common.IsUniqueConstraintViolationErr(err) {
return nil, nil
}
return nil, err
}
return d.accounts.insertAccount(ctx, localpart, hash, appserviceID)
}
// saveMembership saves the user matching a given localpart as a member of a given
// room. It also stores the ID of the membership event.
// If a membership already exists between the user and the room, or if the
// insert fails, it returns the SQL error.
func (d *Database) saveMembership(
ctx context.Context, txn *sql.Tx, localpart, roomID, eventID string,
) error {
return d.memberships.insertMembership(ctx, txn, localpart, roomID, eventID)
}
// removeMembershipsByEventIDs removes the memberships corresponding to the
// `join` membership events IDs in the eventIDs slice.
// If the removal fails, or if there is no membership to remove, returns an error
func (d *Database) removeMembershipsByEventIDs(
ctx context.Context, txn *sql.Tx, eventIDs []string,
) error {
return d.memberships.deleteMembershipsByEventIDs(ctx, txn, eventIDs)
}
// UpdateMemberships adds the "join" membership events included in a given state
// events array, and removes those whose IDs are included in a given array of event
// IDs. The whole process runs in a single transaction, which commits only if every
// insertion and deletion has been successfully processed.
// Returns a SQL error if there was an issue with any part of the process.
func (d *Database) UpdateMemberships(
ctx context.Context, eventsToAdd []gomatrixserverlib.Event, idsToRemove []string,
) error {
return common.WithTransaction(d.db, func(txn *sql.Tx) error {
if err := d.removeMembershipsByEventIDs(ctx, txn, idsToRemove); err != nil {
return err
}
for _, event := range eventsToAdd {
if err := d.newMembership(ctx, txn, event); err != nil {
return err
}
}
return nil
})
}
// GetMembershipInRoomByLocalpart returns the membership for a user
// matching the given localpart if they are a member of the room matching roomID;
// if not, sql.ErrNoRows is returned.
// If there was an issue during the retrieval, returns the SQL error.
func (d *Database) GetMembershipInRoomByLocalpart(
ctx context.Context, localpart, roomID string,
) (authtypes.Membership, error) {
return d.memberships.selectMembershipInRoomByLocalpart(ctx, localpart, roomID)
}
// GetMembershipsByLocalpart returns an array containing the memberships for all
// the rooms a user matching a given localpart is a member of.
// If no memberships match the given localpart, returns an empty array.
// If there was an issue during the retrieval, returns the SQL error.
func (d *Database) GetMembershipsByLocalpart(
ctx context.Context, localpart string,
) (memberships []authtypes.Membership, err error) {
return d.memberships.selectMembershipsByLocalpart(ctx, localpart)
}
// newMembership saves a new membership in the database.
// If the event isn't a valid m.room.member event with membership `join`, it does nothing.
// If an error occurs, it returns the SQL error.
func (d *Database) newMembership(
ctx context.Context, txn *sql.Tx, ev gomatrixserverlib.Event,
) error {
if ev.Type() == "m.room.member" && ev.StateKey() != nil {
localpart, serverName, err := gomatrixserverlib.SplitID('@', *ev.StateKey())
if err != nil {
return err
}
// We only want state events from local users
if string(serverName) != string(d.serverName) {
return nil
}
eventID := ev.EventID()
roomID := ev.RoomID()
membership, err := ev.Membership()
if err != nil {
return err
}
// Only "join" membership events can be considered as new memberships
if membership == gomatrixserverlib.Join {
if err := d.saveMembership(ctx, txn, localpart, roomID, eventID); err != nil {
return err
}
}
}
return nil
}
// SaveAccountData saves new account data for a given user and a given room.
// If the account data is not specific to a room, the room ID should be an empty string.
// If account data already exists for a given (user, room, data type) set, the
// corresponding row is updated with the new content.
// Returns a SQL error if there was an issue with the insertion/update.
func (d *Database) SaveAccountData(
ctx context.Context, localpart, roomID, dataType, content string,
) error {
return d.accountDatas.insertAccountData(ctx, localpart, roomID, dataType, content)
}
// GetAccountData returns account data related to a given localpart.
// If no account data could be found, returns empty arrays.
// Returns an error if there was an issue with the retrieval.
func (d *Database) GetAccountData(ctx context.Context, localpart string) (
global []gomatrixserverlib.ClientEvent,
rooms map[string][]gomatrixserverlib.ClientEvent,
err error,
) {
return d.accountDatas.selectAccountData(ctx, localpart)
}
// GetAccountDataByType returns account data matching a given
// localpart, room ID and type.
// If no account data could be found, returns nil
// Returns an error if there was an issue with the retrieval
func (d *Database) GetAccountDataByType(
ctx context.Context, localpart, roomID, dataType string,
) (data *gomatrixserverlib.ClientEvent, err error) {
return d.accountDatas.selectAccountDataByType(
ctx, localpart, roomID, dataType,
)
}
// GetNewNumericLocalpart generates and returns a new unused numeric localpart
func (d *Database) GetNewNumericLocalpart(
ctx context.Context,
) (int64, error) {
return d.accounts.selectNewNumericLocalpart(ctx)
}
func hashPassword(plaintext string) (hash string, err error) {
hashBytes, err := bcrypt.GenerateFromPassword([]byte(plaintext), bcrypt.DefaultCost)
return string(hashBytes), err
}
// Err3PIDInUse is the error returned when trying to save an association involving
// a third-party identifier which is already associated to a local user.
var Err3PIDInUse = errors.New("This third-party identifier is already in use")
// SaveThreePIDAssociation saves the association between a third party identifier
// and a local Matrix user (identified by the user's ID's local part).
// If the third-party identifier is already part of an association, returns Err3PIDInUse.
// Returns an error if there was a problem talking to the database.
func (d *Database) SaveThreePIDAssociation(
ctx context.Context, threepid, localpart, medium string,
) (err error) {
return common.WithTransaction(d.db, func(txn *sql.Tx) error {
user, err := d.threepids.selectLocalpartForThreePID(
ctx, txn, threepid, medium,
)
if err != nil {
return err
}
if len(user) > 0 {
return Err3PIDInUse
}
return d.threepids.insertThreePID(ctx, txn, threepid, medium, localpart)
})
}
// RemoveThreePIDAssociation removes the association involving a given third-party
// identifier.
// If no association exists involving this third-party identifier, returns nothing.
// If there was a problem talking to the database, returns an error.
func (d *Database) RemoveThreePIDAssociation(
ctx context.Context, threepid string, medium string,
) (err error) {
return d.threepids.deleteThreePID(ctx, threepid, medium)
}
// GetLocalpartForThreePID looks up the localpart associated with a given third-party
// identifier.
// If no association involves the given third-party identifier, returns an empty
// string.
// Returns an error if there was a problem talking to the database.
func (d *Database) GetLocalpartForThreePID(
ctx context.Context, threepid string, medium string,
) (localpart string, err error) {
return d.threepids.selectLocalpartForThreePID(ctx, nil, threepid, medium)
}
// GetThreePIDsForLocalpart looks up the third-party identifiers associated with
// a given local user.
// If no association is known for this user, returns an empty slice.
// Returns an error if there was an issue talking to the database.
func (d *Database) GetThreePIDsForLocalpart(
ctx context.Context, localpart string,
) (threepids []authtypes.ThreePID, err error) {
return d.threepids.selectThreePIDsForLocalpart(ctx, localpart)
}
// GetFilter looks up the filter associated with a given local user and filter ID.
// Returns a filter structure. Otherwise returns an error if no such filter exists
// or if there was an error talking to the database.
func (d *Database) GetFilter(
ctx context.Context, localpart string, filterID string,
) (*gomatrixserverlib.Filter, error) {
return d.filter.selectFilter(ctx, localpart, filterID)
}
// PutFilter puts the passed filter into the database.
// Returns the filterID as a string. Otherwise returns an error if something
// goes wrong.
func (d *Database) PutFilter(
ctx context.Context, localpart string, filter *gomatrixserverlib.Filter,
) (string, error) {
return d.filter.insertFilter(ctx, filter, localpart)
}
// CheckAccountAvailability checks if the username/localpart is already present
// in the database.
// If the DB returns sql.ErrNoRows the Localpart isn't taken.
func (d *Database) CheckAccountAvailability(ctx context.Context, localpart string) (bool, error) {
_, err := d.accounts.selectAccountByLocalpart(ctx, localpart)
if err == sql.ErrNoRows {
return true, nil
}
return false, err
}
// GetAccountByLocalpart returns the account associated with the given localpart.
// This function assumes the request is authenticated or the account data is used only internally.
// Returns sql.ErrNoRows if no account exists which matches the given localpart.
func (d *Database) GetAccountByLocalpart(ctx context.Context, localpart string,
) (*authtypes.Account, error) {
return d.accounts.selectAccountByLocalpart(ctx, localpart)
}

View file

@ -0,0 +1,129 @@
// Copyright 2017 Vector Creations Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package accounts
import (
"context"
"database/sql"
"github.com/matrix-org/dendrite/common"
"github.com/matrix-org/dendrite/clientapi/auth/authtypes"
)
const threepidSchema = `
-- Stores data about third party identifiers
CREATE TABLE IF NOT EXISTS account_threepid (
-- The third party identifier
threepid TEXT NOT NULL,
-- The 3PID medium
medium TEXT NOT NULL DEFAULT 'email',
-- The localpart of the Matrix user ID associated to this 3PID
localpart TEXT NOT NULL,
PRIMARY KEY(threepid, medium)
);
CREATE INDEX IF NOT EXISTS account_threepid_localpart ON account_threepid(localpart);
`
const selectLocalpartForThreePIDSQL = "" +
"SELECT localpart FROM account_threepid WHERE threepid = $1 AND medium = $2"
const selectThreePIDsForLocalpartSQL = "" +
"SELECT threepid, medium FROM account_threepid WHERE localpart = $1"
const insertThreePIDSQL = "" +
"INSERT INTO account_threepid (threepid, medium, localpart) VALUES ($1, $2, $3)"
const deleteThreePIDSQL = "" +
"DELETE FROM account_threepid WHERE threepid = $1 AND medium = $2"
type threepidStatements struct {
selectLocalpartForThreePIDStmt *sql.Stmt
selectThreePIDsForLocalpartStmt *sql.Stmt
insertThreePIDStmt *sql.Stmt
deleteThreePIDStmt *sql.Stmt
}
func (s *threepidStatements) prepare(db *sql.DB) (err error) {
_, err = db.Exec(threepidSchema)
if err != nil {
return
}
if s.selectLocalpartForThreePIDStmt, err = db.Prepare(selectLocalpartForThreePIDSQL); err != nil {
return
}
if s.selectThreePIDsForLocalpartStmt, err = db.Prepare(selectThreePIDsForLocalpartSQL); err != nil {
return
}
if s.insertThreePIDStmt, err = db.Prepare(insertThreePIDSQL); err != nil {
return
}
if s.deleteThreePIDStmt, err = db.Prepare(deleteThreePIDSQL); err != nil {
return
}
return
}
func (s *threepidStatements) selectLocalpartForThreePID(
ctx context.Context, txn *sql.Tx, threepid string, medium string,
) (localpart string, err error) {
stmt := common.TxStmt(txn, s.selectLocalpartForThreePIDStmt)
err = stmt.QueryRowContext(ctx, threepid, medium).Scan(&localpart)
if err == sql.ErrNoRows {
return "", nil
}
return
}
func (s *threepidStatements) selectThreePIDsForLocalpart(
ctx context.Context, localpart string,
) (threepids []authtypes.ThreePID, err error) {
rows, err := s.selectThreePIDsForLocalpartStmt.QueryContext(ctx, localpart)
if err != nil {
return
}
threepids = []authtypes.ThreePID{}
for rows.Next() {
var threepid string
var medium string
if err = rows.Scan(&threepid, &medium); err != nil {
return
}
threepids = append(threepids, authtypes.ThreePID{
Address: threepid,
Medium: medium,
})
}
return
}
func (s *threepidStatements) insertThreePID(
ctx context.Context, txn *sql.Tx, threepid, medium, localpart string,
) (err error) {
stmt := common.TxStmt(txn, s.insertThreePIDStmt)
_, err = stmt.ExecContext(ctx, threepid, medium, localpart)
return
}
func (s *threepidStatements) deleteThreePID(
ctx context.Context, threepid string, medium string) (err error) {
_, err = s.deleteThreePIDStmt.ExecContext(ctx, threepid, medium)
return
}
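
A minimal sketch, not part of the diff, of how the 3PID statements above are reached through the Database methods shown earlier (SaveThreePIDAssociation and GetLocalpartForThreePID). The helper function and its arguments are hypothetical; db is assumed to be an *accounts.Database opened as in the previous sketch.

package main

import (
	"context"
	"log"

	"github.com/matrix-org/dendrite/clientapi/auth/storage/accounts"
)

// associateEmail binds an email address to a local user and reads the
// association back.
func associateEmail(ctx context.Context, db *accounts.Database, email, localpart string) error {
	err := db.SaveThreePIDAssociation(ctx, email, localpart, "email")
	if err == accounts.Err3PIDInUse {
		// The address is already bound to another local user.
		return err
	} else if err != nil {
		return err
	}
	// Reverse lookup: an empty localpart means no association exists.
	owner, err := db.GetLocalpartForThreePID(ctx, email, "email")
	if err != nil {
		return err
	}
	log.Printf("3PID %q now belongs to %q", email, owner)
	return nil
}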

View file

@ -0,0 +1,221 @@
// Copyright 2017 Vector Creations Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package devices
import (
"context"
"database/sql"
"time"
"github.com/matrix-org/dendrite/common"
"github.com/matrix-org/dendrite/clientapi/auth/authtypes"
"github.com/matrix-org/dendrite/clientapi/userutil"
"github.com/matrix-org/gomatrixserverlib"
)
const devicesSchema = `
-- This sequence is used for automatic allocation of session_id.
CREATE SEQUENCE IF NOT EXISTS device_session_id_seq START 1;
-- Stores data about devices.
CREATE TABLE IF NOT EXISTS device_devices (
-- The access token granted to this device. This has to be the primary key
-- so we can distinguish which device is making a given request.
access_token TEXT NOT NULL PRIMARY KEY,
-- The auto-allocated unique ID of the session identified by the access token.
-- This can be used as a secure substitution of the access token in situations
-- where data is associated with access tokens (e.g. transaction storage),
-- so we don't have to store users' access tokens everywhere.
session_id BIGINT NOT NULL DEFAULT nextval('device_session_id_seq'),
-- The device identifier. This only needs to uniquely identify a device for a given user, not globally.
-- access_tokens will be clobbered based on the device ID for a user.
device_id TEXT NOT NULL,
-- The Matrix user ID localpart for this device. This is preferable to storing the full user_id
-- as it is smaller, makes it clearer that we only manage devices for our own users, and may make
-- migration to different domain names easier.
localpart TEXT NOT NULL,
-- When this device was first recognised on the network, as a unix timestamp (ms resolution).
created_ts BIGINT NOT NULL,
-- The display name, human friendlier than device_id and updatable
display_name TEXT
-- TODO: device keys, device display names, last used ts and IP address?, token restrictions (if 3rd-party OAuth app)
);
-- Device IDs must be unique for a given user.
CREATE UNIQUE INDEX IF NOT EXISTS device_localpart_id_idx ON device_devices(localpart, device_id);
`
const insertDeviceSQL = "" +
"INSERT INTO device_devices(device_id, localpart, access_token, created_ts, display_name) VALUES ($1, $2, $3, $4, $5)" +
" RETURNING session_id"
const selectDeviceByTokenSQL = "" +
"SELECT session_id, device_id, localpart FROM device_devices WHERE access_token = $1"
const selectDeviceByIDSQL = "" +
"SELECT display_name FROM device_devices WHERE localpart = $1 and device_id = $2"
const selectDevicesByLocalpartSQL = "" +
"SELECT device_id, display_name FROM device_devices WHERE localpart = $1"
const updateDeviceNameSQL = "" +
"UPDATE device_devices SET display_name = $1 WHERE localpart = $2 AND device_id = $3"
const deleteDeviceSQL = "" +
"DELETE FROM device_devices WHERE device_id = $1 AND localpart = $2"
const deleteDevicesByLocalpartSQL = "" +
"DELETE FROM device_devices WHERE localpart = $1"
type devicesStatements struct {
insertDeviceStmt *sql.Stmt
selectDeviceByTokenStmt *sql.Stmt
selectDeviceByIDStmt *sql.Stmt
selectDevicesByLocalpartStmt *sql.Stmt
updateDeviceNameStmt *sql.Stmt
deleteDeviceStmt *sql.Stmt
deleteDevicesByLocalpartStmt *sql.Stmt
serverName gomatrixserverlib.ServerName
}
func (s *devicesStatements) prepare(db *sql.DB, server gomatrixserverlib.ServerName) (err error) {
_, err = db.Exec(devicesSchema)
if err != nil {
return
}
if s.insertDeviceStmt, err = db.Prepare(insertDeviceSQL); err != nil {
return
}
if s.selectDeviceByTokenStmt, err = db.Prepare(selectDeviceByTokenSQL); err != nil {
return
}
if s.selectDeviceByIDStmt, err = db.Prepare(selectDeviceByIDSQL); err != nil {
return
}
if s.selectDevicesByLocalpartStmt, err = db.Prepare(selectDevicesByLocalpartSQL); err != nil {
return
}
if s.updateDeviceNameStmt, err = db.Prepare(updateDeviceNameSQL); err != nil {
return
}
if s.deleteDeviceStmt, err = db.Prepare(deleteDeviceSQL); err != nil {
return
}
if s.deleteDevicesByLocalpartStmt, err = db.Prepare(deleteDevicesByLocalpartSQL); err != nil {
return
}
s.serverName = server
return
}
// insertDevice creates a new device. Returns an error if any device with the same access token already exists.
// Returns an error if the user already has a device with the given device ID.
// Returns the device on success.
func (s *devicesStatements) insertDevice(
ctx context.Context, txn *sql.Tx, id, localpart, accessToken string,
displayName *string,
) (*authtypes.Device, error) {
createdTimeMS := time.Now().UnixNano() / 1000000
var sessionID int64
stmt := common.TxStmt(txn, s.insertDeviceStmt)
if err := stmt.QueryRowContext(ctx, id, localpart, accessToken, createdTimeMS, displayName).Scan(&sessionID); err != nil {
return nil, err
}
return &authtypes.Device{
ID: id,
UserID: userutil.MakeUserID(localpart, s.serverName),
AccessToken: accessToken,
SessionID: sessionID,
}, nil
}
func (s *devicesStatements) deleteDevice(
ctx context.Context, txn *sql.Tx, id, localpart string,
) error {
stmt := common.TxStmt(txn, s.deleteDeviceStmt)
_, err := stmt.ExecContext(ctx, id, localpart)
return err
}
func (s *devicesStatements) deleteDevicesByLocalpart(
ctx context.Context, txn *sql.Tx, localpart string,
) error {
stmt := common.TxStmt(txn, s.deleteDevicesByLocalpartStmt)
_, err := stmt.ExecContext(ctx, localpart)
return err
}
func (s *devicesStatements) updateDeviceName(
ctx context.Context, txn *sql.Tx, localpart, deviceID string, displayName *string,
) error {
stmt := common.TxStmt(txn, s.updateDeviceNameStmt)
_, err := stmt.ExecContext(ctx, displayName, localpart, deviceID)
return err
}
func (s *devicesStatements) selectDeviceByToken(
ctx context.Context, accessToken string,
) (*authtypes.Device, error) {
var dev authtypes.Device
var localpart string
stmt := s.selectDeviceByTokenStmt
err := stmt.QueryRowContext(ctx, accessToken).Scan(&dev.SessionID, &dev.ID, &localpart)
if err == nil {
dev.UserID = userutil.MakeUserID(localpart, s.serverName)
dev.AccessToken = accessToken
}
return &dev, err
}
// selectDeviceByID retrieves a device from the database with the given user
// localpart and deviceID
func (s *devicesStatements) selectDeviceByID(
ctx context.Context, localpart, deviceID string,
) (*authtypes.Device, error) {
var dev authtypes.Device
var created sql.NullInt64
stmt := s.selectDeviceByIDStmt
err := stmt.QueryRowContext(ctx, localpart, deviceID).Scan(&created)
if err == nil {
dev.ID = deviceID
dev.UserID = userutil.MakeUserID(localpart, s.serverName)
}
return &dev, err
}
func (s *devicesStatements) selectDevicesByLocalpart(
ctx context.Context, localpart string,
) ([]authtypes.Device, error) {
devices := []authtypes.Device{}
rows, err := s.selectDevicesByLocalpartStmt.QueryContext(ctx, localpart)
if err != nil {
return devices, err
}
for rows.Next() {
var dev authtypes.Device
err = rows.Scan(&dev.ID)
if err != nil {
return devices, err
}
dev.UserID = userutil.MakeUserID(localpart, s.serverName)
devices = append(devices, dev)
}
return devices, nil
}
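
A minimal sketch, not part of the diff, of the transaction pattern these statement structs rely on: common.WithTransaction begins, commits or rolls back the transaction, while common.TxStmt binds an already-prepared statement to it (or returns the statement unchanged when txn is nil). The helper name is hypothetical; db and updateStmt are assumed to have been prepared as in devicesStatements above.

package main

import (
	"context"
	"database/sql"

	"github.com/matrix-org/dendrite/common"
)

// renameDevice updates a device's display name inside a single transaction.
func renameDevice(db *sql.DB, updateStmt *sql.Stmt, name, localpart, deviceID string) error {
	return common.WithTransaction(db, func(txn *sql.Tx) error {
		stmt := common.TxStmt(txn, updateStmt)
		_, err := stmt.ExecContext(context.Background(), name, localpart, deviceID)
		return err
	})
}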

View file

@ -0,0 +1,167 @@
// Copyright 2017 Vector Creations Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package devices
import (
"context"
"crypto/rand"
"database/sql"
"encoding/base64"
"github.com/matrix-org/dendrite/clientapi/auth/authtypes"
"github.com/matrix-org/dendrite/common"
"github.com/matrix-org/gomatrixserverlib"
)
// The length of generated device IDs
var deviceIDByteLength = 6
// Database represents a device database.
type Database struct {
db *sql.DB
devices devicesStatements
}
// NewDatabase creates a new device database
func NewDatabase(dataSourceName string, serverName gomatrixserverlib.ServerName) (*Database, error) {
var db *sql.DB
var err error
if db, err = sql.Open("postgres", dataSourceName); err != nil {
return nil, err
}
d := devicesStatements{}
if err = d.prepare(db, serverName); err != nil {
return nil, err
}
return &Database{db, d}, nil
}
// GetDeviceByAccessToken returns the device matching the given access token.
// Returns sql.ErrNoRows if no matching device was found.
func (d *Database) GetDeviceByAccessToken(
ctx context.Context, token string,
) (*authtypes.Device, error) {
return d.devices.selectDeviceByToken(ctx, token)
}
// GetDeviceByID returns the device matching the given ID.
// Returns sql.ErrNoRows if no matching device was found.
func (d *Database) GetDeviceByID(
ctx context.Context, localpart, deviceID string,
) (*authtypes.Device, error) {
return d.devices.selectDeviceByID(ctx, localpart, deviceID)
}
// GetDevicesByLocalpart returns the devices matching the given localpart.
func (d *Database) GetDevicesByLocalpart(
ctx context.Context, localpart string,
) ([]authtypes.Device, error) {
return d.devices.selectDevicesByLocalpart(ctx, localpart)
}
// CreateDevice makes a new device associated with the given user ID localpart.
// If there is already a device with the same device ID for this user, that access token will be revoked
// and replaced with the given accessToken. If the given accessToken is already in use for another device,
// an error will be returned.
// If no device ID is given one is generated.
// Returns the device on success.
func (d *Database) CreateDevice(
ctx context.Context, localpart string, deviceID *string, accessToken string,
displayName *string,
) (dev *authtypes.Device, returnErr error) {
if deviceID != nil {
returnErr = common.WithTransaction(d.db, func(txn *sql.Tx) error {
var err error
// Revoke existing tokens for this device
if err = d.devices.deleteDevice(ctx, txn, *deviceID, localpart); err != nil {
return err
}
dev, err = d.devices.insertDevice(ctx, txn, *deviceID, localpart, accessToken, displayName)
return err
})
} else {
// We generate device IDs in a loop in case the generated ID is already taken.
// We cap this at 5 attempts to ensure we don't spin forever.
var newDeviceID string
for i := 1; i <= 5; i++ {
newDeviceID, returnErr = generateDeviceID()
if returnErr != nil {
return
}
returnErr = common.WithTransaction(d.db, func(txn *sql.Tx) error {
var err error
dev, err = d.devices.insertDevice(ctx, txn, newDeviceID, localpart, accessToken, displayName)
return err
})
if returnErr == nil {
return
}
}
}
return
}
// generateDeviceID creates a new device ID. Returns an error if it failed to
// generate random bytes.
func generateDeviceID() (string, error) {
b := make([]byte, deviceIDByteLength)
_, err := rand.Read(b)
if err != nil {
return "", err
}
// url-safe no padding
return base64.RawURLEncoding.EncodeToString(b), nil
}
// UpdateDevice updates the given device with the display name.
// Returns SQL error if there are problems and nil on success.
func (d *Database) UpdateDevice(
ctx context.Context, localpart, deviceID string, displayName *string,
) error {
return common.WithTransaction(d.db, func(txn *sql.Tx) error {
return d.devices.updateDeviceName(ctx, txn, localpart, deviceID, displayName)
})
}
// RemoveDevice revokes a device by deleting the entry in the database
// matching with the given device ID and user ID localpart.
// If the device doesn't exist, it will not return an error
// If something went wrong during the deletion, it will return the SQL error.
func (d *Database) RemoveDevice(
ctx context.Context, deviceID, localpart string,
) error {
return common.WithTransaction(d.db, func(txn *sql.Tx) error {
if err := d.devices.deleteDevice(ctx, txn, deviceID, localpart); err != sql.ErrNoRows {
return err
}
return nil
})
}
// RemoveAllDevices revokes devices by deleting the entry in the
// database matching the given user ID localpart.
// If something went wrong during the deletion, it will return the SQL error.
func (d *Database) RemoveAllDevices(
ctx context.Context, localpart string,
) error {
return common.WithTransaction(d.db, func(txn *sql.Tx) error {
if err := d.devices.deleteDevicesByLocalpart(ctx, txn, localpart); err != sql.ErrNoRows {
return err
}
return nil
})
}
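
A minimal sketch, not part of the diff, of the device Database lifecycle defined above: create a device (letting CreateDevice generate the ID), authenticate by access token, then revoke it. The connection string, localpart and token are hypothetical placeholders.

package main

import (
	"context"
	"log"

	"github.com/matrix-org/dendrite/clientapi/auth/storage/devices"
	"github.com/matrix-org/gomatrixserverlib"
)

func main() {
	ctx := context.Background()
	db, err := devices.NewDatabase(
		"postgres://dendrite:itsasecret@localhost/dendrite_device?sslmode=disable",
		gomatrixserverlib.ServerName("example.com"),
	)
	if err != nil {
		log.Fatalf("failed to open device database: %v", err)
	}
	name := "My phone"
	// Passing a nil device ID asks CreateDevice to generate one.
	dev, err := db.CreateDevice(ctx, "alice", nil, "some_access_token", &name)
	if err != nil {
		log.Fatalf("failed to create device: %v", err)
	}
	// Access-token lookup is how client requests are authenticated.
	if _, err := db.GetDeviceByAccessToken(ctx, dev.AccessToken); err != nil {
		log.Fatalf("token lookup failed: %v", err)
	}
	// Logging out deletes the row and therefore revokes the token.
	if err := db.RemoveDevice(ctx, dev.ID, "alice"); err != nil {
		log.Fatalf("failed to remove device: %v", err)
	}
}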

View file

@ -1,273 +0,0 @@
// Copyright 2020 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package auth
import (
"context"
"encoding/json"
"net/http"
"sync"
"github.com/matrix-org/dendrite/setup/config"
"github.com/matrix-org/dendrite/userapi/api"
"github.com/matrix-org/gomatrixserverlib/spec"
"github.com/matrix-org/util"
"github.com/sirupsen/logrus"
"github.com/tidwall/gjson"
)
// Type represents an auth type
// https://matrix.org/docs/spec/client_server/r0.6.1#authentication-types
type Type interface {
// Name returns the name of the auth type e.g `m.login.password`
Name() string
// Login with the auth type, returning an error response on failure.
// Not all types support login, only m.login.password and m.login.token
// See https://matrix.org/docs/spec/client_server/r0.6.1#post-matrix-client-r0-login
// This function will be called when doing login and when doing 'sudo' style
// actions e.g deleting devices. The response must be a 401 as per:
// "If the homeserver decides that an attempt on a stage was unsuccessful, but the
// client may make a second attempt, it returns the same HTTP status 401 response as above,
// with the addition of the standard errcode and error fields describing the error."
//
// The returned cleanup function must be non-nil on success, and will be called after
// authorization has been completed. Its argument is the final result of authorization.
LoginFromJSON(ctx context.Context, reqBytes []byte) (login *Login, cleanup LoginCleanupFunc, errRes *util.JSONResponse)
// TODO: Extend to support Register() flow
// Register(ctx context.Context, sessionID string, req interface{})
}
type LoginCleanupFunc func(context.Context, *util.JSONResponse)
// LoginIdentifier represents identifier types
// https://matrix.org/docs/spec/client_server/r0.6.1#identifier-types
type LoginIdentifier struct {
Type string `json:"type"`
// when type = m.id.user or m.id.application_service
User string `json:"user"`
// when type = m.id.thirdparty
Medium string `json:"medium"`
Address string `json:"address"`
}
// Login represents the shared fields used in all forms of login/sudo endpoints.
type Login struct {
LoginIdentifier // Flat fields deprecated in favour of `identifier`.
Identifier LoginIdentifier `json:"identifier"`
// Both DeviceID and InitialDisplayName can be omitted, or empty strings ("")
// Thus a pointer is needed to differentiate between the two
InitialDisplayName *string `json:"initial_device_display_name"`
DeviceID *string `json:"device_id"`
}
// Username returns the user localpart/user_id in this request, if it exists.
func (r *Login) Username() string {
if r.Identifier.Type == "m.id.user" {
return r.Identifier.User
}
// deprecated but without it Element iOS won't log in
return r.User
}
// ThirdPartyID returns the 3PID medium and address for this login, if it exists.
func (r *Login) ThirdPartyID() (medium, address string) {
if r.Identifier.Type == "m.id.thirdparty" {
return r.Identifier.Medium, r.Identifier.Address
}
// deprecated
if r.Medium == "email" {
return "email", r.Address
}
return "", ""
}
type userInteractiveFlow struct {
Stages []string `json:"stages"`
}
// UserInteractive checks that the user is who they claim to be, via a UI auth.
// This is used for things like device deletion and password reset where
// the user already has a valid access token, but we want to double-check
// that it isn't stolen by re-authenticating them.
type UserInteractive struct {
sync.RWMutex
Flows []userInteractiveFlow
// Map of login type to implementation
Types map[string]Type
// Map of session ID to completed login types, will need to be extended in future
Sessions map[string][]string
}
func NewUserInteractive(userAccountAPI api.UserLoginAPI, cfg *config.ClientAPI) *UserInteractive {
typePassword := &LoginTypePassword{
UserAPI: userAccountAPI,
Config: cfg,
}
return &UserInteractive{
Flows: []userInteractiveFlow{
{
Stages: []string{typePassword.Name()},
},
},
Types: map[string]Type{
typePassword.Name(): typePassword,
},
Sessions: make(map[string][]string),
}
}
func (u *UserInteractive) IsSingleStageFlow(authType string) bool {
u.RLock()
defer u.RUnlock()
for _, f := range u.Flows {
if len(f.Stages) == 1 && f.Stages[0] == authType {
return true
}
}
return false
}
func (u *UserInteractive) AddCompletedStage(sessionID, authType string) {
u.Lock()
// TODO: Handle multi-stage flows
delete(u.Sessions, sessionID)
u.Unlock()
}
type Challenge struct {
Completed []string `json:"completed"`
Flows []userInteractiveFlow `json:"flows"`
Session string `json:"session"`
// TODO: Return any additional `params`
Params map[string]interface{} `json:"params"`
}
// Challenge returns an HTTP 401 with the supported flows for authenticating
func (u *UserInteractive) challenge(sessionID string) *util.JSONResponse {
u.RLock()
completed := u.Sessions[sessionID]
flows := u.Flows
u.RUnlock()
return &util.JSONResponse{
Code: 401,
JSON: Challenge{
Completed: completed,
Flows: flows,
Session: sessionID,
Params: make(map[string]interface{}),
},
}
}
// NewSession returns a challenge with a new session ID and remembers the session ID
func (u *UserInteractive) NewSession() *util.JSONResponse {
sessionID, err := GenerateAccessToken()
if err != nil {
logrus.WithError(err).Error("failed to generate session ID")
return &util.JSONResponse{
Code: http.StatusInternalServerError,
JSON: spec.InternalServerError{},
}
}
u.Lock()
u.Sessions[sessionID] = []string{}
u.Unlock()
return u.challenge(sessionID)
}
// ResponseWithChallenge mixes together a JSON body (e.g an error with errcode/message) with the
// standard challenge response.
func (u *UserInteractive) ResponseWithChallenge(sessionID string, response interface{}) *util.JSONResponse {
mixedObjects := make(map[string]interface{})
b, err := json.Marshal(response)
if err != nil {
return &util.JSONResponse{
Code: http.StatusInternalServerError,
JSON: spec.InternalServerError{},
}
}
_ = json.Unmarshal(b, &mixedObjects)
challenge := u.challenge(sessionID)
b, err = json.Marshal(challenge.JSON)
if err != nil {
return &util.JSONResponse{
Code: http.StatusInternalServerError,
JSON: spec.InternalServerError{},
}
}
_ = json.Unmarshal(b, &mixedObjects)
return &util.JSONResponse{
Code: 401,
JSON: mixedObjects,
}
}
// Verify returns an error/challenge response to send to the client, or nil if the user is authenticated.
// `bodyBytes` is the HTTP request body which must contain an `auth` key.
// Returns the login that was verified for additional checks if required.
func (u *UserInteractive) Verify(ctx context.Context, bodyBytes []byte, device *api.Device) (*Login, *util.JSONResponse) {
// TODO: rate limit
// "A client should first make a request with no auth parameter. The homeserver returns an HTTP 401 response, with a JSON body"
// https://matrix.org/docs/spec/client_server/r0.6.1#user-interactive-api-in-the-rest-api
hasResponse := gjson.GetBytes(bodyBytes, "auth").Exists()
if !hasResponse {
return nil, u.NewSession()
}
// extract the type so we know which login type to use
authType := gjson.GetBytes(bodyBytes, "auth.type").Str
u.RLock()
loginType, ok := u.Types[authType]
u.RUnlock()
if !ok {
return nil, &util.JSONResponse{
Code: http.StatusBadRequest,
JSON: spec.BadJSON("Unknown auth.type: " + authType),
}
}
// retrieve the session
sessionID := gjson.GetBytes(bodyBytes, "auth.session").Str
u.RLock()
_, ok = u.Sessions[sessionID]
u.RUnlock()
if !ok {
// if the login type is part of a single stage flow then allow them to omit the session ID
if !u.IsSingleStageFlow(authType) {
return nil, &util.JSONResponse{
Code: http.StatusBadRequest,
JSON: spec.Unknown("The auth.session is missing or unknown."),
}
}
}
login, cleanup, resErr := loginType.LoginFromJSON(ctx, []byte(gjson.GetBytes(bodyBytes, "auth").Raw))
if resErr != nil {
return nil, u.ResponseWithChallenge(sessionID, resErr.JSON)
}
u.AddCompletedStage(sessionID, authType)
cleanup(ctx, nil)
// TODO: Check if there's more stages to go and return an error
return login, nil
}
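
A minimal sketch, not part of the diff, of how a handler might drive the user-interactive auth flow above when protecting a "sudo"-style endpoint. The import path for this auth package, the helper name and the surrounding handler shape are assumptions; uia is assumed to have been built with NewUserInteractive.

package main

import (
	"context"

	"github.com/matrix-org/dendrite/clientapi/auth"
	"github.com/matrix-org/dendrite/userapi/api"
	"github.com/matrix-org/util"
)

// checkReauth asks the user to re-authenticate before a sensitive action.
func checkReauth(ctx context.Context, uia *auth.UserInteractive, device *api.Device, body []byte) *util.JSONResponse {
	login, errRes := uia.Verify(ctx, body, device)
	if errRes != nil {
		// Either a 401 challenge listing the supported flows, or an error
		// mixed with the challenge; hand it straight back to the client.
		return errRes
	}
	// Authentication completed; `login` records who re-authenticated.
	util.GetLogger(ctx).Infof("re-authenticated as %q", login.Username())
	return nil
}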

View file

@ -1,235 +0,0 @@
package auth
import (
"context"
"encoding/json"
"fmt"
"testing"
"github.com/matrix-org/dendrite/setup/config"
"github.com/matrix-org/dendrite/userapi/api"
"github.com/matrix-org/gomatrixserverlib/fclient"
"github.com/matrix-org/gomatrixserverlib/spec"
"github.com/matrix-org/util"
)
var (
ctx = context.Background()
serverName = spec.ServerName("example.com")
// space separated localpart+password -> account
lookup = make(map[string]*api.Account)
device = &api.Device{
AccessToken: "flibble",
DisplayName: "My Device",
ID: "device_id_goes_here",
}
)
type fakeAccountDatabase struct{}
func (d *fakeAccountDatabase) PerformPasswordUpdate(ctx context.Context, req *api.PerformPasswordUpdateRequest, res *api.PerformPasswordUpdateResponse) error {
return nil
}
func (d *fakeAccountDatabase) PerformAccountDeactivation(ctx context.Context, req *api.PerformAccountDeactivationRequest, res *api.PerformAccountDeactivationResponse) error {
return nil
}
func (d *fakeAccountDatabase) QueryAccountByPassword(ctx context.Context, req *api.QueryAccountByPasswordRequest, res *api.QueryAccountByPasswordResponse) error {
acc, ok := lookup[req.Localpart+" "+req.PlaintextPassword]
if !ok {
return fmt.Errorf("unknown user/password")
}
res.Account = acc
res.Exists = true
return nil
}
func (d *fakeAccountDatabase) QueryAccountByLocalpart(ctx context.Context, req *api.QueryAccountByLocalpartRequest, res *api.QueryAccountByLocalpartResponse) error {
return nil
}
func (d *fakeAccountDatabase) PerformAccountCreation(ctx context.Context, req *api.PerformAccountCreationRequest, res *api.PerformAccountCreationResponse) error {
return nil
}
func setup() *UserInteractive {
cfg := &config.ClientAPI{
Matrix: &config.Global{
SigningIdentity: fclient.SigningIdentity{
ServerName: serverName,
},
},
}
return NewUserInteractive(&fakeAccountDatabase{}, cfg)
}
func TestUserInteractiveChallenge(t *testing.T) {
uia := setup()
// no auth key results in a challenge
_, errRes := uia.Verify(ctx, []byte(`{}`), device)
if errRes == nil {
t.Fatalf("Verify succeeded with {} but expected failure")
}
if errRes.Code != 401 {
t.Errorf("Expected HTTP 401, got %d", errRes.Code)
}
}
func TestUserInteractivePasswordLogin(t *testing.T) {
uia := setup()
// valid password login succeeds when an account exists
lookup["alice herpassword"] = &api.Account{
Localpart: "alice",
ServerName: serverName,
UserID: fmt.Sprintf("@alice:%s", serverName),
}
// valid password requests
testCases := []json.RawMessage{
// deprecated form
[]byte(`{
"auth": {
"type": "m.login.password",
"user": "alice",
"password": "herpassword"
}
}`),
// new form
[]byte(`{
"auth": {
"type": "m.login.password",
"identifier": {
"type": "m.id.user",
"user": "alice"
},
"password": "herpassword"
}
}`),
}
for _, tc := range testCases {
_, errRes := uia.Verify(ctx, tc, device)
if errRes != nil {
t.Errorf("Verify failed but expected success for request: %s - got %+v", string(tc), errRes)
}
}
}
func TestUserInteractivePasswordBadLogin(t *testing.T) {
uia := setup()
// password login fails when an account exists but is specced wrong
lookup["bob hispassword"] = &api.Account{
Localpart: "bob",
ServerName: serverName,
UserID: fmt.Sprintf("@bob:%s", serverName),
}
// invalid password requests
testCases := []struct {
body json.RawMessage
wantRes util.JSONResponse
}{
{
// fields not in an auth dict
body: []byte(`{
"type": "m.login.password",
"user": "bob",
"password": "hispassword"
}`),
wantRes: util.JSONResponse{
Code: 401,
},
},
{
// wrong type
body: []byte(`{
"auth": {
"type": "m.login.not_password",
"identifier": {
"type": "m.id.user",
"user": "bob"
},
"password": "hispassword"
}
}`),
wantRes: util.JSONResponse{
Code: 400,
},
},
{
// identifier type is wrong
body: []byte(`{
"auth": {
"type": "m.login.password",
"identifier": {
"type": "m.id.thirdparty",
"user": "bob"
},
"password": "hispassword"
}
}`),
wantRes: util.JSONResponse{
Code: 401,
},
},
{
// wrong password
body: []byte(`{
"auth": {
"type": "m.login.password",
"identifier": {
"type": "m.id.user",
"user": "bob"
},
"password": "not_his_password"
}
}`),
wantRes: util.JSONResponse{
Code: 401,
},
},
}
for _, tc := range testCases {
_, errRes := uia.Verify(ctx, tc.body, device)
if errRes == nil {
t.Errorf("Verify succeeded but expected failure for request: %s", string(tc.body))
continue
}
if errRes.Code != tc.wantRes.Code {
t.Errorf("got code %d want code %d for request: %s", errRes.Code, tc.wantRes.Code, string(tc.body))
}
}
}
func TestUserInteractive_AddCompletedStage(t *testing.T) {
tests := []struct {
name string
sessionID string
}{
{
name: "first user",
sessionID: util.RandomString(8),
},
{
name: "second user",
sessionID: util.RandomString(8),
},
{
name: "third user",
sessionID: util.RandomString(8),
},
}
u := setup()
ctx := context.Background()
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
_, resp := u.Verify(ctx, []byte("{}"), nil)
challenge, ok := resp.JSON.(Challenge)
if !ok {
t.Fatalf("expected a Challenge, got %T", resp.JSON)
}
if len(challenge.Completed) > 0 {
t.Fatalf("expected 0 completed stages, got %d", len(challenge.Completed))
}
u.AddCompletedStage(tt.sessionID, "")
})
}
}

View file

@ -15,54 +15,60 @@
package clientapi
import (
"github.com/matrix-org/dendrite/internal/httputil"
"github.com/matrix-org/dendrite/setup/config"
"github.com/matrix-org/dendrite/setup/process"
userapi "github.com/matrix-org/dendrite/userapi/api"
"github.com/matrix-org/gomatrixserverlib/fclient"
appserviceAPI "github.com/matrix-org/dendrite/appservice/api"
"github.com/matrix-org/dendrite/clientapi/api"
"github.com/matrix-org/dendrite/clientapi/auth/storage/accounts"
"github.com/matrix-org/dendrite/clientapi/auth/storage/devices"
"github.com/matrix-org/dendrite/clientapi/consumers"
"github.com/matrix-org/dendrite/clientapi/producers"
"github.com/matrix-org/dendrite/clientapi/routing"
federationAPI "github.com/matrix-org/dendrite/federationapi/api"
"github.com/matrix-org/dendrite/internal/transactions"
"github.com/matrix-org/dendrite/common/basecomponent"
"github.com/matrix-org/dendrite/common/transactions"
federationSenderAPI "github.com/matrix-org/dendrite/federationsender/api"
roomserverAPI "github.com/matrix-org/dendrite/roomserver/api"
"github.com/matrix-org/dendrite/setup/jetstream"
typingServerAPI "github.com/matrix-org/dendrite/typingserver/api"
"github.com/matrix-org/gomatrixserverlib"
"github.com/sirupsen/logrus"
)
// AddPublicRoutes sets up and registers HTTP handlers for the ClientAPI component.
func AddPublicRoutes(
processContext *process.ProcessContext,
routers httputil.Routers,
cfg *config.Dendrite,
natsInstance *jetstream.NATSInstance,
federation fclient.FederationClient,
rsAPI roomserverAPI.ClientRoomserverAPI,
asAPI appserviceAPI.AppServiceInternalAPI,
// SetupClientAPIComponent sets up and registers HTTP handlers for the ClientAPI
// component.
func SetupClientAPIComponent(
base *basecomponent.BaseDendrite,
deviceDB *devices.Database,
accountsDB *accounts.Database,
federation *gomatrixserverlib.FederationClient,
keyRing *gomatrixserverlib.KeyRing,
aliasAPI roomserverAPI.RoomserverAliasAPI,
inputAPI roomserverAPI.RoomserverInputAPI,
queryAPI roomserverAPI.RoomserverQueryAPI,
typingInputAPI typingServerAPI.TypingServerInputAPI,
asAPI appserviceAPI.AppServiceQueryAPI,
transactionsCache *transactions.Cache,
fsAPI federationAPI.ClientFederationAPI,
userAPI userapi.ClientUserAPI,
userDirectoryProvider userapi.QuerySearchProfilesAPI,
extRoomsProvider api.ExtraPublicRoomsProvider, enableMetrics bool,
fedSenderAPI federationSenderAPI.FederationSenderQueryAPI,
) {
js, natsClient := natsInstance.Prepare(processContext, &cfg.Global.JetStream)
roomserverProducer := producers.NewRoomserverProducer(inputAPI)
typingProducer := producers.NewTypingServerProducer(typingInputAPI)
userUpdateProducer := &producers.UserUpdateProducer{
Producer: base.KafkaProducer,
Topic: string(base.Cfg.Kafka.Topics.UserUpdates),
}
syncProducer := &producers.SyncAPIProducer{
JetStream: js,
TopicReceiptEvent: cfg.Global.JetStream.Prefixed(jetstream.OutputReceiptEvent),
TopicSendToDeviceEvent: cfg.Global.JetStream.Prefixed(jetstream.OutputSendToDeviceEvent),
TopicTypingEvent: cfg.Global.JetStream.Prefixed(jetstream.OutputTypingEvent),
TopicPresenceEvent: cfg.Global.JetStream.Prefixed(jetstream.OutputPresenceEvent),
UserAPI: userAPI,
ServerName: cfg.Global.ServerName,
Producer: base.KafkaProducer,
Topic: string(base.Cfg.Kafka.Topics.OutputClientData),
}
consumer := consumers.NewOutputRoomEventConsumer(
base.Cfg, base.KafkaConsumer, accountsDB, queryAPI,
)
if err := consumer.Start(); err != nil {
logrus.WithError(err).Panicf("failed to start room server consumer")
}
routing.Setup(
routers,
cfg, rsAPI, asAPI,
userAPI, userDirectoryProvider, federation,
syncProducer, transactionsCache, fsAPI,
extRoomsProvider, natsClient, enableMetrics,
base.APIMux, *base.Cfg, roomserverProducer, queryAPI, aliasAPI, asAPI,
accountsDB, deviceDB, federation, *keyRing, userUpdateProducer,
syncProducer, typingProducer, transactionsCache, fedSenderAPI,
)
}

File diff suppressed because it is too large

View file

@ -0,0 +1,144 @@
// Copyright 2017 Vector Creations Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package consumers
import (
"context"
"encoding/json"
"github.com/matrix-org/dendrite/clientapi/auth/storage/accounts"
"github.com/matrix-org/dendrite/common"
"github.com/matrix-org/dendrite/common/config"
"github.com/matrix-org/dendrite/roomserver/api"
"github.com/matrix-org/gomatrixserverlib"
log "github.com/sirupsen/logrus"
sarama "gopkg.in/Shopify/sarama.v1"
)
// OutputRoomEventConsumer consumes events that originated in the room server.
type OutputRoomEventConsumer struct {
roomServerConsumer *common.ContinualConsumer
db *accounts.Database
query api.RoomserverQueryAPI
serverName string
}
// NewOutputRoomEventConsumer creates a new OutputRoomEventConsumer. Call Start() to begin consuming from room servers.
func NewOutputRoomEventConsumer(
cfg *config.Dendrite,
kafkaConsumer sarama.Consumer,
store *accounts.Database,
queryAPI api.RoomserverQueryAPI,
) *OutputRoomEventConsumer {
consumer := common.ContinualConsumer{
Topic: string(cfg.Kafka.Topics.OutputRoomEvent),
Consumer: kafkaConsumer,
PartitionStore: store,
}
s := &OutputRoomEventConsumer{
roomServerConsumer: &consumer,
db: store,
query: queryAPI,
serverName: string(cfg.Matrix.ServerName),
}
consumer.ProcessMessage = s.onMessage
return s
}
// Start consuming from room servers
func (s *OutputRoomEventConsumer) Start() error {
return s.roomServerConsumer.Start()
}
// onMessage is called when the client API server receives a new event from the room server output log.
// It is not safe for this function to be called from multiple goroutines, or else the
// sync stream position may race and be incorrectly calculated.
func (s *OutputRoomEventConsumer) onMessage(msg *sarama.ConsumerMessage) error {
// Parse out the event JSON
var output api.OutputEvent
if err := json.Unmarshal(msg.Value, &output); err != nil {
// If the message was invalid, log it and move on to the next message in the stream
log.WithError(err).Errorf("roomserver output log: message parse failure")
return nil
}
if output.Type != api.OutputTypeNewRoomEvent {
log.WithField("type", output.Type).Debug(
"roomserver output log: ignoring unknown output type",
)
return nil
}
ev := output.NewRoomEvent.Event
log.WithFields(log.Fields{
"event_id": ev.EventID(),
"room_id": ev.RoomID(),
"type": ev.Type(),
}).Info("received event from roomserver")
events, err := s.lookupStateEvents(output.NewRoomEvent.AddsStateEventIDs, ev)
if err != nil {
return err
}
return s.db.UpdateMemberships(context.TODO(), events, output.NewRoomEvent.RemovesStateEventIDs)
}
// lookupStateEvents looks up the state events that are added by a new event.
func (s *OutputRoomEventConsumer) lookupStateEvents(
addsStateEventIDs []string, event gomatrixserverlib.Event,
) ([]gomatrixserverlib.Event, error) {
// Fast path if there aren't any new state events.
if len(addsStateEventIDs) == 0 {
// If the event is a membership update (e.g. for a profile update), it won't
// show up in AddsStateEventIDs, so we need to add it manually
if event.Type() == "m.room.member" {
return []gomatrixserverlib.Event{event}, nil
}
return nil, nil
}
// Fast path if the only state event added is the event itself.
if len(addsStateEventIDs) == 1 && addsStateEventIDs[0] == event.EventID() {
return []gomatrixserverlib.Event{event}, nil
}
result := []gomatrixserverlib.Event{}
missing := []string{}
for _, id := range addsStateEventIDs {
// Append the current event to the results if its ID is in the events list
if id == event.EventID() {
result = append(result, event)
} else {
// If the event isn't the current one, add it to the list of events
// to retrieve from the roomserver
missing = append(missing, id)
}
}
// Request the missing events from the roomserver
eventReq := api.QueryEventsByIDRequest{EventIDs: missing}
var eventResp api.QueryEventsByIDResponse
if err := s.query.QueryEventsByID(context.TODO(), &eventReq, &eventResp); err != nil {
return nil, err
}
result = append(result, eventResp.Events...)
return result, nil
}
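
A minimal sketch, not part of the diff, of standing this consumer up against a Kafka broker. The broker address and the helper function are hypothetical; cfg, store and queryAPI are assumed to have been initialised elsewhere, as the component setup code earlier in this compare does via BaseDendrite.

package main

import (
	"github.com/matrix-org/dendrite/clientapi/auth/storage/accounts"
	"github.com/matrix-org/dendrite/clientapi/consumers"
	"github.com/matrix-org/dendrite/common/config"
	"github.com/matrix-org/dendrite/roomserver/api"
	log "github.com/sirupsen/logrus"
	sarama "gopkg.in/Shopify/sarama.v1"
)

// startRoomEventConsumer connects to Kafka and starts consuming the
// roomserver output log.
func startRoomEventConsumer(cfg *config.Dendrite, store *accounts.Database, queryAPI api.RoomserverQueryAPI) {
	// A nil sarama config falls back to sarama's defaults.
	kafkaConsumer, err := sarama.NewConsumer([]string{"localhost:9092"}, nil)
	if err != nil {
		log.WithError(err).Panic("failed to connect to kafka")
	}
	consumer := consumers.NewOutputRoomEventConsumer(cfg, kafkaConsumer, store, queryAPI)
	if err := consumer.Start(); err != nil {
		log.WithError(err).Panic("failed to start room server consumer")
	}
}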

View file

@ -16,47 +16,31 @@ package httputil
import (
"encoding/json"
"io"
"net/http"
"unicode/utf8"
"github.com/matrix-org/gomatrixserverlib/spec"
"github.com/matrix-org/dendrite/clientapi/jsonerror"
"github.com/matrix-org/util"
)
// UnmarshalJSONRequest into the given interface pointer. Returns an error JSON response if
// there was a problem unmarshalling. Calling this function consumes the request body.
func UnmarshalJSONRequest(req *http.Request, iface interface{}) *util.JSONResponse {
// encoding/json allows invalid utf-8, matrix does not
// https://matrix.org/docs/spec/client_server/r0.6.1#api-standards
body, err := io.ReadAll(req.Body)
if err != nil {
util.GetLogger(req.Context()).WithError(err).Error("io.ReadAll failed")
return &util.JSONResponse{
Code: http.StatusInternalServerError,
JSON: spec.InternalServerError{},
}
}
return UnmarshalJSON(body, iface)
}
func UnmarshalJSON(body []byte, iface interface{}) *util.JSONResponse {
if !utf8.Valid(body) {
return &util.JSONResponse{
Code: http.StatusBadRequest,
JSON: spec.NotJSON("Body contains invalid UTF-8"),
}
}
if err := json.Unmarshal(body, iface); err != nil {
if err := json.NewDecoder(req.Body).Decode(iface); err != nil {
// TODO: We may want to suppress the Error() return in production? It's useful when
// debugging because an error will be produced for both invalid/malformed JSON AND
// valid JSON with incorrect types for values.
return &util.JSONResponse{
Code: http.StatusBadRequest,
JSON: spec.BadJSON("The request body could not be decoded into valid JSON. " + err.Error()),
JSON: jsonerror.BadJSON("The request body could not be decoded into valid JSON. " + err.Error()),
}
}
return nil
}
// LogThenError logs the given error then returns a matrix-compliant 500 internal server error response.
// This should be used to log fatal errors which require investigation. It should not be used
// to log client validation errors, etc.
func LogThenError(req *http.Request, err error) util.JSONResponse {
util.GetLogger(req.Context()).WithError(err).Error("request failed")
return jsonerror.InternalServerError()
}
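
A minimal sketch, not part of the diff, of using UnmarshalJSONRequest from a handler. The request type, its field and the import path for this httputil package are assumptions.

package main

import (
	"net/http"

	"github.com/matrix-org/dendrite/clientapi/httputil"
	"github.com/matrix-org/util"
)

type setDisplayNameRequest struct {
	DisplayName string `json:"displayname"`
}

// setDisplayName decodes the request body, returning a ready-made error
// response (e.g. M_BAD_JSON) when decoding fails.
func setDisplayName(req *http.Request) util.JSONResponse {
	var r setDisplayNameRequest
	if errRes := httputil.UnmarshalJSONRequest(req, &r); errRes != nil {
		return *errRes
	}
	return util.JSONResponse{Code: http.StatusOK, JSON: struct{}{}}
}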

View file

@ -32,7 +32,7 @@ func ParseTSParam(req *http.Request) (time.Time, error) {
// The parameter exists, parse into a Time object
ts, err := strconv.ParseInt(tsStr, 10, 64)
if err != nil {
return time.Time{}, fmt.Errorf("param 'ts' is no valid int (%s)", err.Error())
return time.Time{}, fmt.Errorf("Param 'ts' is no valid int (%s)", err.Error())
}
return time.Unix(ts/1000, 0), nil

View file

@ -0,0 +1,148 @@
// Copyright 2017 Vector Creations Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jsonerror
import (
"fmt"
"net/http"
"github.com/matrix-org/util"
)
// MatrixError represents the "standard error response" in Matrix.
// http://matrix.org/docs/spec/client_server/r0.2.0.html#api-standards
type MatrixError struct {
ErrCode string `json:"errcode"`
Err string `json:"error"`
}
func (e MatrixError) Error() string {
return fmt.Sprintf("%s: %s", e.ErrCode, e.Err)
}
// InternalServerError returns a 500 Internal Server Error in a matrix-compliant
// format.
func InternalServerError() util.JSONResponse {
return util.JSONResponse{
Code: http.StatusInternalServerError,
JSON: Unknown("Internal Server Error"),
}
}
// Unknown is an unexpected error
func Unknown(msg string) *MatrixError {
return &MatrixError{"M_UNKNOWN", msg}
}
// Forbidden is an error when the client tries to access a resource
// they are not allowed to access.
func Forbidden(msg string) *MatrixError {
return &MatrixError{"M_FORBIDDEN", msg}
}
// BadJSON is an error when the client supplies malformed JSON.
func BadJSON(msg string) *MatrixError {
return &MatrixError{"M_BAD_JSON", msg}
}
// NotJSON is an error when the client supplies something that is not JSON
// to a JSON endpoint.
func NotJSON(msg string) *MatrixError {
return &MatrixError{"M_NOT_JSON", msg}
}
// NotFound is an error when the client tries to access an unknown resource.
func NotFound(msg string) *MatrixError {
return &MatrixError{"M_NOT_FOUND", msg}
}
// MissingArgument is an error when the client tries to access a resource
// without providing an argument that is required.
func MissingArgument(msg string) *MatrixError {
return &MatrixError{"M_MISSING_ARGUMENT", msg}
}
// InvalidArgumentValue is an error when the client tries to provide an
// invalid value for a valid argument
func InvalidArgumentValue(msg string) *MatrixError {
return &MatrixError{"M_INVALID_ARGUMENT_VALUE", msg}
}
// MissingToken is an error when the client tries to access a resource which
// requires authentication without supplying credentials.
func MissingToken(msg string) *MatrixError {
return &MatrixError{"M_MISSING_TOKEN", msg}
}
// UnknownToken is an error when the client tries to access a resource which
// requires authentication and supplies an unrecognised token
func UnknownToken(msg string) *MatrixError {
return &MatrixError{"M_UNKNOWN_TOKEN", msg}
}
// WeakPassword is an error which is returned when the client tries to register
// using a weak password. http://matrix.org/docs/spec/client_server/r0.2.0.html#password-based
func WeakPassword(msg string) *MatrixError {
return &MatrixError{"M_WEAK_PASSWORD", msg}
}
// InvalidUsername is an error returned when the client tries to register an
// invalid username
func InvalidUsername(msg string) *MatrixError {
return &MatrixError{"M_INVALID_USERNAME", msg}
}
// UserInUse is an error returned when the client tries to register a
// username that already exists
func UserInUse(msg string) *MatrixError {
return &MatrixError{"M_USER_IN_USE", msg}
}
// ASExclusive is an error returned when an application service tries to
// register a username that is outside of its registered namespace, or if a
// user attempts to register a username or room alias within an exclusive
// namespace.
func ASExclusive(msg string) *MatrixError {
return &MatrixError{"M_EXCLUSIVE", msg}
}
// GuestAccessForbidden is an error which is returned when the client is
// forbidden from accessing a resource as a guest.
func GuestAccessForbidden(msg string) *MatrixError {
return &MatrixError{"M_GUEST_ACCESS_FORBIDDEN", msg}
}
// LimitExceededError is a rate-limiting error.
type LimitExceededError struct {
MatrixError
RetryAfterMS int64 `json:"retry_after_ms,omitempty"`
}
// LimitExceeded is an error when the client tries to send events too quickly.
func LimitExceeded(msg string, retryAfterMS int64) *LimitExceededError {
return &LimitExceededError{
MatrixError: MatrixError{"M_LIMIT_EXCEEDED", msg},
RetryAfterMS: retryAfterMS,
}
}
// NotTrusted is an error which is returned when the client asks the server to
// proxy a request (e.g. 3PID association) to a server that isn't trusted
func NotTrusted(serverName string) *MatrixError {
return &MatrixError{
ErrCode: "M_SERVER_NOT_TRUSTED",
Err: fmt.Sprintf("Untrusted server '%s'", serverName),
}
}
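
A minimal sketch, not part of the diff, of pairing these error constructors with util.JSONResponse in a handler. The helper functions and messages are hypothetical.

package main

import (
	"net/http"

	"github.com/matrix-org/dendrite/clientapi/jsonerror"
	"github.com/matrix-org/util"
)

// roomNotFound builds a 404 with a standard Matrix error body.
func roomNotFound(roomID string) util.JSONResponse {
	return util.JSONResponse{
		Code: http.StatusNotFound,
		JSON: jsonerror.NotFound("Room " + roomID + " does not exist"),
	}
}

// tooFast serialises as
// {"errcode":"M_LIMIT_EXCEEDED","error":"...","retry_after_ms":500}.
func tooFast() util.JSONResponse {
	return util.JSONResponse{
		Code: http.StatusTooManyRequests,
		JSON: jsonerror.LimitExceeded("You are sending events too quickly", 500),
	}
}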

View file

@ -0,0 +1,44 @@
// Copyright 2017 Vector Creations Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jsonerror
import (
"encoding/json"
"testing"
)
func TestLimitExceeded(t *testing.T) {
e := LimitExceeded("too fast", 5000)
jsonBytes, err := json.Marshal(&e)
if err != nil {
t.Fatalf("TestLimitExceeded: Failed to marshal LimitExceeded error. %s", err.Error())
}
want := `{"errcode":"M_LIMIT_EXCEEDED","error":"too fast","retry_after_ms":5000}`
if string(jsonBytes) != want {
t.Errorf("TestLimitExceeded: want %s, got %s", want, string(jsonBytes))
}
}
func TestForbidden(t *testing.T) {
e := Forbidden("you shall not pass")
jsonBytes, err := json.Marshal(&e)
if err != nil {
t.Fatalf("TestForbidden: Failed to marshal Forbidden error. %s", err.Error())
}
want := `{"errcode":"M_FORBIDDEN","error":"you shall not pass"}`
if string(jsonBytes) != want {
t.Errorf("TestForbidden: want %s, got %s", want, string(jsonBytes))
}
}
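The NotTrusted constructor defined above is not exercised by these tests. A sketch of an analogous check, not part of this changeset and intended for the same package jsonerror test file, with the expected body derived from the ErrCode and message format shown in errors.go:

func TestNotTrusted(t *testing.T) {
	e := NotTrusted("example.com")
	jsonBytes, err := json.Marshal(&e)
	if err != nil {
		t.Fatalf("TestNotTrusted: Failed to marshal NotTrusted error. %s", err.Error())
	}
	want := `{"errcode":"M_SERVER_NOT_TRUSTED","error":"Untrusted server 'example.com'"}`
	if string(jsonBytes) != want {
		t.Errorf("TestNotTrusted: want %s, got %s", want, string(jsonBytes))
	}
}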

View file

@ -0,0 +1,112 @@
// Copyright 2017 Vector Creations Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package producers
import (
"context"
"github.com/matrix-org/dendrite/roomserver/api"
"github.com/matrix-org/gomatrixserverlib"
)
// RoomserverProducer produces events for the roomserver to consume.
type RoomserverProducer struct {
InputAPI api.RoomserverInputAPI
}
// NewRoomserverProducer creates a new RoomserverProducer
func NewRoomserverProducer(inputAPI api.RoomserverInputAPI) *RoomserverProducer {
return &RoomserverProducer{
InputAPI: inputAPI,
}
}
// SendEvents writes the given events to the roomserver input log. The events are written with KindNew.
func (c *RoomserverProducer) SendEvents(
ctx context.Context, events []gomatrixserverlib.Event, sendAsServer gomatrixserverlib.ServerName,
txnID *api.TransactionID,
) (string, error) {
ires := make([]api.InputRoomEvent, len(events))
for i, event := range events {
ires[i] = api.InputRoomEvent{
Kind: api.KindNew,
Event: event,
AuthEventIDs: event.AuthEventIDs(),
SendAsServer: string(sendAsServer),
TransactionID: txnID,
}
}
return c.SendInputRoomEvents(ctx, ires)
}
// SendEventWithState writes an event with KindNew to the roomserver input log
// with the state at the event as KindOutlier before it.
func (c *RoomserverProducer) SendEventWithState(
ctx context.Context, state gomatrixserverlib.RespState, event gomatrixserverlib.Event,
) error {
outliers, err := state.Events()
if err != nil {
return err
}
ires := make([]api.InputRoomEvent, len(outliers)+1)
for i, outlier := range outliers {
ires[i] = api.InputRoomEvent{
Kind: api.KindOutlier,
Event: outlier,
AuthEventIDs: outlier.AuthEventIDs(),
}
}
stateEventIDs := make([]string, len(state.StateEvents))
for i := range state.StateEvents {
stateEventIDs[i] = state.StateEvents[i].EventID()
}
ires[len(outliers)] = api.InputRoomEvent{
Kind: api.KindNew,
Event: event,
AuthEventIDs: event.AuthEventIDs(),
HasState: true,
StateEventIDs: stateEventIDs,
}
_, err = c.SendInputRoomEvents(ctx, ires)
return err
}
// SendInputRoomEvents writes the given input room events to the roomserver input API.
func (c *RoomserverProducer) SendInputRoomEvents(
ctx context.Context, ires []api.InputRoomEvent,
) (eventID string, err error) {
request := api.InputRoomEventsRequest{InputRoomEvents: ires}
var response api.InputRoomEventsResponse
err = c.InputAPI.InputRoomEvents(ctx, &request, &response)
eventID = response.EventID
return
}
// SendInvite writes the invite event to the roomserver input API.
// This should only be needed for invite events that occur outside of a known room.
// If we are in the room then the event should be sent using the SendEvents method.
func (c *RoomserverProducer) SendInvite(
ctx context.Context, inviteEvent gomatrixserverlib.Event,
) error {
request := api.InputRoomEventsRequest{
InputInviteEvents: []api.InputInviteEvent{{Event: inviteEvent}},
}
var response api.InputRoomEventsResponse
return c.InputAPI.InputRoomEvents(ctx, &request, &response)
}
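A hedged usage sketch, not part of this changeset, of the split described in the SendInvite comment: events for rooms we already participate in go through SendEvents (KindNew), while an invite for an unknown room goes through SendInvite. The import path and caller name are assumptions, based on the clientapi/producers package seen elsewhere in this diff.

package example

import (
	"context"

	"github.com/matrix-org/dendrite/clientapi/producers"
	"github.com/matrix-org/gomatrixserverlib"
)

// forwardEvent is a hypothetical caller: inRoom reports whether we are
// already a participant in the event's room. An empty sendAsServer and a
// nil transaction ID simply mean "no federation send-as, no client txn".
func forwardEvent(
	ctx context.Context, p *producers.RoomserverProducer,
	ev gomatrixserverlib.Event, inRoom bool,
) error {
	if inRoom {
		_, err := p.SendEvents(ctx, []gomatrixserverlib.Event{ev}, "", nil)
		return err
	}
	return p.SendInvite(ctx, ev)
}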

View file

@ -15,148 +15,36 @@
package producers
import (
"context"
"encoding/json"
"fmt"
"strconv"
"time"
"github.com/matrix-org/gomatrixserverlib"
"github.com/matrix-org/gomatrixserverlib/spec"
"github.com/nats-io/nats.go"
log "github.com/sirupsen/logrus"
"github.com/matrix-org/dendrite/common"
"github.com/matrix-org/dendrite/setup/jetstream"
"github.com/matrix-org/dendrite/syncapi/types"
userapi "github.com/matrix-org/dendrite/userapi/api"
sarama "gopkg.in/Shopify/sarama.v1"
)
// SyncAPIProducer produces events for the sync API server to consume
type SyncAPIProducer struct {
TopicReceiptEvent string
TopicSendToDeviceEvent string
TopicTypingEvent string
TopicPresenceEvent string
JetStream nats.JetStreamContext
ServerName spec.ServerName
UserAPI userapi.ClientUserAPI
Topic string
Producer sarama.SyncProducer
}
func (p *SyncAPIProducer) SendReceipt(
ctx context.Context,
userID, roomID, eventID, receiptType string, timestamp spec.Timestamp,
) error {
m := &nats.Msg{
Subject: p.TopicReceiptEvent,
Header: nats.Header{},
// SendData sends account data to the sync API server
func (p *SyncAPIProducer) SendData(userID string, roomID string, dataType string) error {
var m sarama.ProducerMessage
data := common.AccountData{
RoomID: roomID,
Type: dataType,
}
m.Header.Set(jetstream.UserID, userID)
m.Header.Set(jetstream.RoomID, roomID)
m.Header.Set(jetstream.EventID, eventID)
m.Header.Set("type", receiptType)
m.Header.Set("timestamp", fmt.Sprintf("%d", timestamp))
log.WithFields(log.Fields{}).Tracef("Producing to topic '%s'", p.TopicReceiptEvent)
_, err := p.JetStream.PublishMsg(m, nats.Context(ctx))
return err
}
func (p *SyncAPIProducer) SendToDevice(
ctx context.Context, sender, userID, deviceID, eventType string,
message json.RawMessage,
) error {
devices := []string{}
_, domain, err := gomatrixserverlib.SplitID('@', userID)
value, err := json.Marshal(data)
if err != nil {
return err
}
// If the event is targeted locally then we want to expand the wildcard
// out into individual device IDs so that we can send them to each respective
// device. If the event isn't targeted locally then we can't expand the
// wildcard as we don't know about the remote devices, so instead we leave it
// as-is, so that the federation sender can send it on with the wildcard intact.
if domain == p.ServerName && deviceID == "*" {
var res userapi.QueryDevicesResponse
err = p.UserAPI.QueryDevices(context.TODO(), &userapi.QueryDevicesRequest{
UserID: userID,
}, &res)
if err != nil {
return err
}
for _, dev := range res.Devices {
devices = append(devices, dev.ID)
}
} else {
devices = append(devices, deviceID)
}
m.Topic = string(p.Topic)
m.Key = sarama.StringEncoder(userID)
m.Value = sarama.ByteEncoder(value)
log.WithFields(log.Fields{
"user_id": userID,
"num_devices": len(devices),
"type": eventType,
}).Tracef("Producing to topic '%s'", p.TopicSendToDeviceEvent)
for i, device := range devices {
ote := &types.OutputSendToDeviceEvent{
UserID: userID,
DeviceID: device,
SendToDeviceEvent: gomatrixserverlib.SendToDeviceEvent{
Sender: sender,
Type: eventType,
Content: message,
},
}
eventJSON, err := json.Marshal(ote)
if err != nil {
log.WithError(err).Error("sendToDevice failed json.Marshal")
return err
}
m := nats.NewMsg(p.TopicSendToDeviceEvent)
m.Data = eventJSON
m.Header.Set("sender", sender)
m.Header.Set(jetstream.UserID, userID)
if _, err = p.JetStream.PublishMsg(m, nats.Context(ctx)); err != nil {
if i < len(devices)-1 {
log.WithError(err).Warn("sendToDevice failed to PublishMsg, trying further devices")
continue
}
log.WithError(err).Error("sendToDevice failed to PublishMsg for all devices")
return err
}
}
return nil
}
func (p *SyncAPIProducer) SendTyping(
ctx context.Context, userID, roomID string, typing bool, timeoutMS int64,
) error {
m := &nats.Msg{
Subject: p.TopicTypingEvent,
Header: nats.Header{},
}
m.Header.Set(jetstream.UserID, userID)
m.Header.Set(jetstream.RoomID, roomID)
m.Header.Set("typing", strconv.FormatBool(typing))
m.Header.Set("timeout_ms", strconv.Itoa(int(timeoutMS)))
_, err := p.JetStream.PublishMsg(m, nats.Context(ctx))
return err
}
func (p *SyncAPIProducer) SendPresence(
ctx context.Context, userID string, presence types.Presence, statusMsg *string,
) error {
m := nats.NewMsg(p.TopicPresenceEvent)
m.Header.Set(jetstream.UserID, userID)
m.Header.Set("presence", presence.String())
if statusMsg != nil {
m.Header.Set("status_msg", *statusMsg)
}
m.Header.Set("last_active_ts", strconv.Itoa(int(spec.AsTimestamp(time.Now()))))
_, err := p.JetStream.PublishMsg(m, nats.Context(ctx))
_, _, err = p.Producer.SendMessage(&m)
return err
}
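The wildcard comment above is the key rule in SendToDevice. A standalone sketch of just that rule, not part of this changeset, using plain strings rather than the ServerName type:

package example

// expandDeviceTarget mirrors the behaviour described above: a "*" target is
// only expanded into concrete device IDs when the user is local, because
// only the local user API knows their device list; for remote users the
// wildcard is forwarded as-is for the federation sender to deal with.
func expandDeviceTarget(localServer, userDomain, deviceID string, localDevices []string) []string {
	if userDomain == localServer && deviceID == "*" {
		return localDevices
	}
	return []string{deviceID}
}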

View file

@ -0,0 +1,54 @@
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package producers
import (
"context"
"time"
"github.com/matrix-org/dendrite/typingserver/api"
"github.com/matrix-org/gomatrixserverlib"
)
// TypingServerProducer produces events for the typing server to consume
type TypingServerProducer struct {
InputAPI api.TypingServerInputAPI
}
// NewTypingServerProducer creates a new TypingServerProducer
func NewTypingServerProducer(inputAPI api.TypingServerInputAPI) *TypingServerProducer {
return &TypingServerProducer{
InputAPI: inputAPI,
}
}
// Send sends a typing event to the typing server
func (p *TypingServerProducer) Send(
ctx context.Context, userID, roomID string,
typing bool, timeout int64,
) error {
requestData := api.InputTypingEvent{
UserID: userID,
RoomID: roomID,
Typing: typing,
Timeout: timeout,
OriginServerTS: gomatrixserverlib.AsTimestamp(time.Now()),
}
var response api.InputTypingEventResponse
err := p.InputAPI.InputTypingEvent(
ctx, &api.InputTypingEventRequest{InputTypingEvent: requestData}, &response,
)
return err
}
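A hedged caller sketch, not part of this changeset; the import path is assumed from the producers package in this diff, and the timeout is taken to be milliseconds, as suggested by the timeout_ms naming on the other side of this comparison.

package example

import (
	"context"

	"github.com/matrix-org/dendrite/clientapi/producers"
)

// startTyping is a hypothetical caller: it reports that userID has started
// typing in roomID and asks peers to expire the notification after 30s.
func startTyping(ctx context.Context, p *producers.TypingServerProducer, userID, roomID string) error {
	return p.Send(ctx, userID, roomID, true, 30*1000)
}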

View file

@ -0,0 +1,62 @@
// Copyright 2017 Vector Creations Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package producers
import (
"encoding/json"
sarama "gopkg.in/Shopify/sarama.v1"
)
// UserUpdateProducer produces events related to user updates.
type UserUpdateProducer struct {
Topic string
Producer sarama.SyncProducer
}
// TODO: Move this struct to `common` so the components that consume the topic
// can use it when parsing incoming messages
type profileUpdate struct {
Updated string `json:"updated"` // Which attribute is updated (can be either `avatar_url` or `displayname`)
OldValue string `json:"old_value"` // The attribute's value before the update
NewValue string `json:"new_value"` // The attribute's value after the update
}
// SendUpdate sends an update using kafka to notify the roomserver of the
// profile update. Returns an error if the update failed to send.
func (p *UserUpdateProducer) SendUpdate(
userID string, updatedAttribute string, oldValue string, newValue string,
) error {
var update profileUpdate
var m sarama.ProducerMessage
m.Topic = string(p.Topic)
m.Key = sarama.StringEncoder(userID)
update = profileUpdate{
Updated: updatedAttribute,
OldValue: oldValue,
NewValue: newValue,
}
value, err := json.Marshal(update)
if err != nil {
return err
}
m.Value = sarama.ByteEncoder(value)
_, _, err = p.Producer.SendMessage(&m)
return err
}
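A hedged caller sketch, not part of this changeset and with an assumed import path: per the comment on profileUpdate, the updated attribute is either "displayname" or "avatar_url".

package example

import "github.com/matrix-org/dendrite/clientapi/producers"

// notifyDisplayNameChange is a hypothetical caller invoked after a profile
// save succeeds; consumers of the topic receive the profileUpdate JSON
// produced by SendUpdate above.
func notifyDisplayNameChange(p *producers.UserUpdateProducer, userID, oldName, newName string) error {
	return p.SendUpdate(userID, "displayname", oldName, newName)
}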

View file

@ -15,184 +15,51 @@
package routing
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"github.com/matrix-org/dendrite/clientapi/auth/authtypes"
"github.com/matrix-org/dendrite/clientapi/auth/storage/accounts"
"github.com/matrix-org/dendrite/clientapi/httputil"
"github.com/matrix-org/dendrite/clientapi/jsonerror"
"github.com/matrix-org/dendrite/clientapi/producers"
"github.com/matrix-org/dendrite/internal/eventutil"
roomserverAPI "github.com/matrix-org/dendrite/roomserver/api"
"github.com/matrix-org/dendrite/userapi/api"
"github.com/matrix-org/gomatrixserverlib/spec"
"github.com/matrix-org/gomatrixserverlib"
"github.com/matrix-org/util"
)
// GetAccountData implements GET /user/{userId}/[rooms/{roomid}/]account_data/{type}
func GetAccountData(
req *http.Request, userAPI api.ClientUserAPI, device *api.Device,
userID string, roomID string, dataType string,
) util.JSONResponse {
if userID != device.UserID {
return util.JSONResponse{
Code: http.StatusForbidden,
JSON: spec.Forbidden("userID does not match the current user"),
}
}
dataReq := api.QueryAccountDataRequest{
UserID: userID,
DataType: dataType,
RoomID: roomID,
}
dataRes := api.QueryAccountDataResponse{}
if err := userAPI.QueryAccountData(req.Context(), &dataReq, &dataRes); err != nil {
util.GetLogger(req.Context()).WithError(err).Error("userAPI.QueryAccountData failed")
return util.ErrorResponse(fmt.Errorf("userAPI.QueryAccountData: %w", err))
}
var data json.RawMessage
var ok bool
if roomID != "" {
data, ok = dataRes.RoomAccountData[roomID][dataType]
} else {
data, ok = dataRes.GlobalAccountData[dataType]
}
if ok {
return util.JSONResponse{
Code: http.StatusOK,
JSON: data,
}
}
return util.JSONResponse{
Code: http.StatusNotFound,
JSON: spec.NotFound("data not found"),
}
}
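For orientation, a hedged client-side sketch of the GET endpoint named in the comment above. This is not part of the changeset; the /_matrix/client/r0 prefix and the helper name are assumptions.

package example

import (
	"fmt"
	"net/http"
)

// getGlobalAccountData fetches a global account data blob for userID; a 200
// carries the stored JSON, a 404 means no data of that type has been saved.
func getGlobalAccountData(serverURL, accessToken, userID, dataType string) (*http.Response, error) {
	url := fmt.Sprintf("%s/_matrix/client/r0/user/%s/account_data/%s", serverURL, userID, dataType)
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Authorization", "Bearer "+accessToken)
	return http.DefaultClient.Do(req)
}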
// SaveAccountData implements PUT /user/{userId}/[rooms/{roomId}/]account_data/{type}
func SaveAccountData(
req *http.Request, userAPI api.ClientUserAPI, device *api.Device,
req *http.Request, accountDB *accounts.Database, device *authtypes.Device,
userID string, roomID string, dataType string, syncProducer *producers.SyncAPIProducer,
) util.JSONResponse {
if userID != device.UserID {
return util.JSONResponse{
Code: http.StatusForbidden,
JSON: spec.Forbidden("userID does not match the current user"),
JSON: jsonerror.Forbidden("userID does not match the current user"),
}
}
localpart, _, err := gomatrixserverlib.SplitID('@', userID)
if err != nil {
return httputil.LogThenError(req, err)
}
defer req.Body.Close() // nolint: errcheck
if req.Body == http.NoBody {
return util.JSONResponse{
Code: http.StatusBadRequest,
JSON: spec.NotJSON("Content not JSON"),
}
}
if dataType == "m.fully_read" || dataType == "m.push_rules" {
return util.JSONResponse{
Code: http.StatusForbidden,
JSON: spec.Forbidden(fmt.Sprintf("Unable to modify %q using this API", dataType)),
}
}
body, err := io.ReadAll(req.Body)
body, err := ioutil.ReadAll(req.Body)
if err != nil {
util.GetLogger(req.Context()).WithError(err).Error("io.ReadAll failed")
return util.JSONResponse{
Code: http.StatusInternalServerError,
JSON: spec.InternalServerError{},
}
return httputil.LogThenError(req, err)
}
if !json.Valid(body) {
return util.JSONResponse{
Code: http.StatusBadRequest,
JSON: spec.BadJSON("Bad JSON content"),
}
if err := accountDB.SaveAccountData(
req.Context(), localpart, roomID, dataType, string(body),
); err != nil {
return httputil.LogThenError(req, err)
}
dataReq := api.InputAccountDataRequest{
UserID: userID,
DataType: dataType,
RoomID: roomID,
AccountData: json.RawMessage(body),
}
dataRes := api.InputAccountDataResponse{}
if err := userAPI.InputAccountData(req.Context(), &dataReq, &dataRes); err != nil {
util.GetLogger(req.Context()).WithError(err).Error("userAPI.InputAccountData failed")
return util.ErrorResponse(err)
}
return util.JSONResponse{
Code: http.StatusOK,
JSON: struct{}{},
}
}
type fullyReadEvent struct {
EventID string `json:"event_id"`
}
// SaveReadMarker implements POST /rooms/{roomId}/read_markers
func SaveReadMarker(
req *http.Request,
userAPI api.ClientUserAPI, rsAPI roomserverAPI.ClientRoomserverAPI,
syncProducer *producers.SyncAPIProducer, device *api.Device, roomID string,
) util.JSONResponse {
deviceUserID, err := spec.NewUserID(device.UserID, true)
if err != nil {
return util.JSONResponse{
Code: http.StatusBadRequest,
JSON: spec.BadJSON("userID for this device is invalid"),
}
}
// Verify that the user is a member of this room
resErr := checkMemberInRoom(req.Context(), rsAPI, *deviceUserID, roomID)
if resErr != nil {
return *resErr
}
var r eventutil.ReadMarkerJSON
resErr = httputil.UnmarshalJSONRequest(req, &r)
if resErr != nil {
return *resErr
}
if r.FullyRead != "" {
data, err := json.Marshal(fullyReadEvent{EventID: r.FullyRead})
if err != nil {
return util.JSONResponse{
Code: http.StatusInternalServerError,
JSON: spec.InternalServerError{},
}
}
dataReq := api.InputAccountDataRequest{
UserID: device.UserID,
DataType: "m.fully_read",
RoomID: roomID,
AccountData: data,
}
dataRes := api.InputAccountDataResponse{}
if err := userAPI.InputAccountData(req.Context(), &dataReq, &dataRes); err != nil {
util.GetLogger(req.Context()).WithError(err).Error("userAPI.InputAccountData failed")
return util.ErrorResponse(err)
}
}
// Handle the read receipts that may be included in the read marker.
if r.Read != "" {
return SetReceipt(req, userAPI, syncProducer, device, roomID, "m.read", r.Read)
}
if r.ReadPrivate != "" {
return SetReceipt(req, userAPI, syncProducer, device, roomID, "m.read.private", r.ReadPrivate)
if err := syncProducer.SendData(userID, roomID, dataType); err != nil {
return httputil.LogThenError(req, err)
}
return util.JSONResponse{

Some files were not shown because too many files have changed in this diff