Docs restructure, update docker etc.

This commit is contained in:
Till Faelligen 2023-01-26 13:00:59 +01:00
parent 5b73592f5a
commit f3e20ec66b
No known key found for this signature in database
GPG key ID: ACCDC9606D472758
38 changed files with 586 additions and 695 deletions

View file

@ -1,15 +1,10 @@
#syntax=docker/dockerfile:1.2
#
# base installs required dependencies and runs go mod download to cache dependencies
#
FROM --platform=${BUILDPLATFORM} docker.io/golang:1.19-alpine AS base
RUN apk --update --no-cache add bash build-base curl
#
# build creates all needed binaries
#
FROM --platform=${BUILDPLATFORM} base AS build
FROM --platform=${BUILDPLATFORM} docker.io/golang:1.19-alpine AS build
RUN apk --update --no-cache add bash build-base curl
WORKDIR /src
ARG TARGETOS
ARG TARGETARCH

View file

@ -16,8 +16,6 @@ repository, run:
```
docker build . --target monolith -t matrixdotorg/dendrite-monolith
docker build . --target polylith -t matrixdotorg/dendrite-polylith
docker build . --target demo-pinecone -t matrixdotorg/dendrite-demo-pinecone
docker build . --target demo-yggdrasil -t matrixdotorg/dendrite-demo-yggdrasil
```
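The Dockerfile above uses `BUILDPLATFORM`, `TARGETOS` and `TARGETARCH`, which `docker buildx` fills in automatically, so cross-building images is mostly a matter of listing platforms. A hedged sketch (the platform list and builder setup are assumptions; the target and tag come from the commands above):
```bash
# requires a buildx builder with multi-arch support (e.g. `docker buildx create --use`)
docker buildx build . --platform linux/amd64,linux/arm64 \
  --target monolith -t matrixdotorg/dendrite-monolith
```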
## Compose files

View file

@ -1,17 +1,18 @@
version: "3.4"
services:
postgres:
hostname: postgres
image: postgres:14
image: postgres:15-alpine
restart: always
volumes:
- ./postgres/create_db.sh:/docker-entrypoint-initdb.d/20-create_db.sh
# To persist your PostgreSQL databases outside of the Docker image,
# to prevent data loss, modify the following ./path_to path:
- ./path_to/postgresql:/var/lib/postgresql/data
# This will create a docker volume to persist the database files in.
# If you prefer those files to be outside of docker, you'll need to change this.
- dendrite_postgres_data:/var/lib/postgresql/data
environment:
POSTGRES_PASSWORD: itsasecret
POSTGRES_USER: dendrite
POSTGRES_DATABASE: dendrite
healthcheck:
test: ["CMD-SHELL", "pg_isready -U dendrite"]
interval: 5s
@ -23,18 +24,19 @@ services:
monolith:
hostname: monolith
image: matrixdotorg/dendrite-monolith:latest
command: [
"--tls-cert=server.crt",
"--tls-key=server.key"
]
ports:
- 8008:8008
- 8448:8448
volumes:
- ./config:/etc/dendrite
- ./media:/var/dendrite/media
# The following volumes use docker volumes, change this
# if you prefer to have those files outside of docker.
- dendrite_media:/var/dendrite/media
- dendrite_jetstream:/var/dendrite/jetstream
- dendrite_search_index:/var/dendrite/searchindex
depends_on:
- postgres
postgres:
condition: service_healthy
networks:
- internal
restart: unless-stopped
@ -42,3 +44,9 @@ services:
networks:
internal:
attachable: true
volumes:
dendrite_postgres_data:
dendrite_media:
dendrite_jetstream:
dendrite_search_index:
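The named volumes declared above are managed by Docker rather than being bind-mounted host paths. A hedged sketch of inspecting one and taking an ad-hoc backup (the volume names come from the compose file above; the `alpine` helper image and backup filename are assumptions):
```bash
# show where Docker stores the Postgres volume on the host
docker volume inspect dendrite_postgres_data
# archive the media volume into the current directory
docker run --rm -v dendrite_media:/data:ro -v "$(pwd)":/backup alpine \
  tar czf /backup/dendrite_media.tar.gz -C /data .
```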

View file

@ -1,17 +1,18 @@
version: "3.4"
services:
postgres:
hostname: postgres
image: postgres:14
image: postgres:15-alpine
restart: always
volumes:
- ./postgres/create_db.sh:/docker-entrypoint-initdb.d/20-create_db.sh
# To persist your PostgreSQL databases outside of the Docker image,
# to prevent data loss, modify the following ./path_to path:
- ./path_to/postgresql:/var/lib/postgresql/data
# This will create a docker volume to persist the database files in.
# If you prefer those files to be outside of docker, you'll need to change this.
- dendrite_postgres_data:/var/lib/postgresql/data
environment:
POSTGRES_PASSWORD: itsasecret
POSTGRES_USER: dendrite
POSTGRES_DATABASE: dendrite
healthcheck:
test: ["CMD-SHELL", "pg_isready -U dendrite"]
interval: 5s
@ -28,116 +29,119 @@ services:
--store_dir /var/lib/nats
--cluster_name Dendrite
volumes:
# To persist your NATS JetStream streams outside of the Docker image,
# prevent data loss, modify the following ./path_to path:
- ./path_to/nats:/var/lib/nats
- dendrite_jetstream:/var/lib/nats
networks:
- internal
# This "component" will exit immediately, it's only purpose is
# to provide the common settings
component:
image: matrixdotorg/dendrite-polylith:latest
restart: "no"
networks:
- internal
volumes:
- ./config:/etc/dendrite
depends_on:
jetstream:
condition: service_started
postgres:
condition: service_healthy
client_api:
extends:
service: component
hostname: client_api
image: matrixdotorg/dendrite-polylith:latest
command: clientapi
volumes:
- ./config:/etc/dendrite
depends_on:
- jetstream
- postgres
networks:
- internal
restart: unless-stopped
depends_on:
user_api:
condition: service_started
media_api:
extends:
service: component
hostname: media_api
image: matrixdotorg/dendrite-polylith:latest
command: mediaapi
volumes:
- ./config:/etc/dendrite
- ./media:/var/dendrite/media
networks:
- internal
restart: unless-stopped
volumes:
- dendrite_media:/var/dendrite/media
sync_api:
extends:
service: component
hostname: sync_api
image: matrixdotorg/dendrite-polylith:latest
command: syncapi
volumes:
- ./config:/etc/dendrite
depends_on:
- jetstream
- postgres
networks:
- internal
restart: unless-stopped
volumes:
- dendrite_search_index:/var/dendrite/searchindex
depends_on:
room_server:
condition: service_started
user_api:
condition: service_started
key_server:
condition: service_started
room_server:
extends:
service: component
hostname: room_server
image: matrixdotorg/dendrite-polylith:latest
command: roomserver
volumes:
- ./config:/etc/dendrite
depends_on:
- jetstream
- postgres
networks:
- internal
restart: unless-stopped
federation_api:
extends:
service: component
hostname: federation_api
image: matrixdotorg/dendrite-polylith:latest
command: federationapi
volumes:
- ./config:/etc/dendrite
depends_on:
- jetstream
- postgres
networks:
- internal
restart: unless-stopped
depends_on:
room_server:
condition: service_started
key_server:
extends:
service: component
hostname: key_server
image: matrixdotorg/dendrite-polylith:latest
command: keyserver
volumes:
- ./config:/etc/dendrite
depends_on:
- jetstream
- postgres
networks:
- internal
restart: unless-stopped
depends_on:
room_server:
condition: service_started
federation_api:
condition: service_started
user_api:
extends:
service: component
hostname: user_api
image: matrixdotorg/dendrite-polylith:latest
command: userapi
volumes:
- ./config:/etc/dendrite
depends_on:
- jetstream
- postgres
networks:
- internal
restart: unless-stopped
depends_on:
room_server:
condition: service_started
key_server:
condition: service_started
appservice_api:
extends:
service: component
hostname: appservice_api
image: matrixdotorg/dendrite-polylith:latest
command: appservice
volumes:
- ./config:/etc/dendrite
networks:
- internal
depends_on:
- jetstream
- postgres
- room_server
- user_api
restart: unless-stopped
depends_on:
room_server:
condition: service_started
user_api:
condition: service_started
networks:
internal:
attachable: true
volumes:
dendrite_jetstream:
dendrite_postgres_data:
dendrite_media:
dendrite_search_index:
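After bringing the stack up, it is worth confirming that the dependency ordering worked and each component reached a running state. A hedged sketch using the service names defined above:
```bash
docker-compose -f docker-compose.polylith.yml up -d
docker-compose -f docker-compose.polylith.yml ps
# tail a couple of components while testing
docker-compose -f docker-compose.polylith.yml logs -f client_api sync_api
```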

View file

@ -1,5 +0,0 @@
#!/bin/sh
for db in userapi_accounts mediaapi syncapi roomserver keyserver federationapi appservice mscs; do
createdb -U dendrite -O dendrite dendrite_$db
done

View file

@ -69,8 +69,7 @@ global:
# e.g. localhost:443
well_known_server_name: ""
# The server name to delegate client-server communications to, with optional port
# e.g. localhost:443
# The base URL to delegate client-server communications to e.g. https://localhost
well_known_client_name: ""
# Lists of domains that the server will trust as identity servers to verify third
@ -111,18 +110,6 @@ global:
# Configuration for NATS JetStream
jetstream:
# A list of NATS Server addresses to connect to. If none are specified, an
# internal NATS server will be started automatically when running Dendrite in
# monolith mode. For polylith deployments, it is required to specify the address
# of at least one NATS Server node.
addresses:
# - localhost:4222
# Disable the validation of TLS certificates of NATS. This is
# not recommended in production since it may allow NATS traffic
# to be sent to an insecure endpoint.
disable_tls_validation: false
# Persistent directory to store JetStream streams in. This directory should be
# preserved across Dendrite restarts.
storage_path: ./

View file

@ -59,8 +59,7 @@ global:
# e.g. localhost:443
well_known_server_name: ""
# The server name to delegate client-server communications to, with optional port
# e.g. localhost:443
# The base URL to delegate client-server communications to e.g. https://localhost
well_known_client_name: ""
# Lists of domains that the server will trust as identity servers to verify third

View file

@ -1,15 +0,0 @@
# Installation
Please note that new installation instructions can be found
on the [new documentation site](https://matrix-org.github.io/dendrite/),
or alternatively, in the [installation](installation/) folder:
1. [Planning your deployment](installation/1_planning.md)
2. [Setting up the domain](installation/2_domainname.md)
3. [Preparing database storage](installation/3_database.md)
4. [Generating signing keys](installation/4_signingkey.md)
5. [Installing as a monolith](installation/5_install_monolith.md)
6. [Installing as a polylith](installation/6_install_polylith.md)
7. [Populate the configuration](installation/7_configuration.md)
8. [Starting the monolith](installation/8_starting_monolith.md)
9. [Starting the polylith](installation/9_starting_polylith.md)

View file

@ -11,10 +11,9 @@ User accounts can be created on a Dendrite instance in a number of ways.
## From the command line
The `create-account` tool is built in the `bin` folder when building Dendrite with
the `build.sh` script.
The `create-account` tool is built in the `bin` folder when [building](../installation/build) Dendrite.
It uses the `dendrite.yaml` configuration file to connect to a running Dendrite instance and requires
It uses the `dendrite.yaml` configuration file to connect to a **running** Dendrite instance and requires
shared secret registration to be enabled as explained below.
An example of using `create-account` to create a **normal account**:
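A minimal sketch of what such an invocation might look like, assuming the tool accepts `-config` and `-username` flags and that `alice` is the account to create; check `./bin/create-account --help` for the exact flags of your build:
```bash
# hypothetical example; flags may differ between Dendrite versions
./bin/create-account -config /path/to/dendrite.yaml -username alice
```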

View file

@ -1,6 +1,7 @@
---
title: Supported admin APIs
parent: Administration
nav_order: 4
permalink: /administration/adminapi
---

View file

@ -1,9 +1,9 @@
---
title: Optimise your installation
parent: Installation
parent: Administration
has_toc: true
nav_order: 11
permalink: /installation/start/optimisation
nav_order: 5
permalink: /administration/optimisation
---
# Optimise your installation

View file

@ -1,6 +1,7 @@
---
title: Troubleshooting
parent: Administration
nav_order: 6
permalink: /administration/troubleshooting
---
@ -77,7 +78,7 @@ If there aren't, you will see a log lines like this:
level=warning msg="IMPORTANT: Process file descriptor limit is currently 65535, it is recommended to raise the limit for Dendrite to at least 65535 to avoid issues"
```
Follow the [Optimisation](../installation/11_optimisation.md) instructions to correct the
Follow the [Optimisation](11_optimisation.md) instructions to correct the
available number of file descriptors.
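A quick way to check and temporarily raise the limit from the shell that starts Dendrite (the 65535 figure mirrors the warning above; systemd deployments would set `LimitNOFILE` in the unit file instead):
```bash
ulimit -n          # current soft limit on open file descriptors
ulimit -n 65535    # raise it for this shell session only
```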
## 6. STUN/TURN Server tester

View file

@ -1,6 +1,7 @@
---
title: Contributing
parent: Development
nav_order: 1
permalink: /development/contributing
---

View file

@ -1,6 +1,7 @@
---
title: Profiling
parent: Development
nav_order: 4
permalink: /development/profiling
---

View file

@ -1,41 +1,25 @@
---
title: Coverage
parent: Development
nav_order: 3
permalink: /development/coverage
---
To generate a test coverage report for Sytest, a small patch needs to be applied to the Sytest repository to compile and use the instrumented binary:
```patch
diff --git a/lib/SyTest/Homeserver/Dendrite.pm b/lib/SyTest/Homeserver/Dendrite.pm
index 8f0e209c..ad057e52 100644
--- a/lib/SyTest/Homeserver/Dendrite.pm
+++ b/lib/SyTest/Homeserver/Dendrite.pm
@@ -337,7 +337,7 @@ sub _start_monolith
$output->diag( "Starting monolith server" );
my @command = (
- $self->{bindir} . '/dendrite-monolith-server',
+ $self->{bindir} . '/dendrite-monolith-server', '--test.coverprofile=' . $self->{hs_dir} . '/integrationcover.log', "DEVEL",
'--config', $self->{paths}{config},
'--http-bind-address', $self->{bind_host} . ':' . $self->unsecure_port,
'--https-bind-address', $self->{bind_host} . ':' . $self->secure_port,
diff --git a/scripts/dendrite_sytest.sh b/scripts/dendrite_sytest.sh
index f009332b..7ea79869 100755
--- a/scripts/dendrite_sytest.sh
+++ b/scripts/dendrite_sytest.sh
@@ -34,7 +34,8 @@ export GOBIN=/tmp/bin
echo >&2 "--- Building dendrite from source"
cd /src
mkdir -p $GOBIN
-go install -v ./cmd/dendrite-monolith-server
+# go install -v ./cmd/dendrite-monolith-server
+go test -c -cover -covermode=atomic -o $GOBIN/dendrite-monolith-server -coverpkg "github.com/matrix-org/..." ./cmd/dendrite-monolith-server
go install -v ./cmd/generate-keys
cd -
```
## Running Sytest with coverage enabled
Then run Sytest. This will generate a new file `integrationcover.log` in each server's directory e.g `server-0/integrationcover.log`. To parse it,
To run Sytest with coverage enabled:
```bash
docker run --rm --name sytest -v "/Users/kegan/github/sytest:/sytest" \
-v "/Users/kegan/github/dendrite:/src" -v "/Users/kegan/logs:/logs" \
-v "/Users/kegan/go/:/gopath" -e "POSTGRES=1" -e "DENDRITE_TRACE_HTTP=1" \
-e "COVER=1" \
matrixdotorg/sytest-dendrite:latest
```
This will generate a new file `integrationcover.log` in each server's directory, e.g. `server-0/integrationcover.log`. To parse it,
ensure your working directory is under the Dendrite repository then run:
```bash
go tool cover -func=/path/to/server-0/integrationcover.log
```
@ -56,29 +40,82 @@ github.com/matrix-org/util/unique.go:48: SortAndUnique 100.0%
github.com/matrix-org/util/unique.go:55: UniqueStrings 100.0%
total: (statements) 53.7%
```
The total coverage for this run is the last line at the bottom. However, this value is misleading because Dendrite can run in many different configurations,
which will never be tested in a single test run (e.g. SQLite or Postgres, monolith or polylith). To get a more accurate value, additional processing is required
to remove packages which will never be tested and extension MSCs.
(The following commands use [gocovmerge](https://github.com/wadey/gocovmerge) to merge two or more coverage logs, so make sure you have it installed):
```bash
# These commands are all similar but change which package paths are _removed_ from the output.
# For Postgres (monolith)
go tool cover -func=/path/to/server-0/integrationcover.log | grep 'github.com/matrix-org/dendrite' | grep -Ev 'inthttp|sqlite|setup/mscs|api_trace' > coverage.txt
find -name 'integrationcover.log' | xargs gocovmerge | grep -Ev 'relayapi|inthttp|sqlite|setup/mscs|api_trace' > final.cov && go tool cover -func=final.cov
# For Postgres (polylith)
go tool cover -func=/path/to/server-0/integrationcover.log | grep 'github.com/matrix-org/dendrite' | grep -Ev 'sqlite|setup/mscs|api_trace' > coverage.txt
find -name 'integrationcover.log' | xargs gocovmerge | grep -Ev 'relayapi|sqlite|setup/mscs|api_trace' > final.cov && go tool cover -func=final.cov
# For SQLite (monolith)
go tool cover -func=/path/to/server-0/integrationcover.log | grep 'github.com/matrix-org/dendrite' | grep -Ev 'inthttp|postgres|setup/mscs|api_trace' > coverage.txt
find -name 'integrationcover.log' | xargs gocovmerge | grep -Ev 'relayapi|inthttp|postgres|setup/mscs|api_trace' > final.cov && go tool cover -func=final.cov
# For SQLite (polylith)
go tool cover -func=/path/to/server-0/integrationcover.log | grep 'github.com/matrix-org/dendrite' | grep -Ev 'postgres|setup/mscs|api_trace' > coverage.txt
find -name 'integrationcover.log' | xargs gocovmerge | grep -Ev 'relayapi|postgres|setup/mscs|api_trace' > final.cov && go tool cover -func=final.cov
```
A total value can then be calculated using:
## Running unit tests with coverage enabled
Running unit tests with coverage enabled can be done with the following command. It will also generate an `integrationcover.log` file, which can later be merged
using `gocovmerge`:
```bash
cat coverage.txt | awk -F '\t+' '{x = x + $3} END {print x/NR}'
go test -covermode=atomic -coverpkg=./... -coverprofile=integrationcover.log $(go list ./... | grep -v '/cmd/')
```
## Getting coverage from Complement
We currently do not have a way to combine Sytest/Complement/Unit Tests into a single coverage report.
Getting the coverage for Complement runs is a bit more involved.
First you'll need a Docker image compatible with Complement; one can be built using
```bash
docker build -t complement-dendrite -f build/scripts/Complement.Dockerfile .
```
from within the Dendrite repository.
Clone complement to a directory of your liking:
```bash
git clone https://github.com/matrix-org/complement.git
cd complement
```
Next we'll need a script to execute after each test finishes. Create a new file `posttest.sh`, make it executable (`chmod +x posttest.sh`)
and add the following content:
```bash
#!/bin/bash
mkdir -p /tmp/Complement/logs/$2/$1/
docker cp $1:/dendrite/integrationcover.log /tmp/Complement/logs/$2/$1/
```
This will copy the `integrationcover.log` files from each container to something like
`/tmp/Complement/logs/TestLogin/94f9c428de95779d2b62a3ccd8eab9d5ddcf65cc259a40ece06bdc61687ffed3/integrationcover.log`. (`$1` is the containerID, `$2` the test name)
Now that we have set up everything we need, we can finally execute Complement:
```bash
COMPLEMENT_BASE_IMAGE=complement-dendrite \
COMPLEMENT_SHARE_ENV_PREFIX=COMPLEMENT_DENDRITE_ \
COMPLEMENT_DENDRITE_COVER=1 \
COMPLEMENT_POST_TEST_SCRIPT=$(pwd)/posttest.sh \
go test -tags dendrite_blacklist ./tests/... -count=1 -v -timeout=30m -failfast=false
```
Once this is done, you can copy the resulting `integrationcover.log` files to your Dendrite repository for the next step.
```bash
cp -pr /tmp/Complement/logs PathToYourDendriteRepository
```
## Combining the results of all runs
Now that we have all our `integrationcover.log` files within the Dendrite repository, you can execute the following command to get the overall
coverage (Note: update `grep -Ev` accordingly, for example to exclude SQLite):
```bash
find -name 'integrationcover.log' | xargs gocovmerge | grep -Ev 'setup/mscs|api_trace' > final.cov && go tool cover -func=final.cov
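# Optional, hedged suggestion: the merged profile can also be rendered as a browsable
# HTML report via go tool cover's -html flag, instead of the per-function summary above.
go tool cover -html=final.cov -o coverage.html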
```

View file

@ -1,6 +1,7 @@
---
title: SyTest
parent: Development
nav_order: 2
permalink: /development/sytest
---
@ -34,8 +35,6 @@ is fully resolved.
### Using the SyTest Docker image
**We strongly recommend using the Docker image to run Sytest.**
Use the following commands to pull the latest SyTest image and run the tests:
```sh
@ -62,6 +61,8 @@ When debugging, the following Docker `run` options may also be useful:
* `-e "DENDRITE_TRACE_INTERNAL=1"`: Adds roomserver internal API tracing to
server logs.
* `-e "DENDRITE_TRACE_SQL=1"`: Adds tracing to all SQL statements to server logs.
* `-e "COVER=1"`: Run Sytest with an instrumented binary, producing a Go coverage file per server.
* `-e "RACE_DETECTION=1"`: Build the binaries with the `-race` flag (Note: This will significantly slow down test runs)
The docker command also supports a single positional argument for the test file to
run, so you can run a single `.pl` file rather than the whole test suite. For example:
@ -72,68 +73,3 @@ docker run --rm --name sytest -v "/Users/kegan/github/sytest:/sytest"
-v "/Users/kegan/go/:/gopath" -e "POSTGRES=1" -e "DENDRITE_TRACE_HTTP=1"
matrixdotorg/sytest-dendrite:latest tests/50federation/40devicelists.pl
```
### Manually Setting up SyTest
**We advise AGAINST using manual SyTest setups.**
If you don't want to use the Docker image, you can also run SyTest by hand. Make
sure you have Perl 5 or above, and get SyTest with:
(Note that this guide assumes your SyTest checkout is next to your
`dendrite` checkout.)
```sh
git clone -b develop https://github.com/matrix-org/sytest
cd sytest
./install-deps.pl
```
Set up the database:
```sh
sudo -u postgres psql -c "CREATE USER dendrite PASSWORD 'itsasecret'"
sudo -u postgres psql -c "ALTER USER dendrite CREATEDB"
for i in dendrite0 dendrite1 sytest_template; do sudo -u postgres psql -c "CREATE DATABASE $i OWNER dendrite;"; done
mkdir -p "server-0"
cat > "server-0/database.yaml" << EOF
args:
user: dendrite
password: itsasecret
database: dendrite0
host: 127.0.0.1
sslmode: disable
type: pg
EOF
mkdir -p "server-1"
cat > "server-1/database.yaml" << EOF
args:
user: dendrite
password: itsasecret
database: dendrite1
host: 127.0.0.1
sslmode: disable
type: pg
EOF
```
Run the tests:
```sh
POSTGRES=1 ./run-tests.pl -I Dendrite::Monolith -d ../dendrite/bin -W ../dendrite/sytest-whitelist -O tap --all | tee results.tap
```
where `tee` lets you see the results while they're being piped to the file, and
`POSTGRES=1` enables testing with PostgreSQL. If the `POSTGRES` environment
variable is not set or is set to 0, SyTest will fall back to SQLite 3. For more
flags and options, see <https://github.com/matrix-org/sytest#running>.
Once the tests are complete, run the helper script to see if you need to add
any newly passing test names to `sytest-whitelist` in the project's root
directory:
```sh
../dendrite/show-expected-fail-tests.sh results.tap ../dendrite/sytest-whitelist ../dendrite/sytest-blacklist
```
If the script prints nothing/exits with 0, then you're good to go.

View file

@ -1,114 +0,0 @@
---
title: OpenTracing
has_children: true
parent: Development
permalink: /development/opentracing
---
# OpenTracing
Dendrite extensively uses the [opentracing.io](http://opentracing.io) framework
to trace work across the different logical components.
At its most basic opentracing tracks "spans" of work; recording start and end
times as well as any parent span that caused the piece of work.
A typical example would be a new span being created on an incoming request that
finishes when the response is sent. When the code needs to hit out to a
different component a new span is created with the initial span as its parent.
This would end up looking roughly like:
```
Received request Sent response
|<───────────────────────────────────────>|
|<────────────────────>|
RPC call RPC call returns
```
This is useful to see where the time is being spent processing a request on a
component. However, opentracing allows tracking of spans across components. This
makes it possible to see exactly what work goes into processing a request:
```
Component 1 |<─────────────────── HTTP ────────────────────>|
|<──────────────── RPC ─────────────────>|
Component 2 |<─ SQL ─>| |<── RPC ───>|
Component 3 |<─ SQL ─>|
```
This is achieved by serializing span information during all communication
between components. For HTTP requests, this is achieved by the sender
serializing the span into a HTTP header, and the receiver deserializing the span
on receipt. (Generally a new span is then immediately created with the
deserialized span as the parent).
A collection of spans that are related is called a trace.
Spans are passed through the code via contexts, rather than manually. It is
therefore important that all spans that are created are immediately added to the
current context. Thankfully the opentracing library gives helper functions for
doing this:
```golang
span, ctx := opentracing.StartSpanFromContext(ctx, spanName)
defer span.Finish()
```
This will create a new span, adding any span already in `ctx` as a parent to the
new span.
Adding Information
------------------
Opentracing allows adding information to a trace via three mechanisms:
- "tags" ─ A span can be tagged with a key/value pair. This is typically
information that relates to the span, e.g. spans created for incoming HTTP
requests could include the request path and response codes as tags, while spans for
SQL could include the query being executed.
- "logs" ─ Key/value pairs can be looged at a particular instance in a trace.
This can be useful to log e.g. any errors that happen.
- "baggage" ─ Arbitrary key/value pairs can be added to a span to which all
child spans have access. Baggage isn't saved and so isn't available when
inspecting the traces, but can be used to add context to logs or tags in child
spans.
See
[specification.md](https://github.com/opentracing/specification/blob/master/specification.md)
for some of the common tags and log fields used.
Span Relationships
------------------
Spans can be related to each other. The most common relation is `childOf`, which
indicates the child span somehow depends on the parent span ─ typically the
parent span cannot complete until all child spans are completed.
A second relation type is `followsFrom`, where the parent has no dependence on
the child span. This usually indicates some sort of fire and forget behaviour,
e.g. adding a message to a pipeline or inserting into a kafka topic.
Jaeger
------
Opentracing is just a framework. We use
[jaeger](https://github.com/jaegertracing/jaeger) as the actual implementation.
Jaeger is responsible for recording, sending and saving traces, as well as
giving a UI for viewing and interacting with traces.
To enable Jaeger, a `Tracer` object must be instantiated from the config (as well
as having a jaeger server running somewhere, usually locally). A `Tracer` does
several things:
- Decides which traces to save and send to the server. There are multiple
schemes for doing this, with a simple example being to save a certain fraction
of traces.
- Communicating with the Jaeger backend. If not explicitly specified, it uses the
default port on localhost.
- Associates a service name to all spans created by the tracer. This service
name equates to a logical component, e.g. spans created by clientapi will have
a different service name than ones created by the syncapi. Database access
will also typically use a different service name.
This means that there is a tracer per service name/component.

View file

@ -1,57 +0,0 @@
---
title: Setup
parent: OpenTracing
grand_parent: Development
permalink: /development/opentracing/setup
---
# OpenTracing Setup
Dendrite uses [Jaeger](https://www.jaegertracing.io/) for tracing between microservices.
Tracing shows the nesting of logical spans which provides visibility on how the microservices interact.
This document explains how to set up Jaeger locally on a single machine.
## Set up the Jaeger backend
The [easiest way](https://www.jaegertracing.io/docs/1.18/getting-started/) is to use the all-in-one Docker image:
```
$ docker run -d --name jaeger \
-e COLLECTOR_ZIPKIN_HTTP_PORT=9411 \
-p 5775:5775/udp \
-p 6831:6831/udp \
-p 6832:6832/udp \
-p 5778:5778 \
-p 16686:16686 \
-p 14268:14268 \
-p 14250:14250 \
-p 9411:9411 \
jaegertracing/all-in-one:1.18
```
## Configuring Dendrite to talk to Jaeger
Modify your config to look like: (this will send every single span to Jaeger which will be slow on large instances, but for local testing it's fine)
```
tracing:
enabled: true
jaeger:
serviceName: "dendrite"
disabled: false
rpc_metrics: true
tags: []
sampler:
type: const
param: 1
```
then run the monolith server with `--api true` to use polylith components which do tracing spans:
```
./dendrite-monolith-server --tls-cert server.crt --tls-key server.key --config dendrite.yaml --api true
```
## Checking traces
Visit <http://localhost:16686> to see traces under `DendriteMonolith`.

docs/experimental.md Normal file
View file

@ -0,0 +1,10 @@
---
title: Experimental
has_children: true
nav_order: 5
permalink: /experimental
---
# Experimental
This section contains documentation on experimental features in Dendrite.

View file

@ -0,0 +1,175 @@
---
title: Installing as a polylith
parent: Experimental
nav_order: 1
permalink: /installation/experimental/polylith
---
# Installing as a polylith
## Using docker-compose
Dendrite provides an [example](https://github.com/matrix-org/dendrite/blob/main/build/docker/docker-compose.polylith.yml)
Docker compose file, which needs some preparation to start successfully.
Please note that this compose file has Postgres and NATS JetStream as dependencies, and you need to configure
a [reverse proxy](../planning#reverse-proxy).
### Preparations
Note that we're going to use the `dendrite-monolith` image in the next steps, as the `dendrite-polylith` image does
not provide the needed binaries to generate keys and configs.
#### Generate a private key
First we'll generate a private key, which is used to sign events. The following will create one in `./config`:
```bash
mkdir -p ./config
docker run --rm --entrypoint="/usr/bin/generate-keys" \
-v $(pwd)/config:/mnt \
matrixdotorg/dendrite-monolith:latest \
-private-key /mnt/matrix_key.pem
```
(**NOTE**: This only needs to be executed **once**, as running it again will overwrite the existing key.)
#### Generate a config
Similar to the command above, we can generate a config which uses the correct paths
specified in the example docker-compose file. Change `server` to your domain and `db` according to your changes
to the docker-compose file (`services.postgres.environment` values):
```bash
mkdir -p ./config
docker run --rm --entrypoint="/bin/sh" \
-v $(pwd)/config:/mnt \
matrixdotorg/dendrite-monolith:latest \
-c "/usr/bin/generate-config \
-polylith \
-db postgres://dendrite:itsasecret@postgres/dendrite?sslmode=disable \
-server YourDomainHere > /mnt/dendrite.yaml"
```
We now need to modify the generated config, since `-polylith` generates one intended for running all components on the same machine.
Set the JetStream configuration to:
```yaml
global:
jetstream:
storage_path: /var/dendrite/
addresses:
- nats://jetstream:4222
topic_prefix: Dendrite
in_memory: false
disable_tls_validation: false # Required when using the example compose file
```
For each component defined, remove the `internal_api.listen` hostname (`localhost`) and change the `internal_api.connect` hostname
to the corresponding hostname from the `docker-compose.polylith.yml`, e.g. the result for `user_api`:
```yaml
user_api:
internal_api:
listen: http://:7781
connect: http://user_api:7781
```
#### Starting Dendrite
Once you're done changing the config, you can now start up Dendrite with
```bash
docker-compose -f docker-compose.polylith.yml up
```
## Manual installation
You can install the Dendrite polylith binary into `$GOPATH/bin` by using `go install`:
```sh
go install ./cmd/dendrite-polylith-multi
```
Alternatively, you can specify a custom path for the binary to be written to using `go build`:
```sh
go build -o /usr/local/bin/ ./cmd/dendrite-polylith-multi
```
The `dendrite-polylith-multi` binary is a "multi-personality" binary which can run as
any of the components depending on the supplied command line parameters.
### Reverse proxy
A reverse proxy such as [Caddy](https://caddyserver.com), [NGINX](https://www.nginx.com) or
[HAProxy](http://www.haproxy.org) is required for polylith deployments.
Configuring these is not covered in this documentation, although sample configurations
for [Caddy](https://github.com/matrix-org/dendrite/blob/main/docs/caddy) and
[NGINX](https://github.com/matrix-org/dendrite/blob/main/docs/nginx) are provided.
### NATS Server
Polylith deployments currently need a standalone NATS Server installation with JetStream
enabled.
To do so, follow the [NATS Server installation instructions](https://docs.nats.io/running-a-nats-service/introduction/installation) and then [start your NATS deployment](https://docs.nats.io/running-a-nats-service/introduction/running). JetStream must be enabled, either by passing the `-js` flag to `nats-server`,
or by specifying the `store_dir` option in the `jetstream` configuration.
### Starting the polylith
Once you have completed all preparation and installation steps,
you can start your Dendrite polylith deployment by starting the various components
using the `dendrite-polylith-multi` personalities.
### Starting the components
Each component must be started individually:
#### Client API
```bash
./dendrite-polylith-multi -config /path/to/dendrite.yaml clientapi
```
#### Sync API
```bash
./dendrite-polylith-multi -config /path/to/dendrite.yaml syncapi
```
#### Media API
```bash
./dendrite-polylith-multi -config /path/to/dendrite.yaml mediaapi
```
#### Federation API
```bash
./dendrite-polylith-multi -config /path/to/dendrite.yaml federationapi
```
#### Roomserver
```bash
./dendrite-polylith-multi -config /path/to/dendrite.yaml roomserver
```
#### Appservice API
```bash
./dendrite-polylith-multi -config /path/to/dendrite.yaml appservice
```
#### User API
```bash
./dendrite-polylith-multi -config /path/to/dendrite.yaml userapi
```
#### Key server
```bash
./dendrite-polylith-multi -config /path/to/dendrite.yaml keyserver
```
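For quick local testing it can be convenient to launch every personality from a single shell. A hedged sketch using the component names listed above (for real deployments, a process supervisor or one systemd unit per component is the more robust choice):
```bash
for component in clientapi syncapi mediaapi federationapi roomserver appservice userapi keyserver; do
  ./dendrite-polylith-multi -config /path/to/dendrite.yaml "$component" &
done
wait
```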

View file

@ -1,73 +0,0 @@
---
title: Starting the polylith
parent: Installation
has_toc: true
nav_order: 10
permalink: /installation/start/polylith
---
# Starting the polylith
Once you have completed all of the preparation and installation steps,
you can start your Dendrite polylith deployment by starting the various components
using the `dendrite-polylith-multi` personalities.
## Start the reverse proxy
Ensure that your reverse proxy is started and is proxying the correct
endpoints to the correct components. Software such as [NGINX](https://www.nginx.com) or
[HAProxy](http://www.haproxy.org) can be used for this purpose. A [sample configuration
for NGINX](https://github.com/matrix-org/dendrite/blob/main/docs/nginx/polylith-sample.conf)
is provided.
## Starting the components
Each component must be started individually:
### Client API
```bash
./dendrite-polylith-multi -config /path/to/dendrite.yaml clientapi
```
### Sync API
```bash
./dendrite-polylith-multi -config /path/to/dendrite.yaml syncapi
```
### Media API
```bash
./dendrite-polylith-multi -config /path/to/dendrite.yaml mediaapi
```
### Federation API
```bash
./dendrite-polylith-multi -config /path/to/dendrite.yaml federationapi
```
### Roomserver
```bash
./dendrite-polylith-multi -config /path/to/dendrite.yaml roomserver
```
### Appservice API
```bash
./dendrite-polylith-multi -config /path/to/dendrite.yaml appservice
```
### User API
```bash
./dendrite-polylith-multi -config /path/to/dendrite.yaml userapi
```
### Key server
```bash
./dendrite-polylith-multi -config /path/to/dendrite.yaml keyserver
```

View file

@ -7,22 +7,6 @@ permalink: /installation/planning
# Planning your installation
## Modes
Dendrite consists of several components, each responsible for a different aspect of the Matrix protocol.
Users can run Dendrite in one of two modes which dictate how these components are executed and communicate.
* **Monolith mode** runs all components in a single process. Components communicate through an internal NATS
server with generally low overhead. This mode dramatically simplifies deployment complexity and offers the
best balance between performance and resource usage for low-to-mid volume deployments.
* **Polylith mode** runs all components in isolated processes. Components communicate through an external NATS
server and HTTP APIs, which incur considerable overhead. While this mode allows for more granular control of
resources dedicated toward individual processes, given the additional communications overhead, it is only
necessary for very large deployments.
Given our current state of development, **we recommend monolith mode** for all deployments.
## Databases
Dendrite can run with either a PostgreSQL or a SQLite backend. There are considerable tradeoffs
@ -83,23 +67,11 @@ you should check (by running `go version`) that you are using a suitable version
If using the PostgreSQL database engine, you should install PostgreSQL 12 or later.
### NATS Server
Monolith deployments come with a built-in [NATS Server](https://github.com/nats-io/nats-server) and
therefore do not need this to be manually installed. If you are planning a monolith installation, you
do not need to do anything.
Polylith deployments, however, currently need a standalone NATS Server installation with JetStream
enabled.
To do so, follow the [NATS Server installation instructions](https://docs.nats.io/running-a-nats-service/introduction/installation) and then [start your NATS deployment](https://docs.nats.io/running-a-nats-service/introduction/running). JetStream must be enabled, either by passing the `-js` flag to `nats-server`,
or by specifying the `store_dir` option in the the `jetstream` configuration.
### Reverse proxy
A reverse proxy such as [Caddy](https://caddyserver.com), [NGINX](https://www.nginx.com) or
[HAProxy](http://www.haproxy.org) is required for polylith deployments and is useful for monolith
deployments. Configuring those is not covered in this documentation, although sample configurations
[HAProxy](http://www.haproxy.org) is recommended for monolith deployments.
Configuring these is not covered in this documentation, although sample configurations
for [Caddy](https://github.com/matrix-org/dendrite/blob/main/docs/caddy) and
[NGINX](https://github.com/matrix-org/dendrite/blob/main/docs/nginx) are provided.

View file

@ -1,21 +0,0 @@
---
title: Installing as a monolith
parent: Installation
has_toc: true
nav_order: 5
permalink: /installation/install/monolith
---
# Installing as a monolith
You can install the Dendrite monolith binary into `$GOPATH/bin` by using `go install`:
```sh
go install ./cmd/dendrite-monolith-server
```
Alternatively, you can specify a custom path for the binary to be written to using `go build`:
```sh
go build -o /usr/local/bin/ ./cmd/dendrite-monolith-server
```

View file

@ -1,34 +0,0 @@
---
title: Installing as a polylith
parent: Installation
has_toc: true
nav_order: 6
permalink: /installation/install/polylith
---
# Installing as a polylith
You can install the Dendrite polylith binary into `$GOPATH/bin` by using `go install`:
```sh
go install ./cmd/dendrite-polylith-multi
```
Alternatively, you can specify a custom path for the binary to be written to using `go build`:
```sh
go build -o /usr/local/bin/ ./cmd/dendrite-polylith-multi
```
The `dendrite-polylith-multi` binary is a "multi-personality" binary which can run as
any of the components depending on the supplied command line parameters.
## Reverse proxy
Polylith deployments require a reverse proxy in order to ensure that requests are
sent to the correct endpoint. You must ensure that a suitable reverse proxy is installed
and configured.
Sample configurations are provided
for [Caddy](https://github.com/matrix-org/dendrite/blob/main/docs/caddy/polylith/Caddyfile)
and [NGINX](https://github.com/matrix-org/dendrite/blob/main/docs/nginx/polylith-sample.conf).

View file

@ -1,42 +0,0 @@
---
title: Starting the monolith
parent: Installation
has_toc: true
nav_order: 9
permalink: /installation/start/monolith
---
# Starting the monolith
Once you have completed all of the preparation and installation steps,
you can start your Dendrite monolith deployment by starting the `dendrite-monolith-server`:
```bash
./dendrite-monolith-server -config /path/to/dendrite.yaml
```
By default, Dendrite will listen HTTP on port 8008. If you want to change the addresses
or ports that Dendrite listens on, you can use the `-http-bind-address` and
`-https-bind-address` command line arguments:
```bash
./dendrite-monolith-server -config /path/to/dendrite.yaml \
-http-bind-address 1.2.3.4:12345 \
-https-bind-address 1.2.3.4:54321
```
## Running under systemd
A common deployment pattern is to run the monolith under systemd. For this, you
will need to create a service unit file. An example service unit file is available
in the [GitHub repository](https://github.com/matrix-org/dendrite/blob/main/docs/systemd/monolith-example.service).
Once you have installed the service unit, you can notify systemd, enable and start
the service:
```bash
systemctl daemon-reload
systemctl enable dendrite
systemctl start dendrite
journalctl -fu dendrite
```

View file

@ -0,0 +1,11 @@
---
title: Docker
parent: Installation
has_children: true
nav_order: 4
permalink: /docker
---
# Installation using Docker
This section contains documentation on how to install Dendrite using Docker.

View file

@ -0,0 +1,57 @@
---
title: Installation
parent: Docker
grand_parent: Installation
has_toc: true
nav_order: 1
permalink: /installation/docker/install
---
# Installing Dendrite using Docker Compose
Dendrite provides an [example](https://github.com/matrix-org/dendrite/blob/main/build/docker/docker-compose.monolith.yml)
Docker compose file, which needs some preparation to start successfully.
Please note that this compose file only has Postgres as a dependency, and you need to configure
a [reverse proxy](../planning#reverse-proxy).
## Preparations
### Generate a private key
First we'll generate a private key, which is used to sign events. The following will create one in `./config`:
```bash
mkdir -p ./config
docker run --rm --entrypoint="/usr/bin/generate-keys" \
-v $(pwd)/config:/mnt \
matrixdotorg/dendrite-monolith:latest \
-private-key /mnt/matrix_key.pem
```
(**NOTE**: This only needs to be executed **once**, as running it again will overwrite the existing key.)
### Generate a config
Similar to the command above, we can generate a config which uses the correct paths
specified in the example docker-compose file. Change `server` to your domain and `db` according to your changes
to the docker-compose file (`services.postgres.environment` values):
```bash
mkdir -p ./config
docker run --rm --entrypoint="/bin/sh" \
-v $(pwd)/config:/mnt \
matrixdotorg/dendrite-monolith:latest \
-c "/usr/bin/generate-config \
-dir /var/dendrite/ \
-db postgres://dendrite:itsasecret@postgres/dendrite?sslmode=disable \
-server YourDomainHere > /mnt/dendrite.yaml"
```
You can then change `config/dendrite.yaml` to your liking.
## Starting Dendrite
Once you're done changing the config, you can now start up Dendrite with
```bash
docker-compose -f docker-compose.monolith.yml up
```
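Once the containers are up, a quick smoke test is to hit the Client-Server API on the port exposed by the example compose file (8008); a JSON response indicates the monolith is serving requests:
```bash
curl http://localhost:8008/_matrix/client/versions
```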

docs/installation/helm.md Normal file
View file

@ -0,0 +1,11 @@
---
title: Helm
parent: Installation
has_children: true
nav_order: 3
permalink: /helm
---
# Helm
This section contains documentation on how to use [Helm](https://helm.sh/) to install Dendrite on a [Kubernetes](https://kubernetes.io/) cluster.

View file

@ -0,0 +1,58 @@
---
title: Installation
parent: Helm
grand_parent: Installation
has_toc: true
nav_order: 1
permalink: /installation/helm/install
---
# Installing Dendrite using Helm
To install Dendrite using the Helm chart, you first have to add the repository using the following commands:
```bash
helm repo add dendrite https://matrix-org.github.io/dendrite/
helm repo update
```
Next you'll need to create a `values.yaml` file and configure it to your liking. All possible values can be found
[here](https://github.com/matrix-org/dendrite/blob/main/helm/dendrite/values.yaml), but at least you need to configure
a `server_name`, otherwise the chart will complain about it:
```yaml
dendrite_config:
global:
server_name: "localhost"
```
If you are going to use an existing Postgres database, you'll also need to configure this connection:
```yaml
dendrite_config:
global:
database:
connection_string: "postgresql://PostgresUser:PostgresPassword@PostgresHostName/DendriteDatabaseName"
max_open_conns: 90
max_idle_conns: 5
conn_max_lifetime: -1
```
## Installing with PostgreSQL
The chart comes with a dependency on Postgres, which can be installed alongside Dendrite, this needs to be enabled in
the `values.yaml`:
```yaml
postgresql:
enabled: true # this installs Postgres
primary:
persistence:
size: 1Gi # defines the size for $PGDATA
dendrite_config:
global:
server_name: "localhost"
```
Using this option, the `database.connection_string` will be set for you automatically.
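With the repository added and a `values.yaml` prepared, installation is a single command. A hedged sketch: the release name is arbitrary, and `dendrite/dendrite` assumes the chart is published under that name in the repository added above:
```bash
helm install my-dendrite dendrite/dendrite -f values.yaml
# subsequent configuration changes reuse the same values file
helm upgrade my-dendrite dendrite/dendrite -f values.yaml
```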

View file

@ -0,0 +1,11 @@
---
title: Manual
parent: Installation
has_children: true
nav_order: 5
permalink: /manual
---
# Manual Installation
This section contains documentation on how to manually install Dendrite.

View file

@ -1,31 +1,26 @@
---
title: Building Dendrite
parent: Installation
title: Building/Installing Dendrite
parent: Manual
grand_parent: Installation
has_toc: true
nav_order: 3
permalink: /installation/build
nav_order: 1
permalink: /installation/manual/build
---
# Build all Dendrite commands
Dendrite has numerous utility commands in addition to the actual server binaries.
Build them all from the root of the source repo with `build.sh` (Linux/Mac):
Build them all from the root of the source repo with:
```sh
./build.sh
```
or `build.cmd` (Windows):
```powershell
build.cmd
go build -o bin/ ./cmd/...
```
The resulting binaries will be placed in the `bin` subfolder.
# Installing as a monolith
# Installing Dendrite
You can install the Dendrite monolith binary into `$GOPATH/bin` by using `go install`:
You can install the Dendrite binary into `$GOPATH/bin` by using `go install`:
```sh
go install ./cmd/dendrite-monolith-server

View file

@ -1,8 +1,10 @@
---
title: Preparing database storage
parent: Installation
nav_order: 3
permalink: /installation/database
nav_order: 2
parent: Manual
grand_parent: Installation
permalink: /installation/manual/database
---
# Preparing database storage

View file

@ -1,8 +1,9 @@
---
title: Configuring Dendrite
parent: Installation
nav_order: 7
permalink: /installation/configuration
parent: Manual
grand_parent: Installation
nav_order: 3
permalink: /installation/manual/configuration
---
# Configuring Dendrite
@ -21,7 +22,7 @@ sections:
First of all, you will need to configure the server name of your Matrix homeserver.
This must match the domain name that you have selected whilst [configuring the domain
name delegation](domainname).
name delegation](domainname#delegation).
In the `global` section, set the `server_name` to your delegated domain name:
@ -43,61 +44,33 @@ global:
private_key: /path/to/matrix_key.pem
```
## JetStream configuration
### Built-in NATS Server
Monolith deployments can use the built-in NATS Server rather than running a standalone
server. If you are building a polylith deployment, or you want to use a standalone NATS
Server anyway, you can also configure that too.
### Built-in NATS Server (monolith only)
In the `global` section, under the `jetstream` key, ensure that no server addresses are
configured and set a `storage_path` to a persistent folder on the filesystem:
In the `global` section, under the `jetstream` key, set a `storage_path` to a persistent folder on the filesystem:
```yaml
global:
# ...
jetstream:
in_memory: false
storage_path: /path/to/storage/folder
topic_prefix: Dendrite
```
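The `storage_path` must exist and be writable by whichever user runs Dendrite. A hedged sketch, assuming a dedicated `dendrite` system user and the example path above:
```bash
sudo mkdir -p /path/to/storage/folder
sudo chown dendrite:dendrite /path/to/storage/folder
```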
### Standalone NATS Server (monolith and polylith)
To use a standalone NATS Server instance, you will need to configure `addresses` field
to point to the port that your NATS Server is listening on:
```yaml
global:
# ...
jetstream:
addresses:
- localhost:4222
topic_prefix: Dendrite
```
You do not need to configure the `storage_path` when using a standalone NATS Server instance.
In the case that you are connecting to a multi-node NATS cluster, you can configure more than
one address in the `addresses` field.
## Database connections
Configuring database connections varies based on the [database configuration](database)
that you chose.
### Global connection pool (monolith with a single PostgreSQL database only)
### Global connection pool (with a single PostgreSQL database only)
If you are running a monolith deployment and want to use a single connection pool to a
single PostgreSQL database, then you must uncomment and configure the `database` section
within the `global` section:
If you want to use a single connection pool to a single PostgreSQL database,
then you must uncomment and configure the `database` section within the `global` section:
```yaml
global:
# ...
database:
connection_string: postgres://user:pass@hostname/database?sslmode=disable
max_open_conns: 100
max_open_conns: 90
max_idle_conns: 5
conn_max_lifetime: -1
```
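When sizing `max_open_conns`, keep it below PostgreSQL's own `max_connections` limit (the per-component section below makes the same point about the sum of all pools). A hedged check, assuming the `dendrite` database user from the earlier steps:
```bash
psql -h localhost -U dendrite -c 'SHOW max_connections;'
```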
@ -109,10 +82,11 @@ override the `global` database configuration.
### Per-component connections (all other configurations)
If you are building a polylith deployment, are using SQLite databases or separate PostgreSQL
databases per component, then you must instead configure the `database` sections under each
If you are using SQLite databases or separate PostgreSQL databases per component,
then you must instead configure the `database` sections under each
of the component blocks, e.g. under the `app_service_api`, `federation_api`, `key_server`,
`media_api`, `mscs`, `room_server`, `sync_api` and `user_api` blocks.
`media_api`, `mscs`, `room_server`, `sync_api` and `user_api` blocks. Make sure the **sum** of all
`max_open_conns` does **not** exceed the `max_connections` configured for Postgres.
For example, with PostgreSQL:
@ -140,9 +114,11 @@ room_server:
## Full-text search
Dendrite supports experimental full-text indexing using [Bleve](https://github.com/blevesearch/bleve). It is configured in the `sync_api` section as follows.
Dendrite supports full-text indexing using [Bleve](https://github.com/blevesearch/bleve). It is configured in the `sync_api` section as follows.
Depending on the language most likely to be used on the server, it might make sense to change the `language` used when indexing, to ensure the returned results match the expectations. A full list of possible languages can be found [here](https://github.com/blevesearch/bleve/tree/master/analysis/lang).
Depending on the language most likely to be used on the server, it might make sense to change the `language` used when indexing,
to ensure the returned results match the expectations. A full list of possible languages
can be found [here](https://github.com/matrix-org/dendrite/blob/5b73592f5a4dddf64184fcbe33f4c1835c656480/internal/fulltext/bleve.go#L25-L46).
```yaml
sync_api:

View file

@ -1,8 +1,9 @@
---
title: Generating signing keys
parent: Installation
nav_order: 8
permalink: /installation/signingkeys
parent: Manual
grand_parent: Installation
nav_order: 4
permalink: /installation/manual/signingkeys
---
# Generating signing keys

View file

@ -0,0 +1,26 @@
---
title: Starting Dendrite
parent: Manual
grand_parent: Installation
nav_order: 5
permalink: /installation/manual/start
---
# Starting Dendrite
Once you have completed all preparation and installation steps,
you can start your Dendrite deployment by starting the `dendrite-monolith-server`:
```bash
./dendrite-monolith-server -config /path/to/dendrite.yaml
```
By default, Dendrite will listen for HTTP on port 8008. If you want to change the addresses
or ports that Dendrite listens on, you can use the `-http-bind-address` and
`-https-bind-address` command line arguments:
```bash
./dendrite-monolith-server -config /path/to/dendrite.yaml \
-http-bind-address 1.2.3.4:12345 \
-https-bind-address 1.2.3.4:54321
```

View file

@ -1,19 +0,0 @@
[Unit]
Description=Dendrite (Matrix Homeserver)
After=syslog.target
After=network.target
After=postgresql.service
[Service]
Environment=GODEBUG=madvdontneed=1
RestartSec=2s
Type=simple
User=dendrite
Group=dendrite
WorkingDirectory=/opt/dendrite/
ExecStart=/opt/dendrite/bin/dendrite-monolith-server
Restart=always
LimitNOFILE=65535
[Install]
WantedBy=multi-user.target

View file

@ -58,7 +58,7 @@ Create a folder `appservices` and place your configurations in there. The confi
| dendrite_config.global.server_name | string | `""` | **REQUIRED** Servername for this Dendrite deployment. |
| dendrite_config.global.private_key | string | `"/etc/dendrite/secrets/signing.key"` | The private key to use. (**NOTE**: This is overridden in Helm) |
| dendrite_config.global.well_known_server_name | string | `""` | The server name to delegate server-server communications to, with optional port e.g. localhost:443 |
| dendrite_config.global.well_known_client_name | string | `""` | The server name to delegate client-server communications to, with optional port e.g. localhost:443 |
| dendrite_config.global.well_known_client_name | string | `""` | The base URL to delegate client-server communications to e.g. https://localhost |
| dendrite_config.global.trusted_third_party_id_servers | list | `["matrix.org","vector.im"]` | Lists of domains that the server will trust as identity servers to verify third party identifiers such as phone numbers and email addresses. |
| dendrite_config.global.old_private_keys | string | `nil` | The paths and expiry timestamps (as a UNIX timestamp in millisecond precision) to old signing keys that were formerly in use on this domain name. These keys will not be used for federation request or event signing, but will be provided to any other homeserver that asks when trying to verify old events. |
| dendrite_config.global.disable_federation | bool | `false` | Disable federation. Dendrite will not be able to make any outbound HTTP requests to other servers and the federation API will not be exposed. |

View file

@ -56,8 +56,7 @@ dendrite_config:
# e.g. localhost:443
well_known_server_name: ""
# -- The server name to delegate client-server communications to, with optional port
# e.g. localhost:443
# -- The base URL to delegate client-server communications to e.g. https://localhost
well_known_client_name: ""
# -- Lists of domains that the server will trust as identity servers to verify third