Merge commit 'f2b1b0ddf8fe7e59a20f866efed10ecb60a46b5f'

Brian Meek 2022-08-05 09:03:59 -07:00
commit d3f11acee9
133 changed files with 1879 additions and 1510 deletions

View file

@ -97,7 +97,7 @@ jobs:
strategy:
fail-fast: false
matrix:
go: ["1.18"]
go: ["1.18", "1.19"]
steps:
- uses: actions/checkout@v3
- name: Setup go
@ -127,7 +127,7 @@ jobs:
strategy:
fail-fast: false
matrix:
go: ["1.18"]
go: ["1.18", "1.19"]
goos: ["linux"]
goarch: ["amd64", "386"]
steps:
@ -151,6 +151,7 @@ jobs:
GOOS: ${{ matrix.goos }}
GOARCH: ${{ matrix.goarch }}
CGO_ENABLED: 1
CGO_CFLAGS: -fno-stack-protector
run: go build -trimpath -v -o "bin/" ./cmd/...
# build for Windows 64-bit
@ -160,7 +161,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
go: ["1.18"]
go: ["1.18", "1.19"]
goos: ["windows"]
goarch: ["amd64"]
steps:
@ -223,6 +224,31 @@ jobs:
- name: Test upgrade
run: ./dendrite-upgrade-tests --head .
# run database upgrade tests, skipping over one version
upgrade_test_direct:
name: Upgrade tests from HEAD-2
timeout-minutes: 20
needs: initial-tests-done
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Setup go
uses: actions/setup-go@v2
with:
go-version: "1.18"
- uses: actions/cache@v3
with:
path: |
~/.cache/go-build
~/go/pkg/mod
key: ${{ runner.os }}-go-upgrade-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-upgrade
- name: Build upgrade-tests
run: go build ./cmd/dendrite-upgrade-tests
- name: Test upgrade
run: ./dendrite-upgrade-tests -direct -from HEAD-2 --head .
# run Sytest in different variations
sytest:
timeout-minutes: 20
@ -359,7 +385,14 @@ jobs:
integration-tests-done:
name: Integration tests passed
needs: [initial-tests-done, upgrade_test, sytest, complement]
needs:
[
initial-tests-done,
upgrade_test,
upgrade_test_direct,
sytest,
complement,
]
runs-on: ubuntu-latest
if: ${{ !cancelled() }} # Run this even if prior jobs were skipped
steps:

View file

@ -1,5 +1,49 @@
# Changelog
## Dendrite 0.9.1 (2022-08-03)
### Fixes
* Upgrades a dependency which caused issues building Dendrite with Go 1.19
* The roomserver will no longer give up prematurely after failing to call `/state_ids`
* Removes the faulty room info cache, which caused a number of race conditions and occasional bugs (including when creating and joining rooms)
* The media endpoint now sets the `Cache-Control` header correctly to prevent web-based clients from hitting media endpoints excessively
* The sync API will now advance the PDU stream position correctly in all cases (contributed by [sergekh2](https://github.com/sergekh2))
* The sync API will now delete the correct range of send-to-device messages when advancing the stream position
* The device list `changed` key in the `/sync` response should now return the correct users
* A data race when looking up missing state has been fixed
* The `/send_join` API now applies stronger validation to the received membership event
## Dendrite 0.9.0 (2022-08-01)
### Features
* Dendrite now uses Ristretto for managing in-memory caches
* Should improve cache utilisation considerably over time by more intelligently selecting and managing cache entries compared to the previous LRU-based cache
* Defaults to a 1GB cache size if not configured otherwise
* The estimated cache size in memory and maximum age can now be configured with new [configuration options](https://github.com/matrix-org/dendrite/blob/e94ef84aaba30e12baf7f524c4e7a36d2fdeb189/dendrite-sample.monolith.yaml#L44-L61) to prevent unbounded cache growth
* Added support for serving the `/.well-known/matrix/client` hint directly from Dendrite
* Configurable with the new [configuration option](https://github.com/matrix-org/dendrite/blob/e94ef84aaba30e12baf7f524c4e7a36d2fdeb189/dendrite-sample.monolith.yaml#L67-L69)
* Refactored membership updater, which should eliminate some bugs caused by the membership table getting out of sync with the room state
* The User API is now responsible for sending account data updates to other components, which may fix some races and duplicate account data events
* Optimised the database query for checking whether a remote server is allowed to request an event over federation so that it uses far less CPU time (PostgreSQL only)
* Database migrations have been refactored to eliminate some problems that were present with `goose` when upgrading from older Dendrite versions
* Media fetching will now use the `/v3` endpoints for downloading media from remote homeservers
* HTTP 404 and HTTP 405 errors from the client-facing APIs should now be returned with CORS headers so that web-based clients do not produce incorrect access control warnings for unknown endpoints
* Some preparation work for full history visibility support
### Fixes
* Fixes a crash that could occur during event redaction
* The `/members` endpoint will no longer incorrectly return HTTP 500 as a result of some invite events
* Send-to-device messages should now be ordered more reliably and the last position in the stream updated correctly
* Parsing of appservice configuration files is now less strict (contributed by [Kab1r](https://github.com/Kab1r))
* The sync API should now identify shared users correctly when waking up for E2EE key changes
* The federation `/state` endpoint will now return an HTTP 403 when the state before an event isn't known, instead of an HTTP 500
* Presence timestamps should now be calculated with the correct precision
* A race condition in the roomserver's room info has been fixed
* A race condition in the sync API has been fixed
## Dendrite 0.8.9 (2022-07-01)
### Features

View file

@ -8,7 +8,6 @@ COPY . /build
RUN mkdir -p bin
RUN go build -trimpath -o bin/ ./cmd/dendrite-monolith-server
RUN go build -trimpath -o bin/ ./cmd/goose
RUN go build -trimpath -o bin/ ./cmd/create-account
RUN go build -trimpath -o bin/ ./cmd/generate-keys

View file

@ -8,7 +8,6 @@ COPY . /build
RUN mkdir -p bin
RUN go build -trimpath -o bin/ ./cmd/dendrite-polylith-multi
RUN go build -trimpath -o bin/ ./cmd/goose
RUN go build -trimpath -o bin/ ./cmd/create-account
RUN go build -trimpath -o bin/ ./cmd/generate-keys

View file

@ -48,7 +48,6 @@ func AddPublicRoutes(
syncProducer := &producers.SyncAPIProducer{
JetStream: js,
TopicClientData: cfg.Matrix.JetStream.Prefixed(jetstream.OutputClientData),
TopicReceiptEvent: cfg.Matrix.JetStream.Prefixed(jetstream.OutputReceiptEvent),
TopicSendToDeviceEvent: cfg.Matrix.JetStream.Prefixed(jetstream.OutputSendToDeviceEvent),
TopicTypingEvent: cfg.Matrix.JetStream.Prefixed(jetstream.OutputTypingEvent),
@ -59,6 +58,7 @@ func AddPublicRoutes(
routing.Setup(
base.PublicClientAPIMux,
base.PublicWellKnownAPIMux,
base.SynapseAdminMux,
base.DendriteAdminMux,
cfg, rsAPI, asAPI,

View file

@ -21,7 +21,6 @@ import (
"strconv"
"time"
"github.com/matrix-org/dendrite/internal/eventutil"
"github.com/matrix-org/dendrite/setup/jetstream"
"github.com/matrix-org/dendrite/syncapi/types"
userapi "github.com/matrix-org/dendrite/userapi/api"
@ -32,7 +31,6 @@ import (
// SyncAPIProducer produces events for the sync API server to consume
type SyncAPIProducer struct {
TopicClientData string
TopicReceiptEvent string
TopicSendToDeviceEvent string
TopicTypingEvent string
@ -42,36 +40,6 @@ type SyncAPIProducer struct {
UserAPI userapi.ClientUserAPI
}
// SendData sends account data to the sync API server
func (p *SyncAPIProducer) SendData(userID string, roomID string, dataType string, readMarker *eventutil.ReadMarkerJSON, ignoredUsers *types.IgnoredUsers) error {
m := &nats.Msg{
Subject: p.TopicClientData,
Header: nats.Header{},
}
m.Header.Set(jetstream.UserID, userID)
data := eventutil.AccountData{
RoomID: roomID,
Type: dataType,
ReadMarker: readMarker,
IgnoredUsers: ignoredUsers,
}
var err error
m.Data, err = json.Marshal(data)
if err != nil {
return err
}
log.WithFields(log.Fields{
"user_id": userID,
"room_id": roomID,
"data_type": dataType,
}).Tracef("Producing to topic '%s'", p.TopicClientData)
_, err = p.JetStream.PublishMsg(m)
return err
}
func (p *SyncAPIProducer) SendReceipt(
ctx context.Context,
userID, roomID, eventID, receiptType string, timestamp gomatrixserverlib.Timestamp,

View file

@ -25,7 +25,6 @@ import (
"github.com/matrix-org/dendrite/clientapi/producers"
"github.com/matrix-org/dendrite/internal/eventutil"
roomserverAPI "github.com/matrix-org/dendrite/roomserver/api"
"github.com/matrix-org/dendrite/syncapi/types"
"github.com/matrix-org/dendrite/userapi/api"
"github.com/matrix-org/util"
@ -127,18 +126,6 @@ func SaveAccountData(
return util.ErrorResponse(err)
}
var ignoredUsers *types.IgnoredUsers
if dataType == "m.ignored_user_list" {
ignoredUsers = &types.IgnoredUsers{}
_ = json.Unmarshal(body, ignoredUsers)
}
// TODO: user API should do this since it's account data
if err := syncProducer.SendData(userID, roomID, dataType, nil, ignoredUsers); err != nil {
util.GetLogger(req.Context()).WithError(err).Error("syncProducer.SendData failed")
return jsonerror.InternalServerError()
}
return util.JSONResponse{
Code: http.StatusOK,
JSON: struct{}{},
@ -191,11 +178,6 @@ func SaveReadMarker(
return util.ErrorResponse(err)
}
if err := syncProducer.SendData(device.UserID, roomID, "m.fully_read", &r, nil); err != nil {
util.GetLogger(req.Context()).WithError(err).Error("syncProducer.SendData failed")
return jsonerror.InternalServerError()
}
// Handle the read receipt that may be included in the read marker
if r.Read != "" {
return SetReceipt(req, syncProducer, device, roomID, "m.read", r.Read)

View file

@ -18,8 +18,6 @@ import (
"encoding/json"
"net/http"
"github.com/sirupsen/logrus"
"github.com/matrix-org/dendrite/clientapi/httputil"
"github.com/matrix-org/dendrite/clientapi/jsonerror"
"github.com/matrix-org/dendrite/clientapi/producers"
@ -98,10 +96,6 @@ func PutTag(
return jsonerror.InternalServerError()
}
if err = syncProducer.SendData(userID, roomID, "m.tag", nil, nil); err != nil {
logrus.WithError(err).Error("Failed to send m.tag account data update to syncapi")
}
return util.JSONResponse{
Code: http.StatusOK,
JSON: struct{}{},
@ -150,11 +144,6 @@ func DeleteTag(
return jsonerror.InternalServerError()
}
// TODO: user API should do this since it's account data
if err := syncProducer.SendData(userID, roomID, "m.tag", nil, nil); err != nil {
logrus.WithError(err).Error("Failed to send m.tag account data update to syncapi")
}
return util.JSONResponse{
Code: http.StatusOK,
JSON: struct{}{},

View file

@ -48,7 +48,7 @@ import (
// applied:
// nolint: gocyclo
func Setup(
publicAPIMux, synapseAdminRouter, dendriteAdminRouter *mux.Router,
publicAPIMux, wkMux, synapseAdminRouter, dendriteAdminRouter *mux.Router,
cfg *config.ClientAPI,
rsAPI roomserverAPI.ClientRoomserverAPI,
asAPI appserviceAPI.AppServiceInternalAPI,
@ -74,6 +74,26 @@ func Setup(
unstableFeatures["org.matrix."+msc] = true
}
if cfg.Matrix.WellKnownClientName != "" {
logrus.Infof("Setting m.homeserver base_url as %s at /.well-known/matrix/client", cfg.Matrix.WellKnownClientName)
wkMux.Handle("/client", httputil.MakeExternalAPI("wellknown", func(r *http.Request) util.JSONResponse {
return util.JSONResponse{
Code: http.StatusOK,
JSON: struct {
HomeserverName struct {
BaseUrl string `json:"base_url"`
} `json:"m.homeserver"`
}{
HomeserverName: struct {
BaseUrl string `json:"base_url"`
}{
BaseUrl: cfg.Matrix.WellKnownClientName,
},
},
}
})).Methods(http.MethodGet, http.MethodOptions)
}
publicAPIMux.Handle("/versions",
httputil.MakeExternalAPI("versions", func(req *http.Request) util.JSONResponse {
return util.JSONResponse{

View file

@ -37,6 +37,7 @@ var (
flagBuildConcurrency = flag.Int("build-concurrency", runtime.NumCPU(), "The amount of build concurrency when building images")
flagHead = flag.String("head", "", "Location to a dendrite repository to treat as HEAD instead of Github")
flagDockerHost = flag.String("docker-host", "localhost", "The hostname of the docker client. 'localhost' if running locally, 'host.docker.internal' if running in Docker.")
flagDirect = flag.Bool("direct", false, "If a direct upgrade from the defined FROM version to TO should be done")
alphaNumerics = regexp.MustCompile("[^a-zA-Z0-9]+")
)
@ -229,7 +230,7 @@ func getAndSortVersionsFromGithub(httpClient *http.Client) (semVers []*semver.Ve
return semVers, nil
}
func calculateVersions(cli *http.Client, from, to string) []string {
func calculateVersions(cli *http.Client, from, to string, direct bool) []string {
semvers, err := getAndSortVersionsFromGithub(cli)
if err != nil {
log.Fatalf("failed to collect semvers from github: %s", err)
@ -284,6 +285,9 @@ func calculateVersions(cli *http.Client, from, to string) []string {
if to == HEAD {
versions = append(versions, HEAD)
}
if direct {
versions = []string{versions[0], versions[len(versions)-1]}
}
return versions
}
@ -461,7 +465,7 @@ func main() {
os.Exit(1)
}
cleanup(dockerClient)
versions := calculateVersions(httpClient, *flagFrom, *flagTo)
versions := calculateVersions(httpClient, *flagFrom, *flagTo, *flagDirect)
log.Printf("Testing dendrite versions: %v\n", versions)
branchToImageID := buildDendriteImages(httpClient, dockerClient, *flagTempDir, *flagBuildConcurrency, versions)

View file

@ -1,109 +0,0 @@
## Database migrations
We use [goose](https://github.com/pressly/goose) to handle database migrations. This allows us to execute
both SQL deltas (e.g `ALTER TABLE ...`) as well as manipulate data in the database in Go using Go functions.
To run a migration, the `goose` binary in this directory needs to be built:
```
$ go build ./cmd/goose
```
This binary allows Dendrite databases to be upgraded and downgraded. Sample usage for upgrading the roomserver database:
```
# for sqlite
$ ./goose -dir roomserver/storage/sqlite3/deltas sqlite3 ./roomserver.db up
# for postgres
$ ./goose -dir roomserver/storage/postgres/deltas postgres "user=dendrite dbname=dendrite sslmode=disable" up
```
For a full list of options, including rollbacks, see https://github.com/pressly/goose or use `goose` with no args.
### Rationale
Dendrite creates tables on startup using `CREATE TABLE IF NOT EXISTS`, so you might think that we should also
apply version upgrades on startup as well. This is convenient and doesn't involve an additional binary to run
which complicates upgrades. However, combining the upgrade mechanism and the server binary makes it difficult
to handle rollbacks. Firstly, how do you specify you wish to rollback? We would have to add additional flags
to the main server binary to say "rollback to version X". Secondly, if you roll back the server binary from
version 5 to version 4, the version 4 binary doesn't know how to rollback the database from version 5 to
version 4! For these reasons, we prefer to have a separate "upgrade" binary which is run for database upgrades.
Rather than roll-our-own migration tool, we decided to use [goose](https://github.com/pressly/goose) as it supports
complex migrations in Go code in addition to just executing SQL deltas. Other alternatives like
`github.com/golang-migrate/migrate` [do not support](https://github.com/golang-migrate/migrate/issues/15) these
kinds of complex migrations.
### Adding new deltas
You can add `.sql` or `.go` files manually or you can use goose to create them for you.
If you only want to add a SQL delta then run:
```
$ ./goose -dir serverkeyapi/storage/sqlite3/deltas sqlite3 ./foo.db create new_col sql
2020/09/09 14:37:43 Created new file: serverkeyapi/storage/sqlite3/deltas/20200909143743_new_col.sql
```
In this case, the version number is `20200909143743`. The important thing is that it is always increasing.
Then add up/downgrade SQL commands to the created file which looks like:
```sql
-- +goose Up
-- +goose StatementBegin
SELECT 'up SQL query';
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
SELECT 'down SQL query';
-- +goose StatementEnd
```
You __must__ keep the `+goose` annotations. You'll need to repeat this process for Postgres.
For complex Go migrations:
```
$ ./goose -dir serverkeyapi/storage/sqlite3/deltas sqlite3 ./foo.db create complex_update go
2020/09/09 14:40:38 Created new file: serverkeyapi/storage/sqlite3/deltas/20200909144038_complex_update.go
```
Then modify the created `.go` file which looks like:
```go
package migrations
import (
"database/sql"
"fmt"
"github.com/pressly/goose"
)
func init() {
goose.AddMigration(upComplexUpdate, downComplexUpdate)
}
func upComplexUpdate(tx *sql.Tx) error {
// This code is executed when the migration is applied.
return nil
}
func downComplexUpdate(tx *sql.Tx) error {
// This code is executed when the migration is rolled back.
return nil
}
```
You __must__ import the package in `/cmd/goose/main.go` so `func init()` gets called.
#### Database limitations
- SQLite3 does NOT support `ALTER TABLE table_name DROP COLUMN` - you would have to rename the column or drop the table
entirely and recreate it. ([example](https://github.com/matrix-org/dendrite/blob/master/userapi/storage/accounts/sqlite3/deltas/20200929203058_is_active.sql))
More information: [sqlite.org](https://www.sqlite.org/lang_altertable.html)

View file

@ -1,154 +0,0 @@
// This is custom goose binary
package main
import (
"flag"
"fmt"
"log"
"os"
"github.com/pressly/goose"
pgusers "github.com/matrix-org/dendrite/userapi/storage/postgres/deltas"
slusers "github.com/matrix-org/dendrite/userapi/storage/sqlite3/deltas"
_ "github.com/lib/pq"
_ "github.com/mattn/go-sqlite3"
)
const (
AppService = "appservice"
FederationSender = "federationapi"
KeyServer = "keyserver"
MediaAPI = "mediaapi"
RoomServer = "roomserver"
SigningKeyServer = "signingkeyserver"
SyncAPI = "syncapi"
UserAPI = "userapi"
)
var (
dir = flags.String("dir", "", "directory with migration files")
flags = flag.NewFlagSet("goose", flag.ExitOnError)
component = flags.String("component", "", "dendrite component name")
knownDBs = []string{
AppService, FederationSender, KeyServer, MediaAPI, RoomServer, SigningKeyServer, SyncAPI, UserAPI,
}
)
// nolint: gocyclo
func main() {
err := flags.Parse(os.Args[1:])
if err != nil {
panic(err.Error())
}
args := flags.Args()
if len(args) < 3 {
fmt.Println(
`Usage: goose [OPTIONS] DRIVER DBSTRING COMMAND
Drivers:
postgres
sqlite3
Examples:
goose -component roomserver sqlite3 ./roomserver.db status
goose -component roomserver sqlite3 ./roomserver.db up
goose -component roomserver postgres "user=dendrite dbname=dendrite sslmode=disable" status
Options:
-component string
Dendrite component name e.g roomserver, signingkeyserver, clientapi, syncapi
-table string
migrations table name (default "goose_db_version")
-h print help
-v enable verbose mode
-dir string
directory with migration files, only relevant when creating new migrations.
-version
print version
Commands:
up Migrate the DB to the most recent version available
up-by-one Migrate the DB up by 1
up-to VERSION Migrate the DB to a specific VERSION
down Roll back the version by 1
down-to VERSION Roll back to a specific VERSION
redo Re-run the latest migration
reset Roll back all migrations
status Dump the migration status for the current DB
version Print the current version of the database
create NAME [sql|go] Creates new migration file with the current timestamp
fix Apply sequential ordering to migrations`,
)
return
}
engine := args[0]
if engine != "sqlite3" && engine != "postgres" {
fmt.Println("engine must be one of 'sqlite3' or 'postgres'")
return
}
knownComponent := false
for _, c := range knownDBs {
if c == *component {
knownComponent = true
break
}
}
if !knownComponent {
fmt.Printf("component must be one of %v\n", knownDBs)
return
}
if engine == "sqlite3" {
loadSQLiteDeltas(*component)
} else {
loadPostgresDeltas(*component)
}
dbstring, command := args[1], args[2]
db, err := goose.OpenDBWithDriver(engine, dbstring)
if err != nil {
log.Fatalf("goose: failed to open DB: %v\n", err)
}
defer func() {
if err := db.Close(); err != nil {
log.Fatalf("goose: failed to close DB: %v\n", err)
}
}()
arguments := []string{}
if len(args) > 3 {
arguments = append(arguments, args[3:]...)
}
// goose demands a directory even though we don't use it for upgrades
d := *dir
if d == "" {
d = os.TempDir()
}
if err := goose.Run(command, db, d, arguments...); err != nil {
log.Fatalf("goose %v: %v", command, err)
}
}
func loadSQLiteDeltas(component string) {
switch component {
case UserAPI:
slusers.LoadFromGoose()
}
}
func loadPostgresDeltas(component string) {
switch component {
case UserAPI:
pgusers.LoadFromGoose()
}
}

View file

@ -64,6 +64,10 @@ global:
# e.g. localhost:443
well_known_server_name: ""
# The server name to delegate client-server communications to, with optional port
# e.g. localhost:443
well_known_client_name: ""
# Lists of domains that the server will trust as identity servers to verify third
# party identifiers such as phone numbers and email addresses.
trusted_third_party_id_servers:
@ -109,6 +113,11 @@ global:
addresses:
# - localhost:4222
# Disable the validation of TLS certificates of NATS. This is
# not recommended in production since it may allow NATS traffic
# to be sent to an insecure endpoint.
disable_tls_validation: false
# Persistent directory to store JetStream streams in. This directory should be
# preserved across Dendrite restarts.
storage_path: ./
@ -193,7 +202,7 @@ client_api:
# and appservice users are exempt from rate limiting by default.
rate_limiting:
enabled: true
threshold: 5
threshold: 20
cooloff_ms: 500
exempt_user_ids:
# - "@user:domain.com"

View file

@ -54,6 +54,10 @@ global:
# e.g. localhost:443
well_known_server_name: ""
# The server name to delegate client-server communications to, with optional port
# e.g. localhost:443
well_known_client_name: ""
# Lists of domains that the server will trust as identity servers to verify third
# party identifiers such as phone numbers and email addresses.
trusted_third_party_id_servers:
@ -99,6 +103,11 @@ global:
addresses:
- hostname:4222
# Disable the validation of TLS certificates of NATS. This is
# not recommended in production since it may allow NATS traffic
# to be sent to an insecure endpoint.
disable_tls_validation: false
# The prefix to use for stream names for this homeserver - really only useful
# if you are running more than one Dendrite server on the same NATS deployment.
topic_prefix: Dendrite
@ -196,7 +205,7 @@ client_api:
# and appservice users are exempt from rate limiting by default.
rate_limiting:
enabled: true
threshold: 5
threshold: 20
cooloff_ms: 500
exempt_user_ids:
# - "@user:domain.com"

View file

@ -24,7 +24,7 @@ Unfortunately we can't accept contributions without it.
## Getting up and running
See the [Installation](INSTALL.md) section for information on how to build an
See the [Installation](installation) section for information on how to build an
instance of Dendrite. You will likely need this in order to test your changes.
## Code style

View file

@ -233,6 +233,8 @@ GEM
multipart-post (2.1.1)
nokogiri (1.13.6-arm64-darwin)
racc (~> 1.4)
nokogiri (1.13.6-x86_64-linux)
racc (~> 1.4)
octokit (4.22.0)
faraday (>= 0.9)
sawyer (~> 0.8.0, >= 0.5.3)
@ -263,7 +265,7 @@ GEM
thread_safe (0.3.6)
typhoeus (1.4.0)
ethon (>= 0.9.0)
tzinfo (1.2.9)
tzinfo (1.2.10)
thread_safe (~> 0.1)
unf (0.1.4)
unf_ext
@ -273,11 +275,11 @@ GEM
PLATFORMS
arm64-darwin-21
x86_64-linux
DEPENDENCIES
github-pages (~> 226)
jekyll-feed (~> 0.15.1)
minima (~> 2.5.1)
BUNDLED WITH
2.3.7

View file

@ -1,68 +0,0 @@
{
# debug
admin off
email example@example.com
default_sni example.com
# Debug endpoint
# acme_ca https://acme-staging-v02.api.letsencrypt.org/directory
}
#######################################################################
# Snippets
#______________________________________________________________________
(handle_errors_maintenance) {
handle_errors {
@maintenance expression {http.error.status_code} == 502
rewrite @maintenance maintenance.html
root * "/path/to/service/pages"
file_server
}
}
(matrix-well-known-header) {
# Headers
header Access-Control-Allow-Origin "*"
header Access-Control-Allow-Methods "GET, POST, PUT, DELETE, OPTIONS"
header Access-Control-Allow-Headers "Origin, X-Requested-With, Content-Type, Accept, Authorization"
header Content-Type "application/json"
}
#######################################################################
example.com {
# ...
handle /.well-known/matrix/server {
import matrix-well-known-header
respond `{ "m.server": "matrix.example.com:443" }` 200
}
handle /.well-known/matrix/client {
import matrix-well-known-header
respond `{ "m.homeserver": { "base_url": "https://matrix.example.com" } }` 200
}
import handle_errors_maintenance
}
example.com:8448 {
# server<->server HTTPS traffic
reverse_proxy http://dendrite-host:8008
}
matrix.example.com {
handle /_matrix/* {
# client<->server HTTPS traffic
reverse_proxy http://dendrite-host:8008
}
handle_path /* {
# Client webapp (Element SPA or ...)
file_server {
root /path/to/www/example.com/matrix-web-client/
}
}
}

View file

@ -0,0 +1,57 @@
# Sample Caddyfile for using Caddy in front of Dendrite.
#
# Customize email address and domain names.
# Optional settings commented out.
#
# BE SURE YOUR DOMAINS ARE POINTED AT YOUR SERVER FIRST.
# Documentation: https://caddyserver.com/docs/
#
# Bonus tip: If your IP address changes, use Caddy's
# dynamic DNS plugin to update your DNS records to
# point to your new IP automatically:
# https://github.com/mholt/caddy-dynamicdns
#
# Global options block
{
# In case there is a problem with your certificates.
# email example@example.com
# Turn off the admin endpoint if you don't need graceful config
# changes and/or are running untrusted code on your machine.
# admin off
# Enable this if your clients don't send ServerName in TLS handshakes.
# default_sni example.com
# Enable debug mode for verbose logging.
# debug
# Use Let's Encrypt's staging endpoint for testing.
# acme_ca https://acme-staging-v02.api.letsencrypt.org/directory
# If you're port-forwarding HTTP/HTTPS ports from 80/443 to something
# else, enable these and put the alternate port numbers here.
# http_port 8080
# https_port 8443
}
# The server name of your matrix homeserver. This example shows
# "well-known delegation" from the registered domain to a subdomain,
# which is only needed if your server_name doesn't match your Matrix
# homeserver URL (i.e. you can show users a vanity domain that looks
# nice and is easy to remember but still have your Matrix server on
# its own subdomain or hosted service).
example.com {
header /.well-known/matrix/* Content-Type application/json
header /.well-known/matrix/* Access-Control-Allow-Origin *
respond /.well-known/matrix/server `{"m.server": "matrix.example.com:443"}`
respond /.well-known/matrix/client `{"m.homeserver": {"base_url": "https://matrix.example.com"}}`
}
# The actual domain name whereby your Matrix server is accessed.
matrix.example.com {
# Set localhost:8008 to the address of your Dendrite server, if different
reverse_proxy /_matrix/* localhost:8008
}

View file

@ -0,0 +1,66 @@
# Sample Caddyfile for using Caddy in front of Dendrite.
#
# Customize email address and domain names.
# Optional settings commented out.
#
# BE SURE YOUR DOMAINS ARE POINTED AT YOUR SERVER FIRST.
# Documentation: https://caddyserver.com/docs/
#
# Bonus tip: If your IP address changes, use Caddy's
# dynamic DNS plugin to update your DNS records to
# point to your new IP automatically:
# https://github.com/mholt/caddy-dynamicdns
#
# Global options block
{
# In case there is a problem with your certificates.
# email example@example.com
# Turn off the admin endpoint if you don't need graceful config
# changes and/or are running untrusted code on your machine.
# admin off
# Enable this if your clients don't send ServerName in TLS handshakes.
# default_sni example.com
# Enable debug mode for verbose logging.
# debug
# Use Let's Encrypt's staging endpoint for testing.
# acme_ca https://acme-staging-v02.api.letsencrypt.org/directory
# If you're port-forwarding HTTP/HTTPS ports from 80/443 to something
# else, enable these and put the alternate port numbers here.
# http_port 8080
# https_port 8443
}
# The server name of your matrix homeserver. This example shows
# "well-known delegation" from the registered domain to a subdomain,
# which is only needed if your server_name doesn't match your Matrix
# homeserver URL (i.e. you can show users a vanity domain that looks
# nice and is easy to remember but still have your Matrix server on
# its own subdomain or hosted service).
example.com {
header /.well-known/matrix/* Content-Type application/json
header /.well-known/matrix/* Access-Control-Allow-Origin *
respond /.well-known/matrix/server `{"m.server": "matrix.example.com:443"}`
respond /.well-known/matrix/client `{"m.homeserver": {"base_url": "https://matrix.example.com"}}`
}
# The actual domain name whereby your Matrix server is accessed.
matrix.example.com {
# Change the end of each reverse_proxy line to the correct
# address for your various services.
@sync_api {
path_regexp /_matrix/client/.*?/(sync|user/.*?/filter/?.*|keys/changes|rooms/.*?/messages)$
}
reverse_proxy @sync_api sync_api:8073
reverse_proxy /_matrix/client* client_api:8071
reverse_proxy /_matrix/federation* federation_api:8071
reverse_proxy /_matrix/key* federation_api:8071
reverse_proxy /_matrix/media* media_api:8071
}

View file

@ -2,7 +2,7 @@
title: Starting the polylith
parent: Installation
has_toc: true
nav_order: 9
nav_order: 10
permalink: /installation/start/polylith
---

View file

@ -2,7 +2,7 @@
title: Optimise your installation
parent: Installation
has_toc: true
nav_order: 10
nav_order: 11
permalink: /installation/start/optimisation
---

View file

@ -95,12 +95,13 @@ enabled.
To do so, follow the [NATS Server installation instructions](https://docs.nats.io/running-a-nats-service/introduction/installation) and then [start your NATS deployment](https://docs.nats.io/running-a-nats-service/introduction/running). JetStream must be enabled, either by passing the `-js` flag to `nats-server`,
or by specifying the `store_dir` option in the `jetstream` configuration.
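If you point Dendrite at a standalone NATS deployment rather than the embedded server, the relevant settings live in the `jetstream` block of the Dendrite configuration. A minimal sketch, based on the sample configuration files in this repository and assuming NATS is reachable on `localhost:4222`:

```yaml
global:
  jetstream:
    # Address(es) of the standalone NATS deployment. Leaving this list
    # empty typically makes the monolith run its own embedded NATS server.
    addresses:
      - localhost:4222
    # Keep TLS certificate validation enabled unless you have a good
    # reason to disable it.
    disable_tls_validation: false
    # Prefix for stream names, only useful when several Dendrite servers
    # share one NATS deployment.
    topic_prefix: Dendrite
```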
### Reverse proxy (polylith deployments)
### Reverse proxy
Polylith deployments require a reverse proxy, such as [NGINX](https://www.nginx.com) or
[HAProxy](http://www.haproxy.org). Configuring those is not covered in this documentation,
although a [sample configuration for NGINX](https://github.com/matrix-org/dendrite/blob/main/docs/nginx/polylith-sample.conf)
is provided.
A reverse proxy such as [Caddy](https://caddyserver.com), [NGINX](https://www.nginx.com) or
[HAProxy](http://www.haproxy.org) is required for polylith deployments and is useful for monolith
deployments. Configuring those is not covered in this documentation, although sample configurations
for [Caddy](https://github.com/matrix-org/dendrite/blob/main/docs/caddy) and
[NGINX](https://github.com/matrix-org/dendrite/blob/main/docs/nginx) are provided.
### Windows

View file

@ -29,15 +29,23 @@ The exact details of how server name resolution works can be found in
## TLS certificates
Matrix federation requires that valid TLS certificates are present on the domain. You must
obtain certificates from a publicly accepted Certificate Authority (CA). [LetsEncrypt](https://letsencrypt.org)
is an example of such a CA that can be used. Self-signed certificates are not suitable for
federation and will typically not be accepted by other homeservers.
obtain certificates from a publicly trusted certificate authority (CA). [Let's Encrypt](https://letsencrypt.org)
is a popular choice of CA because its certificates are publicly trusted, free, and can be issued and
renewed automatically via the ACME protocol. (Self-signed certificates are not suitable for federation
and will typically not be accepted by other homeservers.)
A common practice to help ease the management of certificates is to install a reverse proxy in
front of Dendrite which manages the TLS certificates and HTTPS proxying itself. Software such as
[NGINX](https://www.nginx.com) and [HAProxy](http://www.haproxy.org) can be used for the task.
Although the finer details of configuring these are not described here, you must reverse proxy
all `/_matrix` paths to your Dendrite server.
Automating the renewal of TLS certificates is best practice. There are many tools for this,
but the simplest way to achieve TLS automation is to have your reverse proxy do it for you.
[Caddy](https://caddyserver.com) is recommended as a production-grade reverse proxy with automatic
TLS and is commonly used in front of Dendrite. It obtains and renews TLS certificates automatically
and by default, as long as your domain name is pointed at your server first.
Although the finer details of [configuring Caddy](https://caddyserver.com/docs/) are not described
here, in general, you must reverse proxy all `/_matrix` paths to your Dendrite server. For example,
with Caddy:
```
reverse_proxy /_matrix/* localhost:8008
```
It is possible for the reverse proxy to listen on the standard HTTPS port TCP/443 so long as your
domain delegation is configured to point to port TCP/443.
@ -76,6 +84,16 @@ and contain the following JSON document:
}
```
For example, this can be done with the following Caddy config:
```
handle /.well-known/matrix/client {
header Content-Type application/json
header Access-Control-Allow-Origin *
respond `{"m.homeserver": {"base_url": "https://matrix.example.com:8448"}}`
}
```
You can also serve `.well-known` with Dendrite itself by setting the `well_known_server_name` config
option to the value you want for `m.server`. This is primarily useful if Dendrite is exposed on
`example.com:443` and you don't want to set up a separate webserver just for serving the `.well-known`

View file

@ -0,0 +1,38 @@
---
title: Building Dendrite
parent: Installation
has_toc: true
nav_order: 3
permalink: /installation/build
---
# Build all Dendrite commands
Dendrite has numerous utility commands in addition to the actual server binaries.
Build them all from the root of the source repo with `build.sh` (Linux/Mac):
```sh
./build.sh
```
or `build.cmd` (Windows):
```powershell
build.cmd
```
The resulting binaries will be placed in the `bin` subfolder.
# Installing as a monolith
You can install the Dendrite monolith binary into `$GOPATH/bin` by using `go install`:
```sh
go install ./cmd/dendrite-monolith-server
```
Alternatively, you can specify a custom path for the binary to be written to using `go build`:
```sh
go build -o /usr/local/bin/ ./cmd/dendrite-monolith-server
```

View file

@ -17,7 +17,9 @@ filenames in the Dendrite configuration file and start Dendrite. The databases w
and populated automatically.
Note that Dendrite **cannot share a single SQLite database across multiple components**. Each
component must be configured with its own SQLite database filename.
component must be configured with its own SQLite database filename. To use SQLite, you will have to
remove the `global.database` section from your Dendrite config and instead add a `database` section
to each individual component.
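For illustration only, a rough sketch of what per-component SQLite configuration can look like, using hypothetical database filenames; check the sample configuration files for the exact section and key names in your Dendrite version:

```yaml
# Hypothetical per-component SQLite configuration: each component points
# at its own database file instead of a shared global.database section.
room_server:
  database:
    connection_string: file:roomserver.db

sync_api:
  database:
    connection_string: file:syncapi.db

media_api:
  database:
    connection_string: file:mediaapi.db
```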
### Connection strings

View file

@ -29,5 +29,6 @@ Polylith deployments require a reverse proxy in order to ensure that requests ar
sent to the correct endpoint. You must ensure that a suitable reverse proxy is installed
and configured.
A [sample configuration file](https://github.com/matrix-org/dendrite/blob/main/docs/nginx/polylith-sample.conf)
is provided for [NGINX](https://www.nginx.com).
Sample configurations are provided
for [Caddy](https://github.com/matrix-org/dendrite/blob/main/docs/caddy/polylith/Caddyfile)
and [NGINX](https://github.com/matrix-org/dendrite/blob/main/docs/nginx/polylith-sample.conf).

View file

@ -1,13 +1,13 @@
---
title: Populate the configuration
title: Configuring Dendrite
parent: Installation
nav_order: 7
permalink: /installation/configuration
---
# Populate the configuration
# Configuring Dendrite
The configuration file is used to configure Dendrite. Sample configuration files are
A YAML configuration file is used to configure Dendrite. Sample configuration files are
present in the top level of the Dendrite repository:
* [`dendrite-sample.monolith.yaml`](https://github.com/matrix-org/dendrite/blob/main/dendrite-sample.monolith.yaml)

View file

@ -1,7 +1,7 @@
---
title: Generating signing keys
parent: Installation
nav_order: 4
nav_order: 8
permalink: /installation/signingkeys
---

View file

@ -15,8 +15,9 @@ you can start your Dendrite monolith deployment by starting the `dendrite-monoli
./dendrite-monolith-server -config /path/to/dendrite.yaml
```
If you want to change the addresses or ports that Dendrite listens on, you
can use the `-http-bind-address` and `-https-bind-address` command line arguments:
By default, Dendrite listens for HTTP connections on port 8008. If you want to change the addresses
or ports that Dendrite listens on, you can use the `-http-bind-address` and
`-https-bind-address` command line arguments:
```bash
./dendrite-monolith-server -config /path/to/dendrite.yaml \

View file

@ -26,7 +26,6 @@ import (
"github.com/matrix-org/dendrite/setup/config"
"github.com/matrix-org/gomatrixserverlib"
"github.com/matrix-org/util"
"github.com/sirupsen/logrus"
)
// InviteV2 implements /_matrix/federation/v2/invite/{roomID}/{eventID}
@ -144,7 +143,6 @@ func processInvite(
// Check that the event is signed by the server sending the request.
redacted, err := gomatrixserverlib.RedactEventJSON(event.JSON(), event.Version())
if err != nil {
logrus.WithError(err).Errorf("XXX: invite.go")
return util.JSONResponse{
Code: http.StatusBadRequest,
JSON: jsonerror.BadJSON("The event JSON could not be redacted"),

View file

@ -202,6 +202,14 @@ func SendJoin(
}
}
// Check that the event is from the server sending the request.
if event.Origin() != request.Origin() {
return util.JSONResponse{
Code: http.StatusForbidden,
JSON: jsonerror.Forbidden("The join must be sent by the server it originated on"),
}
}
// Check that a state key is provided.
if event.StateKey() == nil || event.StateKeyEquals("") {
return util.JSONResponse{
@ -216,6 +224,22 @@ func SendJoin(
}
}
// Check that the sender belongs to the server that is sending us
// the request. By this point we've already asserted that the sender
// and the state key are equal so we don't need to check both.
var domain gomatrixserverlib.ServerName
if _, domain, err = gomatrixserverlib.SplitID('@', event.Sender()); err != nil {
return util.JSONResponse{
Code: http.StatusForbidden,
JSON: jsonerror.Forbidden("The sender of the join is invalid"),
}
} else if domain != request.Origin() {
return util.JSONResponse{
Code: http.StatusForbidden,
JSON: jsonerror.Forbidden("The sender of the join must belong to the origin server"),
}
}
// Check that the room ID is correct.
if event.RoomID() != roomID {
return util.JSONResponse{
@ -242,14 +266,6 @@ func SendJoin(
}
}
// Check that the event is from the server sending the request.
if event.Origin() != request.Origin() {
return util.JSONResponse{
Code: http.StatusForbidden,
JSON: jsonerror.Forbidden("The join must be sent by the server it originated on"),
}
}
// Check that this is in fact a join event
membership, err := event.Membership()
if err != nil {

View file

@ -15,23 +15,13 @@
package deltas
import (
"context"
"database/sql"
"fmt"
"github.com/matrix-org/dendrite/internal/sqlutil"
"github.com/pressly/goose"
)
func LoadFromGoose() {
goose.AddMigration(UpRemoveRoomsTable, DownRemoveRoomsTable)
}
func LoadRemoveRoomsTable(m *sqlutil.Migrations) {
m.AddMigration(UpRemoveRoomsTable, DownRemoveRoomsTable)
}
func UpRemoveRoomsTable(tx *sql.Tx) error {
_, err := tx.Exec(`
func UpRemoveRoomsTable(ctx context.Context, tx *sql.Tx) error {
_, err := tx.ExecContext(ctx, `
DROP TABLE IF EXISTS federationsender_rooms;
`)
if err != nil {

View file

@ -82,9 +82,13 @@ func NewDatabase(base *base.BaseDendrite, dbProperties *config.DatabaseOptions,
if err != nil {
return nil, err
}
m := sqlutil.NewMigrations()
deltas.LoadRemoveRoomsTable(m)
if err = m.RunDeltas(d.db, dbProperties); err != nil {
m := sqlutil.NewMigrator(d.db)
m.AddMigrations(sqlutil.Migration{
Version: "federationsender: drop federationsender_rooms",
Up: deltas.UpRemoveRoomsTable,
})
err = m.Up(base.Context())
if err != nil {
return nil, err
}
d.Database = shared.Database{

View file

@ -15,23 +15,13 @@
package deltas
import (
"context"
"database/sql"
"fmt"
"github.com/matrix-org/dendrite/internal/sqlutil"
"github.com/pressly/goose"
)
func LoadFromGoose() {
goose.AddMigration(UpRemoveRoomsTable, DownRemoveRoomsTable)
}
func LoadRemoveRoomsTable(m *sqlutil.Migrations) {
m.AddMigration(UpRemoveRoomsTable, DownRemoveRoomsTable)
}
func UpRemoveRoomsTable(tx *sql.Tx) error {
_, err := tx.Exec(`
func UpRemoveRoomsTable(ctx context.Context, tx *sql.Tx) error {
_, err := tx.ExecContext(ctx, `
DROP TABLE IF EXISTS federationsender_rooms;
`)
if err != nil {

View file

@ -81,9 +81,13 @@ func NewDatabase(base *base.BaseDendrite, dbProperties *config.DatabaseOptions,
if err != nil {
return nil, err
}
m := sqlutil.NewMigrations()
deltas.LoadRemoveRoomsTable(m)
if err = m.RunDeltas(d.db, dbProperties); err != nil {
m := sqlutil.NewMigrator(d.db)
m.AddMigrations(sqlutil.Migration{
Version: "federationsender: drop federationsender_rooms",
Up: deltas.UpRemoveRoomsTable,
})
err = m.Up(base.Context())
if err != nil {
return nil, err
}
d.Database = shared.Database{

28
go.mod
View file

@ -1,9 +1,5 @@
module github.com/matrix-org/dendrite
replace github.com/nats-io/nats-server/v2 => github.com/neilalexander/nats-server/v2 v2.8.3-0.20220513095553-73a9a246d34f
replace github.com/nats-io/nats.go => github.com/neilalexander/nats.go v1.13.1-0.20220621084451-ac518c356673
require (
github.com/Arceliar/ironwood v0.0.0-20220306165321-319147a02d98
github.com/Arceliar/phony v0.0.0-20210209235338-dde1a8dca979
@ -25,19 +21,18 @@ require (
github.com/matrix-org/dugong v0.0.0-20210921133753-66e6b1c67e2e
github.com/matrix-org/go-sqlite3-js v0.0.0-20220419092513-28aa791a1c91
github.com/matrix-org/gomatrix v0.0.0-20210324163249-be2af5ef2e16
github.com/matrix-org/gomatrixserverlib v0.0.0-20220718085240-f08f98af7d2d
github.com/matrix-org/pinecone v0.0.0-20220708135211-1ce778fcde6a
github.com/matrix-org/gomatrixserverlib v0.0.0-20220725104114-b6003e522771
github.com/matrix-org/pinecone v0.0.0-20220803093810-b7a830c08fb9
github.com/matrix-org/util v0.0.0-20200807132607-55161520e1d4
github.com/mattn/go-sqlite3 v1.14.13
github.com/nats-io/nats-server/v2 v2.7.4-0.20220309205833-773636c1c5bb
github.com/nats-io/nats.go v1.14.0
github.com/nats-io/nats-server/v2 v2.8.5-0.20220731184415-903a06a5b4ee
github.com/nats-io/nats.go v1.16.1-0.20220731182438-87bbea85922b
github.com/neilalexander/utp v0.1.1-0.20210727203401-54ae7b1cd5f9
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646
github.com/ngrok/sqlmw v0.0.0-20220520173518-97c9c04efc79
github.com/opentracing/opentracing-go v1.2.0
github.com/patrickmn/go-cache v2.1.0+incompatible
github.com/pkg/errors v0.9.1
github.com/pressly/goose v2.7.0+incompatible
github.com/prometheus/client_golang v1.12.2
github.com/sirupsen/logrus v1.8.1
github.com/spruceid/siwe-go v0.2.0
@ -52,7 +47,7 @@ require (
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e
golang.org/x/image v0.0.0-20220413100746-70e8d0d3baa9
golang.org/x/mobile v0.0.0-20220518205345-8578da9835fd
golang.org/x/net v0.0.0-20220524220425-1d687d428aca
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467
gopkg.in/h2non/bimg.v1 v1.1.9
gopkg.in/yaml.v2 v2.4.0
@ -83,17 +78,18 @@ require (
github.com/h2non/filetype v1.1.3 // indirect
github.com/juju/errors v0.0.0-20220203013757-bd733f3c86b9 // indirect
github.com/juju/testing v0.0.0-20220203020004-a0ff61f03494 // indirect
github.com/klauspost/compress v1.14.4 // indirect
github.com/lucas-clemente/quic-go v0.26.0 // indirect
github.com/klauspost/compress v1.15.9 // indirect
github.com/lucas-clemente/quic-go v0.28.1 // indirect
github.com/marten-seemann/qtls-go1-16 v0.1.5 // indirect
github.com/marten-seemann/qtls-go1-17 v0.1.1 // indirect
github.com/marten-seemann/qtls-go1-18 v0.1.1 // indirect
github.com/marten-seemann/qtls-go1-17 v0.1.2 // indirect
github.com/marten-seemann/qtls-go1-18 v0.1.2 // indirect
github.com/marten-seemann/qtls-go1-19 v0.1.0-beta.1 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/miekg/dns v1.1.49 // indirect
github.com/minio/highwayhash v1.0.2 // indirect
github.com/moby/term v0.0.0-20210610120745-9d4ed1856297 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/nats-io/jwt/v2 v2.2.1-0.20220330180145-442af02fd36a // indirect
github.com/nats-io/jwt/v2 v2.3.0 // indirect
github.com/nats-io/nkeys v0.3.0 // indirect
github.com/nats-io/nuid v1.0.1 // indirect
github.com/nxadm/tail v1.4.8 // indirect
@ -111,7 +107,7 @@ require (
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect
golang.org/x/text v0.3.8-0.20211004125949-5bd84dd9b33b // indirect
golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 // indirect
golang.org/x/time v0.0.0-20220411224347-583f2d630306 // indirect
golang.org/x/tools v0.1.10 // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
google.golang.org/protobuf v1.27.1 // indirect

51
go.sum
View file

@ -426,8 +426,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.14.4 h1:eijASRJcobkVtSt81Olfh7JX43osYLwy5krOJo6YEu4=
github.com/klauspost/compress v1.14.4/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.15.9 h1:wKRjX6JRtDdrE9qwa4b/Cip7ACOshUI4smpCQanqjSY=
github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
@ -454,8 +454,8 @@ github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgx
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.10.5 h1:J+gdV2cUmX7ZqL2B0lFcW0m+egaHC2V3lpO8nWxyYiQ=
github.com/lib/pq v1.10.5/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lucas-clemente/quic-go v0.26.0 h1:ALBQXr9UJ8A1LyzvceX4jd9QFsHvlI0RR6BkV16o00A=
github.com/lucas-clemente/quic-go v0.26.0/go.mod h1:AzgQoPda7N+3IqMMMkywBKggIFo2KT6pfnlrQ2QieeI=
github.com/lucas-clemente/quic-go v0.28.1 h1:Uo0lvVxWg5la9gflIF9lwa39ONq85Xq2D91YNEIslzU=
github.com/lucas-clemente/quic-go v0.28.1/go.mod h1:oGz5DKK41cJt5+773+BSO9BXDsREY4HLf7+0odGAPO0=
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
github.com/lxn/walk v0.0.0-20210112085537-c389da54e794/go.mod h1:E23UucZGqpuUANJooIbHWCufXvOcT6E7Stq81gU+CSQ=
github.com/lxn/win v0.0.0-20210218163916-a377121e959e/go.mod h1:KxxjdtRkfNoYDCUP5ryK7XJJNTnpC8atvtmTheChOtk=
@ -465,10 +465,12 @@ github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN
github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc=
github.com/marten-seemann/qtls-go1-16 v0.1.5 h1:o9JrYPPco/Nukd/HpOHMHZoBDXQqoNtUCmny98/1uqQ=
github.com/marten-seemann/qtls-go1-16 v0.1.5/go.mod h1:gNpI2Ol+lRS3WwSOtIUUtRwZEQMXjYK+dQSBFbethAk=
github.com/marten-seemann/qtls-go1-17 v0.1.1 h1:DQjHPq+aOzUeh9/lixAGunn6rIOQyWChPSI4+hgW7jc=
github.com/marten-seemann/qtls-go1-17 v0.1.1/go.mod h1:C2ekUKcDdz9SDWxec1N/MvcXBpaX9l3Nx67XaR84L5s=
github.com/marten-seemann/qtls-go1-18 v0.1.1 h1:qp7p7XXUFL7fpBvSS1sWD+uSqPvzNQK43DH+/qEkj0Y=
github.com/marten-seemann/qtls-go1-18 v0.1.1/go.mod h1:mJttiymBAByA49mhlNZZGrH5u1uXYZJ+RW28Py7f4m4=
github.com/marten-seemann/qtls-go1-17 v0.1.2 h1:JADBlm0LYiVbuSySCHeY863dNkcpMmDR7s0bLKJeYlQ=
github.com/marten-seemann/qtls-go1-17 v0.1.2/go.mod h1:C2ekUKcDdz9SDWxec1N/MvcXBpaX9l3Nx67XaR84L5s=
github.com/marten-seemann/qtls-go1-18 v0.1.2 h1:JH6jmzbduz0ITVQ7ShevK10Av5+jBEKAHMntXmIV7kM=
github.com/marten-seemann/qtls-go1-18 v0.1.2/go.mod h1:mJttiymBAByA49mhlNZZGrH5u1uXYZJ+RW28Py7f4m4=
github.com/marten-seemann/qtls-go1-19 v0.1.0-beta.1 h1:7m/WlWcSROrcK5NxuXaxYD32BZqe/LEEnBrWcH/cOqQ=
github.com/marten-seemann/qtls-go1-19 v0.1.0-beta.1/go.mod h1:5HTDWtVudo/WFsHKRNuOhWlbdjrfs5JHrYb0wIJqGpI=
github.com/matrix-org/dugong v0.0.0-20210921133753-66e6b1c67e2e h1:DP5RC0Z3XdyBEW5dKt8YPeN6vZbm6OzVaGVp7f1BQRM=
github.com/matrix-org/dugong v0.0.0-20210921133753-66e6b1c67e2e/go.mod h1:NgPCr+UavRGH6n5jmdX8DuqFZ4JiCWIJoZiuhTRLSUg=
github.com/matrix-org/go-sqlite3-js v0.0.0-20220419092513-28aa791a1c91 h1:s7fexw2QV3YD/fRrzEDPNGgTlJlvXY0EHHnT87wF3OA=
@ -476,10 +478,10 @@ github.com/matrix-org/go-sqlite3-js v0.0.0-20220419092513-28aa791a1c91/go.mod h1
github.com/matrix-org/gomatrix v0.0.0-20190528120928-7df988a63f26/go.mod h1:3fxX6gUjWyI/2Bt7J1OLhpCzOfO/bB3AiX0cJtEKud0=
github.com/matrix-org/gomatrix v0.0.0-20210324163249-be2af5ef2e16 h1:ZtO5uywdd5dLDCud4r0r55eP4j9FuUNpl60Gmntcop4=
github.com/matrix-org/gomatrix v0.0.0-20210324163249-be2af5ef2e16/go.mod h1:/gBX06Kw0exX1HrwmoBibFA98yBk/jxKpGVeyQbff+s=
github.com/matrix-org/gomatrixserverlib v0.0.0-20220718085240-f08f98af7d2d h1:BWInUURXVOW+OiifMapoRIS7i122KWdEKj6fnDFXgBo=
github.com/matrix-org/gomatrixserverlib v0.0.0-20220718085240-f08f98af7d2d/go.mod h1:jX38yp3SSLJNftBg3PXU1ayd0PCLIiDHQ4xAc9DIixk=
github.com/matrix-org/pinecone v0.0.0-20220708135211-1ce778fcde6a h1:DdG8vXMlZ65EAtc4V+3t7zHZ2Gqs24pSnyXS+4BRHUs=
github.com/matrix-org/pinecone v0.0.0-20220708135211-1ce778fcde6a/go.mod h1:ulJzsVOTssIVp1j/m5eI//4VpAGDkMt5NrRuAVX7wpc=
github.com/matrix-org/gomatrixserverlib v0.0.0-20220725104114-b6003e522771 h1:ZIPHFIPNDS9dmEbPEiJbNmyCGJtn9exfpLC7JOcn/bE=
github.com/matrix-org/gomatrixserverlib v0.0.0-20220725104114-b6003e522771/go.mod h1:jX38yp3SSLJNftBg3PXU1ayd0PCLIiDHQ4xAc9DIixk=
github.com/matrix-org/pinecone v0.0.0-20220803093810-b7a830c08fb9 h1:ed8yvWhTLk7+sNeK/eOZRTvESFTOHDRevoRoyeqPtvY=
github.com/matrix-org/pinecone v0.0.0-20220803093810-b7a830c08fb9/go.mod h1:P4MqPf+u83OPulPJ+XTbSDbbWrdFYNY4LZ/B1PIduFE=
github.com/matrix-org/util v0.0.0-20190711121626-527ce5ddefc7/go.mod h1:vVQlW/emklohkZnOPwD3LrZUBqdfsbiyO3p1lNV8F6U=
github.com/matrix-org/util v0.0.0-20200807132607-55161520e1d4 h1:eCEHXWDv9Rm335MSuB49mFUK44bwZPFSDde3ORE3syk=
github.com/matrix-org/util v0.0.0-20200807132607-55161520e1d4/go.mod h1:vVQlW/emklohkZnOPwD3LrZUBqdfsbiyO3p1lNV8F6U=
@ -532,8 +534,12 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0=
github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E=
github.com/nats-io/jwt/v2 v2.2.1-0.20220330180145-442af02fd36a h1:lem6QCvxR0Y28gth9P+wV2K/zYUUAkJ+55U8cpS0p5I=
github.com/nats-io/jwt/v2 v2.2.1-0.20220330180145-442af02fd36a/go.mod h1:0tqz9Hlu6bCBFLWAASKhE5vUA4c24L9KPUUgvwumE/k=
github.com/nats-io/jwt/v2 v2.3.0 h1:z2mA1a7tIf5ShggOFlR1oBPgd6hGqcDYsISxZByUzdI=
github.com/nats-io/jwt/v2 v2.3.0/go.mod h1:0tqz9Hlu6bCBFLWAASKhE5vUA4c24L9KPUUgvwumE/k=
github.com/nats-io/nats-server/v2 v2.8.5-0.20220731184415-903a06a5b4ee h1:vAtoZ+LW6eIUjkCWWwO1DZ6o16UGrVOG+ot/AkwejO8=
github.com/nats-io/nats-server/v2 v2.8.5-0.20220731184415-903a06a5b4ee/go.mod h1:3Yg3ApyQxPlAs1KKHKV5pobV5VtZk+TtOiUJx/iqkkg=
github.com/nats-io/nats.go v1.16.1-0.20220731182438-87bbea85922b h1:CE9wSYLvwq8aC/0+6zH8lhhtZYvJ9p8PzwvZeYgdBc0=
github.com/nats-io/nats.go v1.16.1-0.20220731182438-87bbea85922b/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w=
github.com/nats-io/nkeys v0.3.0 h1:cgM5tL53EvYRU+2YLXIK0G2mJtK12Ft9oeooSZMA2G8=
github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
@ -541,10 +547,6 @@ github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OS
github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uYLpLIr5fm8diHn0JbqRycJi6w0Ms=
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
github.com/neilalexander/nats-server/v2 v2.8.3-0.20220513095553-73a9a246d34f h1:Fc+TjdV1mOy0oISSzfoxNWdTqjg7tN/Vdgf+B2cwvdo=
github.com/neilalexander/nats-server/v2 v2.8.3-0.20220513095553-73a9a246d34f/go.mod h1:vIdpKz3OG+DCg4q/xVPdXHoztEyKDWRtykQ4N7hd7C4=
github.com/neilalexander/nats.go v1.13.1-0.20220621084451-ac518c356673 h1:TcKfa3Tf0qwUotv63PQVu2d1bBoLi2iEA4RHVMGDh5M=
github.com/neilalexander/nats.go v1.13.1-0.20220621084451-ac518c356673/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w=
github.com/neilalexander/utp v0.1.1-0.20210727203401-54ae7b1cd5f9 h1:lrVQzBtkeQEGGYUHwSX1XPe1E5GL6U3KYCNe2G4bncQ=
github.com/neilalexander/utp v0.1.1-0.20210727203401-54ae7b1cd5f9/go.mod h1:NPHGhPc0/wudcaCqL/H5AOddkRf8GPRhzOujuUKGQu8=
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646 h1:zYyBkD/k9seD2A7fsi6Oo2LfFZAehjjQMERAvZLEDnQ=
@ -595,8 +597,6 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pressly/goose v2.7.0+incompatible h1:PWejVEv07LCerQEzMMeAtjuyCKbyprZ/LBa6K5P0OCQ=
github.com/pressly/goose v2.7.0+incompatible/go.mod h1:m+QHWCqxR3k8D9l7qfzuC/djtlfzxr34mozWDYEu1z8=
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
@ -746,7 +746,6 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
@ -866,8 +865,8 @@ golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20210927181540-4e4d966f7476/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211011170408-caeb26a5c8c0/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211101193420-4a448f8816b3/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220524220425-1d687d428aca h1:xTaFYiPROfpPhqrfTIDXj0ri1SpfueYT951s4bAuDO8=
golang.org/x/net v0.0.0-20220524220425-1d687d428aca/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e h1:TsQ7F31D3bUCLeqPT0u+yjp1guoArKaNKmCr22PYgTQ=
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@ -969,6 +968,7 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbuf
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 h1:CBpWXWQpIRjzmkkA+M7q9Fqnwd2mZr3AFqexg8YTfoM=
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -979,6 +979,7 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8-0.20211004125949-5bd84dd9b33b h1:NXqSWXSRUSCaFuvitrWtU169I3876zRTalMRbfd6LL0=
golang.org/x/text v0.3.8-0.20211004125949-5bd84dd9b33b/go.mod h1:EFNZuWvGYxIRUEX+K8UmCFwYmZjqcrnq15ZuVldZkZ0=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@ -987,8 +988,8 @@ golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxb
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 h1:GZokNIeuVkl3aZHJchRrr13WCsols02MLUcz1U9is6M=
golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20220411224347-583f2d630306 h1:+gHMid33q6pen7kv9xvT+JRinntgeXO2AeZVd0AWD3w=
golang.org/x/time v0.0.0-20220411224347-583f2d630306/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=

View file

@ -1,33 +0,0 @@
package caching
import (
"github.com/matrix-org/dendrite/roomserver/types"
)
// WARNING: This cache is mutable because it's entirely possible that
// the IsStub or StateSnapshotNID fields can change, even though the
// room version and room NID fields will not. This is only safe because
// the RoomInfoCache is used ONLY within the roomserver and because it
// will be kept up-to-date by the latest events updater. It MUST NOT be
// used from other components as we currently have no way to invalidate
// the cache in downstream components.
// RoomInfoCache contains the subset of functions needed for
// a room info cache. It must only be used from within the roomserver;
// it is not safe for use from other components.
type RoomInfoCache interface {
GetRoomInfo(roomID string) (roomInfo *types.RoomInfo, ok bool)
StoreRoomInfo(roomID string, roomInfo *types.RoomInfo)
}
// GetRoomInfo must only be called from within the roomserver. It is not
// safe for use from other components.
func (c Caches) GetRoomInfo(roomID string) (*types.RoomInfo, bool) {
return c.RoomInfos.Get(roomID)
}
// StoreRoomInfo must only be called from within the roomserver. It is not
// safe for use from other components.
func (c Caches) StoreRoomInfo(roomID string, roomInfo *types.RoomInfo) {
c.RoomInfos.Set(roomID, roomInfo)
}

View file

@ -7,7 +7,6 @@ import (
type RoomServerCaches interface {
RoomServerNIDsCache
RoomVersionCache
RoomInfoCache
RoomServerEventsCache
EventStateKeyCache
}

View file

@ -29,7 +29,6 @@ type Caches struct {
RoomServerRoomIDs Cache[types.RoomNID, string] // room NID -> room ID
RoomServerEvents Cache[int64, *gomatrixserverlib.Event] // event NID -> event
RoomServerStateKeys Cache[types.EventStateKeyNID, string] // event NID -> event state key
RoomInfos Cache[string, *types.RoomInfo] // room ID -> room info
FederationPDUs Cache[int64, *gomatrixserverlib.HeaderedEvent] // queue NID -> PDU
FederationEDUs Cache[int64, *gomatrixserverlib.EDU] // queue NID -> EDU
SpaceSummaryRooms Cache[string, gomatrixserverlib.MSC2946SpacesResponse] // room ID -> space response

View file

@ -35,7 +35,6 @@ const (
roomNIDsCache
roomIDsCache
roomEventsCache
roomInfosCache
federationPDUsCache
federationEDUsCache
spaceSummaryRoomsCache
@ -106,12 +105,6 @@ func NewRistrettoCache(maxCost config.DataUnit, maxAge time.Duration, enableProm
Prefix: eventStateKeyCache,
MaxAge: maxAge,
},
RoomInfos: &RistrettoCachePartition[string, *types.RoomInfo]{ // room ID -> room info
cache: cache,
Prefix: roomInfosCache,
Mutable: true,
MaxAge: maxAge,
},
FederationPDUs: &RistrettoCostedCachePartition[int64, *gomatrixserverlib.HeaderedEvent]{ // queue NID -> PDU
&RistrettoCachePartition[int64, *gomatrixserverlib.HeaderedEvent]{
cache: cache,

View file

@ -1,130 +1,142 @@
// Copyright 2022 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sqlutil
import (
"context"
"database/sql"
"fmt"
"runtime"
"sort"
"sync"
"time"
"github.com/matrix-org/dendrite/setup/config"
"github.com/pressly/goose"
"github.com/matrix-org/dendrite/internal"
"github.com/sirupsen/logrus"
)
type Migrations struct {
registeredGoMigrations map[int64]*goose.Migration
const createDBMigrationsSQL = "" +
"CREATE TABLE IF NOT EXISTS db_migrations (" +
" version TEXT PRIMARY KEY NOT NULL," +
" time TEXT NOT NULL," +
" dendrite_version TEXT NOT NULL" +
");"
const insertVersionSQL = "" +
"INSERT INTO db_migrations (version, time, dendrite_version)" +
" VALUES ($1, $2, $3)"
const selectDBMigrationsSQL = "SELECT version FROM db_migrations"
// Migration defines a migration to be run.
type Migration struct {
// Version is a simple description/name of this migration.
Version string
// Up defines the function to execute for an upgrade.
Up func(ctx context.Context, txn *sql.Tx) error
// Down defines the function to execute for a downgrade (not implemented yet).
Down func(ctx context.Context, txn *sql.Tx) error
}
func NewMigrations() *Migrations {
return &Migrations{
registeredGoMigrations: make(map[int64]*goose.Migration),
// Migrator registers database migrations and executes them in order.
type Migrator struct {
db *sql.DB
migrations []Migration
knownMigrations map[string]struct{}
mutex *sync.Mutex
}
// NewMigrator creates a new DB migrator.
func NewMigrator(db *sql.DB) *Migrator {
return &Migrator{
db: db,
migrations: []Migration{},
knownMigrations: make(map[string]struct{}),
mutex: &sync.Mutex{},
}
}
// Copy-pasted from goose directly to store migrations into a map we control
// AddMigration adds a migration.
func (m *Migrations) AddMigration(up func(*sql.Tx) error, down func(*sql.Tx) error) {
_, filename, _, _ := runtime.Caller(1)
m.AddNamedMigration(filename, up, down)
}
// AddNamedMigration : Add a named migration.
func (m *Migrations) AddNamedMigration(filename string, up func(*sql.Tx) error, down func(*sql.Tx) error) {
v, _ := goose.NumericComponent(filename)
migration := &goose.Migration{Version: v, Next: -1, Previous: -1, Registered: true, UpFn: up, DownFn: down, Source: filename}
if existing, ok := m.registeredGoMigrations[v]; ok {
panic(fmt.Sprintf("failed to add migration %q: version conflicts with %q", filename, existing.Source))
// AddMigrations appends migrations to the list of migrations. Migrations are executed
// in the order they are added to the list and are de-duplicated using their Version field.
func (m *Migrator) AddMigrations(migrations ...Migration) {
m.mutex.Lock()
defer m.mutex.Unlock()
for _, mig := range migrations {
if _, ok := m.knownMigrations[mig.Version]; !ok {
m.migrations = append(m.migrations, mig)
m.knownMigrations[mig.Version] = struct{}{}
}
}
m.registeredGoMigrations[v] = migration
}
// RunDeltas up to the latest version.
func (m *Migrations) RunDeltas(db *sql.DB, props *config.DatabaseOptions) error {
maxVer := goose.MaxVersion
minVer := int64(0)
migrations, err := m.collect(minVer, maxVer)
// Up executes all migrations in the order they were added.
func (m *Migrator) Up(ctx context.Context) error {
var (
err error
dendriteVersion = internal.VersionString()
)
// ensure there is a table for known migrations
executedMigrations, err := m.ExecutedMigrations(ctx)
if err != nil {
return fmt.Errorf("runDeltas: Failed to collect migrations: %w", err)
return fmt.Errorf("unable to create/get migrations: %w", err)
}
if props.ConnectionString.IsPostgres() {
if err = goose.SetDialect("postgres"); err != nil {
return err
}
} else if props.ConnectionString.IsSQLite() {
if err = goose.SetDialect("sqlite3"); err != nil {
return err
}
} else {
return fmt.Errorf("unknown connection string: %s", props.ConnectionString)
}
for {
current, err := goose.EnsureDBVersion(db)
if err != nil {
return fmt.Errorf("runDeltas: Failed to EnsureDBVersion: %w", err)
}
next, err := migrations.Next(current)
if err != nil {
if err == goose.ErrNoNextVersion {
return nil
return WithTransaction(m.db, func(txn *sql.Tx) error {
for i := range m.migrations {
now := time.Now().UTC().Format(time.RFC3339)
migration := m.migrations[i]
logrus.Debugf("Executing database migration '%s'", migration.Version)
// Skip migration if it was already executed
if _, ok := executedMigrations[migration.Version]; ok {
continue
}
err = migration.Up(ctx, txn)
if err != nil {
return fmt.Errorf("unable to execute migration '%s': %w", migration.Version, err)
}
_, err = txn.ExecContext(ctx, insertVersionSQL,
migration.Version,
now,
dendriteVersion,
)
if err != nil {
return fmt.Errorf("unable to insert executed migrations: %w", err)
}
return fmt.Errorf("runDeltas: Failed to load next migration to %+v : %w", next, err)
}
if err = next.Up(db); err != nil {
return fmt.Errorf("runDeltas: Failed run migration: %w", err)
}
}
return nil
})
}
func (m *Migrations) collect(current, target int64) (goose.Migrations, error) {
var migrations goose.Migrations
// Go migrations registered via goose.AddMigration().
for _, migration := range m.registeredGoMigrations {
v, err := goose.NumericComponent(migration.Source)
if err != nil {
return nil, err
}
if versionFilter(v, current, target) {
migrations = append(migrations, migration)
// ExecutedMigrations creates the migrations table if it doesn't already exist and returns
// a map of the migrations that have already been executed.
func (m *Migrator) ExecutedMigrations(ctx context.Context) (map[string]struct{}, error) {
result := make(map[string]struct{})
_, err := m.db.ExecContext(ctx, createDBMigrationsSQL)
if err != nil {
return nil, fmt.Errorf("unable to create db_migrations: %w", err)
}
rows, err := m.db.QueryContext(ctx, selectDBMigrationsSQL)
if err != nil {
return nil, fmt.Errorf("unable to query db_migrations: %w", err)
}
defer internal.CloseAndLogIfError(ctx, rows, "ExecutedMigrations: rows.close() failed")
var version string
for rows.Next() {
if err = rows.Scan(&version); err != nil {
return nil, fmt.Errorf("unable to scan version: %w", err)
}
result[version] = struct{}{}
}
migrations = sortAndConnectMigrations(migrations)
return migrations, nil
}
func sortAndConnectMigrations(migrations goose.Migrations) goose.Migrations {
sort.Sort(migrations)
// now that we're sorted in the appropriate direction,
// populate next and previous for each migration
for i, m := range migrations {
prev := int64(-1)
if i > 0 {
prev = migrations[i-1].Version
migrations[i-1].Next = m.Version
}
migrations[i].Previous = prev
}
return migrations
}
func versionFilter(v, current, target int64) bool {
if target > current {
return v > current && v <= target
}
if target < current {
return v <= current && v > target
}
return false
return result, rows.Err()
}
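The storage packages later in this commit adopt the new migration API with roughly the pattern sketched below. This is a minimal illustration only, assuming a hypothetical example package and table; the names do not correspond to any real Dendrite table.

package example

import (
	"context"
	"database/sql"

	"github.com/matrix-org/dendrite/internal/sqlutil"
)

// prepareExampleTable registers a single named migration and runs any
// migrations that have not yet been recorded in the db_migrations table.
func prepareExampleTable(db *sql.DB) error {
	m := sqlutil.NewMigrator(db)
	m.AddMigrations(sqlutil.Migration{
		Version: "example: add example column", // hypothetical version name
		Up: func(ctx context.Context, txn *sql.Tx) error {
			_, err := txn.ExecContext(ctx, "ALTER TABLE example_table ADD COLUMN example TEXT;")
			return err
		},
	})
	return m.Up(context.Background())
}

All registered migrations run inside a single transaction, each executed version is recorded in db_migrations, and already-recorded versions are skipped, so calling Up again is effectively a no-op for previously applied migrations.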

View file

@ -0,0 +1,112 @@
package sqlutil_test
import (
"context"
"database/sql"
"fmt"
"reflect"
"testing"
"github.com/matrix-org/dendrite/internal/sqlutil"
"github.com/matrix-org/dendrite/test"
_ "github.com/mattn/go-sqlite3"
)
var dummyMigrations = []sqlutil.Migration{
{
Version: "init",
Up: func(ctx context.Context, txn *sql.Tx) error {
_, err := txn.ExecContext(ctx, "CREATE TABLE IF NOT EXISTS dummy ( test TEXT );")
return err
},
},
{
Version: "v2",
Up: func(ctx context.Context, txn *sql.Tx) error {
_, err := txn.ExecContext(ctx, "ALTER TABLE dummy ADD COLUMN test2 TEXT;")
return err
},
},
{
Version: "v2", // duplicate, this migration will be skipped
Up: func(ctx context.Context, txn *sql.Tx) error {
_, err := txn.ExecContext(ctx, "ALTER TABLE dummy ADD COLUMN test2 TEXT;")
return err
},
},
{
Version: "multiple execs",
Up: func(ctx context.Context, txn *sql.Tx) error {
_, err := txn.ExecContext(ctx, "ALTER TABLE dummy ADD COLUMN test3 TEXT;")
if err != nil {
return err
}
_, err = txn.ExecContext(ctx, "ALTER TABLE dummy ADD COLUMN test4 TEXT;")
return err
},
},
}
var failMigration = sqlutil.Migration{
Version: "iFail",
Up: func(ctx context.Context, txn *sql.Tx) error {
return fmt.Errorf("iFail")
},
Down: nil,
}
func Test_migrations_Up(t *testing.T) {
withFail := append(dummyMigrations, failMigration)
tests := []struct {
name string
migrations []sqlutil.Migration
wantResult map[string]struct{}
wantErr bool
}{
{
name: "dummy migration",
migrations: dummyMigrations,
wantResult: map[string]struct{}{
"init": {},
"v2": {},
"multiple execs": {},
},
},
{
name: "with fail",
migrations: withFail,
wantErr: true,
},
}
ctx := context.Background()
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
test.WithAllDatabases(t, func(t *testing.T, dbType test.DBType) {
conStr, close := test.PrepareDBConnectionString(t, dbType)
defer close()
driverName := "sqlite3"
if dbType == test.DBTypePostgres {
driverName = "postgres"
}
db, err := sql.Open(driverName, conStr)
if err != nil {
t.Errorf("unable to open database: %v", err)
}
m := sqlutil.NewMigrator(db)
m.AddMigrations(tt.migrations...)
if err = m.Up(ctx); (err != nil) != tt.wantErr {
t.Errorf("Up() error = %v, wantErr %v", err, tt.wantErr)
}
result, err := m.ExecutedMigrations(ctx)
if err != nil {
t.Errorf("unable to get executed migrations: %v", err)
}
if !tt.wantErr && !reflect.DeepEqual(result, tt.wantResult) {
t.Errorf("expected: %+v, got %v", tt.wantResult, result)
}
})
})
}
}

View file

@ -16,8 +16,8 @@ var build string
const (
VersionMajor = 0
VersionMinor = 8
VersionPatch = 9
VersionMinor = 9
VersionPatch = 1
VersionTag = "" // example: "rc1"
)

View file

@ -18,6 +18,7 @@ import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"sync"
"time"
@ -314,6 +315,11 @@ func (a *KeyInternalAPI) QueryKeys(ctx context.Context, req *api.QueryKeysReques
for targetKeyID := range masterKey.Keys {
sigMap, err := a.DB.CrossSigningSigsForTarget(ctx, req.UserID, targetUserID, targetKeyID)
if err != nil {
// Stop executing the function if the context was canceled/the deadline was exceeded,
// as we can't continue without a valid context.
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
return
}
logrus.WithError(err).Errorf("a.DB.CrossSigningSigsForTarget failed")
continue
}
@ -335,6 +341,11 @@ func (a *KeyInternalAPI) QueryKeys(ctx context.Context, req *api.QueryKeysReques
for targetKeyID, key := range forUserID {
sigMap, err := a.DB.CrossSigningSigsForTarget(ctx, req.UserID, targetUserID, gomatrixserverlib.KeyID(targetKeyID))
if err != nil {
// Stop executing the function if the context was canceled/the deadline was exceeded,
// as we can't continue without a valid context.
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
return
}
logrus.WithError(err).Errorf("a.DB.CrossSigningSigsForTarget failed")
continue
}

View file

@ -21,6 +21,7 @@ import (
"github.com/matrix-org/dendrite/internal"
"github.com/matrix-org/dendrite/internal/sqlutil"
"github.com/matrix-org/dendrite/keyserver/storage/postgres/deltas"
"github.com/matrix-org/dendrite/keyserver/storage/tables"
"github.com/matrix-org/dendrite/keyserver/types"
"github.com/matrix-org/gomatrixserverlib"
@ -66,6 +67,16 @@ func NewPostgresCrossSigningSigsTable(db *sql.DB) (tables.CrossSigningSigs, erro
if err != nil {
return nil, err
}
m := sqlutil.NewMigrator(db)
m.AddMigrations(sqlutil.Migration{
Version: "keyserver: cross signing signature indexes",
Up: deltas.UpFixCrossSigningSignatureIndexes,
})
if err = m.Up(context.Background()); err != nil {
return nil, err
}
return s, sqlutil.StatementList{
{&s.selectCrossSigningSigsForTargetStmt, selectCrossSigningSigsForTargetSQL},
{&s.upsertCrossSigningSigsForTargetStmt, upsertCrossSigningSigsForTargetSQL},

View file

@ -15,37 +15,27 @@
package deltas
import (
"context"
"database/sql"
"fmt"
"github.com/matrix-org/dendrite/internal/sqlutil"
"github.com/pressly/goose"
)
func LoadFromGoose() {
goose.AddMigration(UpRefactorKeyChanges, DownRefactorKeyChanges)
}
func LoadRefactorKeyChanges(m *sqlutil.Migrations) {
m.AddMigration(UpRefactorKeyChanges, DownRefactorKeyChanges)
}
func UpRefactorKeyChanges(tx *sql.Tx) error {
func UpRefactorKeyChanges(ctx context.Context, tx *sql.Tx) error {
// start counting from the last max offset, else 0. We need to do a count(*) first to see if there
// even are entries in this table to know if we can query for log_offset. Without the count then
// the query to SELECT the max log offset fails on new Dendrite instances as log_offset doesn't
// exist on that table. Even though we discard the error, the txn is tainted and gets aborted :/
var count int
_ = tx.QueryRow(`SELECT count(*) FROM keyserver_key_changes`).Scan(&count)
_ = tx.QueryRowContext(ctx, `SELECT count(*) FROM keyserver_key_changes`).Scan(&count)
if count > 0 {
var maxOffset int64
_ = tx.QueryRow(`SELECT coalesce(MAX(log_offset), 0) AS offset FROM keyserver_key_changes`).Scan(&maxOffset)
if _, err := tx.Exec(fmt.Sprintf(`CREATE SEQUENCE IF NOT EXISTS keyserver_key_changes_seq START %d`, maxOffset)); err != nil {
_ = tx.QueryRowContext(ctx, `SELECT coalesce(MAX(log_offset), 0) AS offset FROM keyserver_key_changes`).Scan(&maxOffset)
if _, err := tx.ExecContext(ctx, fmt.Sprintf(`CREATE SEQUENCE IF NOT EXISTS keyserver_key_changes_seq START %d`, maxOffset)); err != nil {
return fmt.Errorf("failed to CREATE SEQUENCE for key changes, starting at %d: %s", maxOffset, err)
}
}
_, err := tx.Exec(`
_, err := tx.ExecContext(ctx, `
-- make the new table
DROP TABLE IF EXISTS keyserver_key_changes;
CREATE TABLE IF NOT EXISTS keyserver_key_changes (
@ -60,8 +50,8 @@ func UpRefactorKeyChanges(tx *sql.Tx) error {
return nil
}
func DownRefactorKeyChanges(tx *sql.Tx) error {
_, err := tx.Exec(`
func DownRefactorKeyChanges(ctx context.Context, tx *sql.Tx) error {
_, err := tx.ExecContext(ctx, `
-- Drop all data and revert back, we can't keep the data as Kafka offsets determine the numbers
DROP SEQUENCE IF EXISTS keyserver_key_changes_seq;
DROP TABLE IF EXISTS keyserver_key_changes;

View file

@ -15,18 +15,13 @@
package deltas
import (
"context"
"database/sql"
"fmt"
"github.com/matrix-org/dendrite/internal/sqlutil"
)
func LoadFixCrossSigningSignatureIndexes(m *sqlutil.Migrations) {
m.AddMigration(UpFixCrossSigningSignatureIndexes, DownFixCrossSigningSignatureIndexes)
}
func UpFixCrossSigningSignatureIndexes(tx *sql.Tx) error {
_, err := tx.Exec(`
func UpFixCrossSigningSignatureIndexes(ctx context.Context, tx *sql.Tx) error {
_, err := tx.ExecContext(ctx, `
ALTER TABLE keyserver_cross_signing_sigs DROP CONSTRAINT keyserver_cross_signing_sigs_pkey;
ALTER TABLE keyserver_cross_signing_sigs ADD PRIMARY KEY (origin_user_id, origin_key_id, target_user_id, target_key_id);
@ -38,8 +33,8 @@ func UpFixCrossSigningSignatureIndexes(tx *sql.Tx) error {
return nil
}
func DownFixCrossSigningSignatureIndexes(tx *sql.Tx) error {
_, err := tx.Exec(`
func DownFixCrossSigningSignatureIndexes(ctx context.Context, tx *sql.Tx) error {
_, err := tx.ExecContext(ctx, `
ALTER TABLE keyserver_cross_signing_sigs DROP CONSTRAINT keyserver_cross_signing_sigs_pkey;
ALTER TABLE keyserver_cross_signing_sigs ADD PRIMARY KEY (origin_user_id, target_user_id, target_key_id);

View file

@ -19,6 +19,8 @@ import (
"database/sql"
"github.com/matrix-org/dendrite/internal"
"github.com/matrix-org/dendrite/internal/sqlutil"
"github.com/matrix-org/dendrite/keyserver/storage/postgres/deltas"
"github.com/matrix-org/dendrite/keyserver/storage/tables"
)
@ -55,7 +57,23 @@ func NewPostgresKeyChangesTable(db *sql.DB) (tables.KeyChanges, error) {
db: db,
}
_, err := db.Exec(keyChangesSchema)
return s, err
if err != nil {
return s, err
}
// TODO: Remove this once we are sure there are no goose artefacts left in the db.
// Selecting the removed "partition" column forces an error, which indicates that the
// migration has already been applied.
err = db.QueryRow("SELECT partition FROM keyserver_key_changes LIMIT 1;").Scan()
if err == nil {
m := sqlutil.NewMigrator(db)
m.AddMigrations(sqlutil.Migration{
Version: "keyserver: refactor key changes",
Up: deltas.UpRefactorKeyChanges,
})
return s, m.Up(context.Background())
}
return s, nil
}
func (s *keyChangesStatements) Prepare() (err error) {

View file

@ -16,7 +16,6 @@ package postgres
import (
"github.com/matrix-org/dendrite/internal/sqlutil"
"github.com/matrix-org/dendrite/keyserver/storage/postgres/deltas"
"github.com/matrix-org/dendrite/keyserver/storage/shared"
"github.com/matrix-org/dendrite/setup/base"
"github.com/matrix-org/dendrite/setup/config"
@ -53,12 +52,6 @@ func NewDatabase(base *base.BaseDendrite, dbProperties *config.DatabaseOptions)
if err != nil {
return nil, err
}
m := sqlutil.NewMigrations()
deltas.LoadRefactorKeyChanges(m)
deltas.LoadFixCrossSigningSignatureIndexes(m)
if err = m.RunDeltas(db, dbProperties); err != nil {
return nil, err
}
if err = kc.Prepare(); err != nil {
return nil, err
}

View file

@ -21,6 +21,7 @@ import (
"github.com/matrix-org/dendrite/internal"
"github.com/matrix-org/dendrite/internal/sqlutil"
"github.com/matrix-org/dendrite/keyserver/storage/sqlite3/deltas"
"github.com/matrix-org/dendrite/keyserver/storage/tables"
"github.com/matrix-org/dendrite/keyserver/types"
"github.com/matrix-org/gomatrixserverlib"
@ -65,6 +66,15 @@ func NewSqliteCrossSigningSigsTable(db *sql.DB) (tables.CrossSigningSigs, error)
if err != nil {
return nil, err
}
m := sqlutil.NewMigrator(db)
m.AddMigrations(sqlutil.Migration{
Version: "keyserver: cross signing signature indexes",
Up: deltas.UpFixCrossSigningSignatureIndexes,
})
if err = m.Up(context.Background()); err != nil {
return nil, err
}
return s, sqlutil.StatementList{
{&s.selectCrossSigningSigsForTargetStmt, selectCrossSigningSigsForTargetSQL},
{&s.upsertCrossSigningSigsForTargetStmt, upsertCrossSigningSigsForTargetSQL},

View file

@ -15,28 +15,18 @@
package deltas
import (
"context"
"database/sql"
"fmt"
"github.com/matrix-org/dendrite/internal/sqlutil"
"github.com/pressly/goose"
)
func LoadFromGoose() {
goose.AddMigration(UpRefactorKeyChanges, DownRefactorKeyChanges)
}
func LoadRefactorKeyChanges(m *sqlutil.Migrations) {
m.AddMigration(UpRefactorKeyChanges, DownRefactorKeyChanges)
}
func UpRefactorKeyChanges(tx *sql.Tx) error {
func UpRefactorKeyChanges(ctx context.Context, tx *sql.Tx) error {
// start counting from the last max offset, else 0.
var maxOffset int64
var userID string
_ = tx.QueryRow(`SELECT user_id, MAX(log_offset) FROM keyserver_key_changes GROUP BY user_id`).Scan(&userID, &maxOffset)
_ = tx.QueryRowContext(ctx, `SELECT user_id, MAX(log_offset) FROM keyserver_key_changes GROUP BY user_id`).Scan(&userID, &maxOffset)
_, err := tx.Exec(`
_, err := tx.ExecContext(ctx, `
-- make the new table
DROP TABLE IF EXISTS keyserver_key_changes;
CREATE TABLE IF NOT EXISTS keyserver_key_changes (
@ -51,14 +41,14 @@ func UpRefactorKeyChanges(tx *sql.Tx) error {
}
// to start counting from maxOffset, insert a row with that value
if userID != "" {
_, err = tx.Exec(`INSERT INTO keyserver_key_changes(change_id, user_id) VALUES($1, $2)`, maxOffset, userID)
_, err = tx.ExecContext(ctx, `INSERT INTO keyserver_key_changes(change_id, user_id) VALUES($1, $2)`, maxOffset, userID)
return err
}
return nil
}
func DownRefactorKeyChanges(tx *sql.Tx) error {
_, err := tx.Exec(`
func DownRefactorKeyChanges(ctx context.Context, tx *sql.Tx) error {
_, err := tx.ExecContext(ctx, `
-- Drop all data and revert back, we can't keep the data as Kafka offsets determine the numbers
DROP TABLE IF EXISTS keyserver_key_changes;
CREATE TABLE IF NOT EXISTS keyserver_key_changes (

View file

@ -15,18 +15,13 @@
package deltas
import (
"context"
"database/sql"
"fmt"
"github.com/matrix-org/dendrite/internal/sqlutil"
)
func LoadFixCrossSigningSignatureIndexes(m *sqlutil.Migrations) {
m.AddMigration(UpFixCrossSigningSignatureIndexes, DownFixCrossSigningSignatureIndexes)
}
func UpFixCrossSigningSignatureIndexes(tx *sql.Tx) error {
_, err := tx.Exec(`
func UpFixCrossSigningSignatureIndexes(ctx context.Context, tx *sql.Tx) error {
_, err := tx.ExecContext(ctx, `
CREATE TABLE IF NOT EXISTS keyserver_cross_signing_sigs_tmp (
origin_user_id TEXT NOT NULL,
origin_key_id TEXT NOT NULL,
@ -50,8 +45,8 @@ func UpFixCrossSigningSignatureIndexes(tx *sql.Tx) error {
return nil
}
func DownFixCrossSigningSignatureIndexes(tx *sql.Tx) error {
_, err := tx.Exec(`
func DownFixCrossSigningSignatureIndexes(ctx context.Context, tx *sql.Tx) error {
_, err := tx.ExecContext(ctx, `
CREATE TABLE IF NOT EXISTS keyserver_cross_signing_sigs_tmp (
origin_user_id TEXT NOT NULL,
origin_key_id TEXT NOT NULL,

View file

@ -19,6 +19,8 @@ import (
"database/sql"
"github.com/matrix-org/dendrite/internal"
"github.com/matrix-org/dendrite/internal/sqlutil"
"github.com/matrix-org/dendrite/keyserver/storage/sqlite3/deltas"
"github.com/matrix-org/dendrite/keyserver/storage/tables"
)
@ -53,7 +55,23 @@ func NewSqliteKeyChangesTable(db *sql.DB) (tables.KeyChanges, error) {
db: db,
}
_, err := db.Exec(keyChangesSchema)
return s, err
if err != nil {
return s, err
}
// TODO: Remove this once we are sure there are no goose artefacts left in the db.
// Selecting the removed "partition" column forces an error, which indicates that the
// migration has already been applied.
err = db.QueryRow("SELECT partition FROM keyserver_key_changes LIMIT 1;").Scan()
if err == nil {
m := sqlutil.NewMigrator(db)
m.AddMigrations(sqlutil.Migration{
Version: "keyserver: refactor key changes",
Up: deltas.UpRefactorKeyChanges,
})
return s, m.Up(context.Background())
}
return s, nil
}
func (s *keyChangesStatements) Prepare() (err error) {

View file

@ -17,7 +17,6 @@ package sqlite3
import (
"github.com/matrix-org/dendrite/internal/sqlutil"
"github.com/matrix-org/dendrite/keyserver/storage/shared"
"github.com/matrix-org/dendrite/keyserver/storage/sqlite3/deltas"
"github.com/matrix-org/dendrite/setup/base"
"github.com/matrix-org/dendrite/setup/config"
)
@ -52,12 +51,6 @@ func NewDatabase(base *base.BaseDendrite, dbProperties *config.DatabaseOptions)
return nil, err
}
m := sqlutil.NewMigrations()
deltas.LoadRefactorKeyChanges(m)
deltas.LoadFixCrossSigningSignatureIndexes(m)
if err = m.RunDeltas(db, dbProperties); err != nil {
return nil, err
}
if err = kc.Prepare(); err != nil {
return nil, err
}

View file

@ -149,6 +149,9 @@ func makeDownloadAPI(
}
}
// Cache media for at least one day.
w.Header().Set("Cache-Control", "public,max-age=86400,s-maxage=86400")
Download(
w,
req,

View file

@ -50,14 +50,14 @@ func CheckForSoftFail(
if err != nil {
return false, fmt.Errorf("db.RoomNID: %w", err)
}
if roomInfo == nil || roomInfo.IsStub {
if roomInfo == nil || roomInfo.IsStub() {
return false, nil
}
// Then get the state entries for the current state snapshot.
// We'll use this to check if the event is allowed right now.
roomState := state.NewStateResolution(db, roomInfo)
authStateEntries, err = roomState.LoadStateAtSnapshot(ctx, roomInfo.StateSnapshotNID)
authStateEntries, err = roomState.LoadStateAtSnapshot(ctx, roomInfo.StateSnapshotNID())
if err != nil {
return true, fmt.Errorf("roomState.LoadStateAtSnapshot: %w", err)
}

View file

@ -12,6 +12,7 @@ import (
"github.com/matrix-org/dendrite/roomserver/state"
"github.com/matrix-org/dendrite/roomserver/storage"
"github.com/matrix-org/dendrite/roomserver/storage/shared"
"github.com/matrix-org/dendrite/roomserver/storage/tables"
"github.com/matrix-org/dendrite/roomserver/types"
"github.com/matrix-org/gomatrixserverlib"
"github.com/matrix-org/util"
@ -21,14 +22,14 @@ import (
// Move these to a more sensible place.
func UpdateToInviteMembership(
mu *shared.MembershipUpdater, add *gomatrixserverlib.Event, updates []api.OutputEvent,
mu *shared.MembershipUpdater, add *types.Event, updates []api.OutputEvent,
roomVersion gomatrixserverlib.RoomVersion,
) ([]api.OutputEvent, error) {
// We may have already sent the invite to the user, either because we are
// reprocessing this event, or because we received this invite from a
// remote server via the federation invite API. In those cases we don't need
// to send the event.
needsSending, err := mu.SetToInvite(add)
needsSending, retired, err := mu.Update(tables.MembershipStateInvite, add)
if err != nil {
return nil, err
}
@ -38,13 +39,23 @@ func UpdateToInviteMembership(
// room event stream. This ensures that the consumers only have to
// consider a single stream of events when determining whether a user
// is invited, rather than having to combine multiple streams themselves.
onie := api.OutputNewInviteEvent{
Event: add.Headered(roomVersion),
RoomVersion: roomVersion,
}
updates = append(updates, api.OutputEvent{
Type: api.OutputTypeNewInviteEvent,
NewInviteEvent: &onie,
Type: api.OutputTypeNewInviteEvent,
NewInviteEvent: &api.OutputNewInviteEvent{
Event: add.Headered(roomVersion),
RoomVersion: roomVersion,
},
})
}
for _, eventID := range retired {
updates = append(updates, api.OutputEvent{
Type: api.OutputTypeRetireInviteEvent,
RetireInviteEvent: &api.OutputRetireInviteEvent{
EventID: eventID,
Membership: gomatrixserverlib.Join,
RetiredByEventID: add.EventID(),
TargetUserID: *add.StateKey(),
},
})
}
return updates, nil
@ -225,13 +236,34 @@ func LoadStateEvents(
func CheckServerAllowedToSeeEvent(
ctx context.Context, db storage.Database, info *types.RoomInfo, eventID string, serverName gomatrixserverlib.ServerName, isServerInRoom bool,
) (bool, error) {
stateAtEvent, err := db.GetHistoryVisibilityState(ctx, info, eventID, string(serverName))
switch err {
case nil:
// No error, so continue normally
case tables.OptimisationNotSupportedError:
// The database engine didn't support this optimisation, so fall back to using
// the old and slow method
stateAtEvent, err = slowGetHistoryVisibilityState(ctx, db, info, eventID, serverName)
if err != nil {
return false, err
}
default:
// Something else went wrong
return false, err
}
return auth.IsServerAllowed(serverName, isServerInRoom, stateAtEvent), nil
}
func slowGetHistoryVisibilityState(
ctx context.Context, db storage.Database, info *types.RoomInfo, eventID string, serverName gomatrixserverlib.ServerName,
) ([]*gomatrixserverlib.Event, error) {
roomState := state.NewStateResolution(db, info)
stateEntries, err := roomState.LoadStateAtEvent(ctx, eventID)
if err != nil {
if errors.Is(err, sql.ErrNoRows) {
return false, nil
return nil, nil
}
return false, fmt.Errorf("roomState.LoadStateAtEvent: %w", err)
return nil, fmt.Errorf("roomState.LoadStateAtEvent: %w", err)
}
// Extract all of the event state key NIDs from the room state.
@ -243,7 +275,7 @@ func CheckServerAllowedToSeeEvent(
// Then request those state key NIDs from the database.
stateKeys, err := db.EventStateKeys(ctx, stateKeyNIDs)
if err != nil {
return false, fmt.Errorf("db.EventStateKeys: %w", err)
return nil, fmt.Errorf("db.EventStateKeys: %w", err)
}
// If the event state key doesn't match the given servername
@ -266,15 +298,10 @@ func CheckServerAllowedToSeeEvent(
}
if len(filteredEntries) == 0 {
return false, nil
return nil, nil
}
stateAtEvent, err := LoadStateEvents(ctx, db, filteredEntries)
if err != nil {
return false, err
}
return auth.IsServerAllowed(serverName, isServerInRoom, stateAtEvent), nil
return LoadStateEvents(ctx, db, filteredEntries)
}
// TODO: Remove this when we have tests to assert correctness of this function
@ -382,7 +409,7 @@ func QueryLatestEventsAndState(
if err != nil {
return err
}
if roomInfo == nil || roomInfo.IsStub {
if roomInfo == nil || roomInfo.IsStub() {
response.RoomExists = false
return nil
}

View file

@ -21,6 +21,7 @@ import (
"github.com/matrix-org/dendrite/roomserver/api"
"github.com/matrix-org/dendrite/roomserver/internal/helpers"
"github.com/matrix-org/dendrite/roomserver/storage/shared"
"github.com/matrix-org/dendrite/roomserver/storage/tables"
"github.com/matrix-org/dendrite/roomserver/types"
"github.com/matrix-org/gomatrixserverlib"
"github.com/opentracing/opentracing-go"
@ -60,20 +61,14 @@ func (r *Inputer) updateMemberships(
var updates []api.OutputEvent
for _, change := range changes {
var ae *gomatrixserverlib.Event
var re *gomatrixserverlib.Event
var ae *types.Event
var re *types.Event
targetUserNID := change.EventStateKeyNID
if change.removedEventNID != 0 {
ev, _ := helpers.EventMap(events).Lookup(change.removedEventNID)
if ev != nil {
re = ev.Event
}
re, _ = helpers.EventMap(events).Lookup(change.removedEventNID)
}
if change.addedEventNID != 0 {
ev, _ := helpers.EventMap(events).Lookup(change.addedEventNID)
if ev != nil {
ae = ev.Event
}
ae, _ = helpers.EventMap(events).Lookup(change.addedEventNID)
}
if updates, err = r.updateMembership(updater, targetUserNID, re, ae, updates); err != nil {
return nil, err
@ -85,30 +80,27 @@ func (r *Inputer) updateMemberships(
func (r *Inputer) updateMembership(
updater *shared.RoomUpdater,
targetUserNID types.EventStateKeyNID,
remove, add *gomatrixserverlib.Event,
remove, add *types.Event,
updates []api.OutputEvent,
) ([]api.OutputEvent, error) {
var err error
// Default the membership to Leave if no event was added or removed.
oldMembership := gomatrixserverlib.Leave
newMembership := gomatrixserverlib.Leave
if remove != nil {
oldMembership, err = remove.Membership()
if err != nil {
return nil, err
}
}
if add != nil {
newMembership, err = add.Membership()
if err != nil {
return nil, err
}
}
if oldMembership == newMembership && newMembership != gomatrixserverlib.Join {
// If the membership is the same then nothing changed and we can return
// immediately, unless it's a Join update (e.g. profile update).
return updates, nil
var targetLocal bool
if add != nil {
targetLocal = r.isLocalTarget(add)
}
mu, err := updater.MembershipUpdater(targetUserNID, targetLocal)
if err != nil {
return nil, err
}
// In an ideal world, we shouldn't ever have "add" be nil and "remove" be
@ -120,17 +112,10 @@ func (r *Inputer) updateMembership(
// after a state reset, often thinking that the user was still joined to
// the room even though the room state said otherwise, and this would prevent
// the user from being able to attempt to rejoin the room without modifying
// the database. So instead what we'll do is we'll just update the membership
// table to say that the user is "leave" and we'll use the old event to
// avoid nil pointer exceptions on the code path that follows.
if add == nil {
add = remove
newMembership = gomatrixserverlib.Leave
}
mu, err := updater.MembershipUpdater(targetUserNID, r.isLocalTarget(add))
if err != nil {
return nil, err
// the database. So instead we're going to remove the membership from the
// database altogether, so that it doesn't create future problems.
if add == nil && remove != nil {
return nil, mu.Delete()
}
switch newMembership {
@ -149,7 +134,7 @@ func (r *Inputer) updateMembership(
}
}
func (r *Inputer) isLocalTarget(event *gomatrixserverlib.Event) bool {
func (r *Inputer) isLocalTarget(event *types.Event) bool {
isTargetLocalUser := false
if statekey := event.StateKey(); statekey != nil {
_, domain, _ := gomatrixserverlib.SplitID('@', *statekey)
@ -159,81 +144,61 @@ func (r *Inputer) isLocalTarget(event *gomatrixserverlib.Event) bool {
}
func updateToJoinMembership(
mu *shared.MembershipUpdater, add *gomatrixserverlib.Event, updates []api.OutputEvent,
mu *shared.MembershipUpdater, add *types.Event, updates []api.OutputEvent,
) ([]api.OutputEvent, error) {
// If the user is already marked as being joined, we call SetToJoin to update
// the event ID then we can return immediately. Retired is ignored as there
// is no invite event to retire.
if mu.IsJoin() {
_, err := mu.SetToJoin(add.Sender(), add.EventID(), true)
if err != nil {
return nil, err
}
return updates, nil
}
// When we mark a user as being joined we will invalidate any invites that
// are active for that user. We notify the consumers that the invites have
// been retired using a special event, even though they could infer this
// by studying the state changes in the room event stream.
retired, err := mu.SetToJoin(add.Sender(), add.EventID(), false)
_, retired, err := mu.Update(tables.MembershipStateJoin, add)
if err != nil {
return nil, err
}
for _, eventID := range retired {
orie := api.OutputRetireInviteEvent{
EventID: eventID,
Membership: gomatrixserverlib.Join,
RetiredByEventID: add.EventID(),
TargetUserID: *add.StateKey(),
}
updates = append(updates, api.OutputEvent{
Type: api.OutputTypeRetireInviteEvent,
RetireInviteEvent: &orie,
Type: api.OutputTypeRetireInviteEvent,
RetireInviteEvent: &api.OutputRetireInviteEvent{
EventID: eventID,
Membership: gomatrixserverlib.Join,
RetiredByEventID: add.EventID(),
TargetUserID: *add.StateKey(),
},
})
}
return updates, nil
}
func updateToLeaveMembership(
mu *shared.MembershipUpdater, add *gomatrixserverlib.Event,
mu *shared.MembershipUpdater, add *types.Event,
newMembership string, updates []api.OutputEvent,
) ([]api.OutputEvent, error) {
// If the user is already neither joined, nor invited to the room then we
// can return immediately.
if mu.IsLeave() {
return updates, nil
}
// When we mark a user as having left we will invalidate any invites that
// are active for that user. We notify the consumers that the invites have
// been retired using a special event, even though they could infer this
// by studying the state changes in the room event stream.
retired, err := mu.SetToLeave(add.Sender(), add.EventID())
_, retired, err := mu.Update(tables.MembershipStateLeaveOrBan, add)
if err != nil {
return nil, err
}
for _, eventID := range retired {
orie := api.OutputRetireInviteEvent{
EventID: eventID,
Membership: newMembership,
RetiredByEventID: add.EventID(),
TargetUserID: *add.StateKey(),
}
updates = append(updates, api.OutputEvent{
Type: api.OutputTypeRetireInviteEvent,
RetireInviteEvent: &orie,
Type: api.OutputTypeRetireInviteEvent,
RetireInviteEvent: &api.OutputRetireInviteEvent{
EventID: eventID,
Membership: newMembership,
RetiredByEventID: add.EventID(),
TargetUserID: *add.StateKey(),
},
})
}
return updates, nil
}
func updateToKnockMembership(
mu *shared.MembershipUpdater, add *gomatrixserverlib.Event, updates []api.OutputEvent,
mu *shared.MembershipUpdater, add *types.Event, updates []api.OutputEvent,
) ([]api.OutputEvent, error) {
if mu.IsLeave() {
_, err := mu.SetToKnock(add)
if err != nil {
return nil, err
}
if _, _, err := mu.Update(tables.MembershipStateKnock, add); err != nil {
return nil, err
}
return updates, nil
}

View file

@ -326,8 +326,10 @@ func (t *missingStateReq) lookupStateAfterEvent(ctx context.Context, roomVersion
return respState, true, nil
}
logrus.WithContext(ctx).Warnf("State for event %s not available locally, falling back to federation (via %d servers)", eventID, len(t.servers))
respState, err := t.lookupStateBeforeEvent(ctx, roomVersion, roomID, eventID)
if err != nil {
logrus.WithContext(ctx).WithError(err).Errorf("Failed to look up state before event %s", eventID)
return nil, false, fmt.Errorf("t.lookupStateBeforeEvent: %w", err)
}
@ -339,6 +341,7 @@ func (t *missingStateReq) lookupStateAfterEvent(ctx context.Context, roomVersion
case nil:
// do nothing
default:
logrus.WithContext(ctx).WithError(err).Errorf("Failed to look up event %s", eventID)
return nil, false, fmt.Errorf("t.lookupEvent: %w", err)
}
h = t.cacheAndReturn(h)
@ -375,11 +378,7 @@ func (t *missingStateReq) lookupStateAfterEventLocally(ctx context.Context, room
defer span.Finish()
var res parsedRespState
roomInfo, err := t.db.RoomInfo(ctx, roomID)
if err != nil {
return nil
}
roomState := state.NewStateResolution(t.db, roomInfo)
roomState := state.NewStateResolution(t.db, t.roomInfo)
stateAtEvents, err := t.db.StateAtEventIDs(ctx, []string{eventID})
if err != nil {
util.GetLogger(ctx).WithField("room_id", roomID).WithError(err).Warnf("failed to get state after %s locally", eventID)
@ -666,9 +665,22 @@ func (t *missingStateReq) lookupMissingStateViaStateIDs(ctx context.Context, roo
util.GetLogger(ctx).WithField("room_id", roomID).Infof("lookupMissingStateViaStateIDs %s", eventID)
// fetch the state event IDs at the time of the event
stateIDs, err := t.federation.LookupStateIDs(ctx, t.origin, roomID, eventID)
var stateIDs gomatrixserverlib.RespStateIDs
var err error
count := 0
totalctx, totalcancel := context.WithTimeout(ctx, time.Minute*5)
for _, serverName := range t.servers {
reqctx, reqcancel := context.WithTimeout(totalctx, time.Second*20)
stateIDs, err = t.federation.LookupStateIDs(reqctx, serverName, roomID, eventID)
reqcancel()
if err == nil {
break
}
count++
}
totalcancel()
if err != nil {
return nil, err
return nil, fmt.Errorf("t.federation.LookupStateIDs tried %d server(s), last error: %w", count, err)
}
// work out which auth/state IDs are missing
wantIDs := append(stateIDs.StateEventIDs, stateIDs.AuthEventIDs...)
@ -754,9 +766,8 @@ func (t *missingStateReq) lookupMissingStateViaStateIDs(ctx context.Context, roo
// Define what we'll do in order to fetch the missing event ID.
fetch := func(missingEventID string) {
var h *gomatrixserverlib.Event
h, err = t.lookupEvent(ctx, roomVersion, roomID, missingEventID, false)
switch err.(type) {
h, herr := t.lookupEvent(ctx, roomVersion, roomID, missingEventID, false)
switch herr.(type) {
case verifySigError:
return
case nil:
@ -765,7 +776,7 @@ func (t *missingStateReq) lookupMissingStateViaStateIDs(ctx context.Context, roo
util.GetLogger(ctx).WithFields(logrus.Fields{
"event_id": missingEventID,
"room_id": roomID,
}).Warn("Failed to fetch missing event")
}).WithError(herr).Warn("Failed to fetch missing event")
return
}
haveEventsMutex.Lock()

View file

@ -52,7 +52,7 @@ func (r *Admin) PerformAdminEvacuateRoom(
}
return
}
if roomInfo == nil || roomInfo.IsStub {
if roomInfo == nil || roomInfo.IsStub() {
res.Error = &api.PerformError{
Code: api.PerformErrorNoRoom,
Msg: fmt.Sprintf("Room %s not found", req.RoomID),

View file

@ -73,7 +73,7 @@ func (r *Backfiller) PerformBackfill(
if err != nil {
return err
}
if info == nil || info.IsStub {
if info == nil || info.IsStub() {
return fmt.Errorf("PerformBackfill: missing room info for room %s", request.RoomID)
}
@ -106,7 +106,7 @@ func (r *Backfiller) backfillViaFederation(ctx context.Context, req *api.Perform
if err != nil {
return err
}
if info == nil || info.IsStub {
if info == nil || info.IsStub() {
return fmt.Errorf("backfillViaFederation: missing room info for room %s", req.RoomID)
}
requester := newBackfillRequester(r.DB, r.FSAPI, r.ServerName, req.BackwardsExtremities, r.PreferServers)
@ -434,7 +434,7 @@ FindSuccessor:
logrus.WithError(err).WithField("room_id", roomID).Error("ServersAtEvent: failed to get RoomInfo for room")
return nil
}
if info == nil || info.IsStub {
if info == nil || info.IsStub() {
logrus.WithField("room_id", roomID).Error("ServersAtEvent: failed to get RoomInfo for room, room is missing")
return nil
}

View file

@ -50,7 +50,7 @@ func (r *InboundPeeker) PerformInboundPeek(
if err != nil {
return err
}
if info == nil || info.IsStub {
if info == nil || info.IsStub() {
return nil
}
response.RoomExists = true

View file

@ -39,11 +39,13 @@ type Inviter struct {
Inputer *input.Inputer
}
// nolint:gocyclo
func (r *Inviter) PerformInvite(
ctx context.Context,
req *api.PerformInviteRequest,
res *api.PerformInviteResponse,
) ([]api.OutputEvent, error) {
var outputUpdates []api.OutputEvent
event := req.Event
if event.StateKey() == nil {
return nil, fmt.Errorf("invite must be a state event")
@ -66,6 +68,13 @@ func (r *Inviter) PerformInvite(
}
isTargetLocal := domain == r.Cfg.Matrix.ServerName
isOriginLocal := event.Origin() == r.Cfg.Matrix.ServerName
if !isOriginLocal && !isTargetLocal {
res.Error = &api.PerformError{
Code: api.PerformErrorBadRequest,
Msg: "The invite must be either from or to a local user",
}
return nil, nil
}
logger := util.GetLogger(ctx).WithFields(map[string]interface{}{
"inviter": event.Sender(),
@ -97,6 +106,34 @@ func (r *Inviter) PerformInvite(
}
}
updateMembershipTableManually := func() ([]api.OutputEvent, error) {
var updater *shared.MembershipUpdater
if updater, err = r.DB.MembershipUpdater(ctx, roomID, targetUserID, isTargetLocal, req.RoomVersion); err != nil {
return nil, fmt.Errorf("r.DB.MembershipUpdater: %w", err)
}
outputUpdates, err = helpers.UpdateToInviteMembership(updater, &types.Event{
EventNID: 0,
Event: event.Unwrap(),
}, outputUpdates, req.Event.RoomVersion)
if err != nil {
return nil, fmt.Errorf("updateToInviteMembership: %w", err)
}
if err = updater.Commit(); err != nil {
return nil, fmt.Errorf("updater.Commit: %w", err)
}
logger.Debugf("updated membership to invite and sending invite OutputEvent")
return outputUpdates, nil
}
if (info == nil || info.IsStub()) && !isOriginLocal && isTargetLocal {
// The invite came in over federation for a room that we don't know about
// yet. We need to handle this a bit differently to most invites because
// we don't know the room state, therefore the roomserver can't process
// an input event. Instead we will update the membership table with the
// new invite and generate an output event.
return updateMembershipTableManually()
}
var isAlreadyJoined bool
if info != nil {
_, isAlreadyJoined, _, err = r.DB.GetMembership(ctx, info.RoomNID, *event.StateKey())
@ -140,31 +177,13 @@ func (r *Inviter) PerformInvite(
return nil, nil
}
// If the invite originated remotely then we can't send an
// InputRoomEvent for the invite as it will never pass auth checks
// due to lacking room state, but we still need to tell the client
// about the invite so we can accept it, hence we return an output
// event to send to the Sync API.
if !isOriginLocal {
// The invite originated over federation. Process the membership
// update, which will notify the sync API etc about the incoming
// invite. We do NOT send an InputRoomEvent for the invite as it
// will never pass auth checks due to lacking room state, but we
// still need to tell the client about the invite so we can accept
// it, hence we return an output event to send to the sync api.
var updater *shared.MembershipUpdater
updater, err = r.DB.MembershipUpdater(ctx, roomID, targetUserID, isTargetLocal, req.RoomVersion)
if err != nil {
return nil, fmt.Errorf("r.DB.MembershipUpdater: %w", err)
}
unwrapped := event.Unwrap()
var outputUpdates []api.OutputEvent
outputUpdates, err = helpers.UpdateToInviteMembership(updater, unwrapped, nil, req.Event.RoomVersion)
if err != nil {
return nil, fmt.Errorf("updateToInviteMembership: %w", err)
}
if err = updater.Commit(); err != nil {
return nil, fmt.Errorf("updater.Commit: %w", err)
}
logger.Debugf("updated membership to invite and sending invite OutputEvent")
return outputUpdates, nil
return updateMembershipTableManually()
}
// The invite originated locally. Therefore we have a responsibility to
@ -229,12 +248,11 @@ func (r *Inviter) PerformInvite(
Code: api.PerformErrorNotAllowed,
}
logger.WithError(err).WithField("event_id", event.EventID()).Error("r.InputRoomEvents failed")
return nil, nil
}
// Don't notify the sync api of this event in the same way as a federated invite so the invitee
// gets the invite, as the roomserver will do this when it processes the m.room.member invite.
return nil, nil
return outputUpdates, nil
}
func buildInviteStrippedState(
@ -258,7 +276,7 @@ func buildInviteStrippedState(
}
roomState := state.NewStateResolution(db, info)
stateEntries, err := roomState.LoadStateAtSnapshotForStringTuples(
ctx, info.StateSnapshotNID, stateWanted,
ctx, info.StateSnapshotNID(), stateWanted,
)
if err != nil {
return nil, err

View file

@ -268,21 +268,19 @@ func (r *Joiner) performJoinRoomByID(
case nil:
// The room join is local. Send the new join event into the
// roomserver. First of all check that the user isn't already
// a member of the room.
alreadyJoined := false
for _, se := range buildRes.StateEvents {
if !se.StateKeyEquals(userID) {
continue
}
if membership, merr := se.Membership(); merr == nil {
alreadyJoined = (membership == gomatrixserverlib.Join)
break
}
// a member of the room. This is best-effort (as in we won't
// fail if we can't find the existing membership) because there
// is really no harm in just sending another membership event.
membershipReq := &api.QueryMembershipForUserRequest{
RoomID: req.RoomIDOrAlias,
UserID: userID,
}
membershipRes := &api.QueryMembershipForUserResponse{}
_ = r.Queryer.QueryMembershipForUser(ctx, membershipReq, membershipRes)
// If we haven't already joined the room then send an event
// into the room changing our membership status.
if !alreadyJoined {
if !membershipRes.RoomExists || !membershipRes.IsInRoom {
inputReq := rsAPI.InputRoomEventsRequest{
InputRoomEvents: []rsAPI.InputRoomEvent{
{

View file

@ -228,14 +228,14 @@ func (r *Leaver) performFederatedRejectInvite(
util.GetLogger(ctx).WithError(err).Errorf("failed to get MembershipUpdater, still retiring invite event")
}
if updater != nil {
if _, err = updater.SetToLeave(req.UserID, eventID); err != nil {
util.GetLogger(ctx).WithError(err).Errorf("failed to set membership to leave, still retiring invite event")
if err = updater.Delete(); err != nil {
util.GetLogger(ctx).WithError(err).Errorf("failed to delete membership, still retiring invite event")
if err = updater.Rollback(); err != nil {
util.GetLogger(ctx).WithError(err).Errorf("failed to rollback membership leave, still retiring invite event")
util.GetLogger(ctx).WithError(err).Errorf("failed to rollback deleting membership, still retiring invite event")
}
} else {
if err = updater.Commit(); err != nil {
util.GetLogger(ctx).WithError(err).Errorf("failed to commit membership update, still retiring invite event")
util.GetLogger(ctx).WithError(err).Errorf("failed to commit deleting membership, still retiring invite event")
}
}
}

View file

@ -16,6 +16,7 @@ package query
import (
"context"
"database/sql"
"encoding/json"
"errors"
"fmt"
@ -60,7 +61,7 @@ func (r *Queryer) QueryStateAfterEvents(
if err != nil {
return err
}
if info == nil || info.IsStub {
if info == nil || info.IsStub() {
return nil
}
@ -225,6 +226,9 @@ func (r *Queryer) QueryMembershipsForRoom(
var eventNIDs []types.EventNID
eventNIDs, err = r.DB.GetMembershipEventNIDsForRoom(ctx, info.RoomNID, request.JoinedOnly, request.LocalOnly)
if err != nil {
if err == sql.ErrNoRows {
return nil
}
return fmt.Errorf("r.DB.GetMembershipEventNIDsForRoom: %w", err)
}
events, err = r.DB.Events(ctx, eventNIDs)
@ -260,6 +264,9 @@ func (r *Queryer) QueryMembershipsForRoom(
var eventNIDs []types.EventNID
eventNIDs, err = r.DB.GetMembershipEventNIDsForRoom(ctx, info.RoomNID, request.JoinedOnly, false)
if err != nil {
if err == sql.ErrNoRows {
return nil
}
return err
}
@ -295,7 +302,7 @@ func (r *Queryer) QueryServerJoinedToRoom(
if err != nil {
return fmt.Errorf("r.DB.RoomInfo: %w", err)
}
if info == nil || info.IsStub {
if info == nil || info.IsStub() {
return nil
}
response.RoomExists = true
@ -344,8 +351,8 @@ func (r *Queryer) QueryServerAllowedToSeeEvent(
if err != nil {
return err
}
if info == nil {
return fmt.Errorf("QueryServerAllowedToSeeEvent: no room info for room %s", roomID)
if info == nil || info.IsStub() {
return nil
}
response.AllowedToSeeEvent, err = helpers.CheckServerAllowedToSeeEvent(
ctx, r.DB, info, request.EventID, request.ServerName, inRoomRes.IsInRoom,
@ -383,7 +390,7 @@ func (r *Queryer) QueryMissingEvents(
if err != nil {
return err
}
if info == nil || info.IsStub {
if info == nil || info.IsStub() {
return fmt.Errorf("missing RoomInfo for room %s", events[0].RoomID())
}
@ -422,7 +429,7 @@ func (r *Queryer) QueryStateAndAuthChain(
if err != nil {
return err
}
if info == nil || info.IsStub {
if info == nil || info.IsStub() {
return nil
}
response.RoomExists = true
@ -767,7 +774,7 @@ func (r *Queryer) QueryRestrictedJoinAllowed(ctx context.Context, req *api.Query
if err != nil {
return fmt.Errorf("r.DB.RoomInfo: %w", err)
}
if roomInfo == nil || roomInfo.IsStub {
if roomInfo == nil || roomInfo.IsStub() {
return nil // fmt.Errorf("room %q doesn't exist or is stub room", req.RoomID)
}
// If the room version doesn't allow restricted joins then don't
@ -830,7 +837,7 @@ func (r *Queryer) QueryRestrictedJoinAllowed(ctx context.Context, req *api.Query
// See if the room exists. If it doesn't exist or if it's a stub
// room entry then we can't check memberships.
targetRoomInfo, err := r.DB.RoomInfo(ctx, rule.RoomID)
if err != nil || targetRoomInfo == nil || targetRoomInfo.IsStub {
if err != nil || targetRoomInfo == nil || targetRoomInfo.IsStub() {
res.Resident = false
continue
}

View file

@ -124,6 +124,29 @@ func (v *StateResolution) LoadStateAtEvent(
return stateEntries, nil
}
// LoadStateAtEventForHistoryVisibility loads the full state of a room before a particular event, for use in history visibility checks.
func (v *StateResolution) LoadStateAtEventForHistoryVisibility(
ctx context.Context, eventID string,
) ([]types.StateEntry, error) {
span, ctx := opentracing.StartSpanFromContext(ctx, "StateResolution.LoadStateAtEventForHistoryVisibility")
defer span.Finish()
snapshotNID, err := v.db.SnapshotNIDFromEventID(ctx, eventID)
if err != nil {
return nil, fmt.Errorf("LoadStateAtEvent.SnapshotNIDFromEventID failed for event %s : %w", eventID, err)
}
if snapshotNID == 0 {
return nil, fmt.Errorf("LoadStateAtEvent.SnapshotNIDFromEventID(%s) returned 0 NID, was this event stored?", eventID)
}
stateEntries, err := v.LoadStateAtSnapshot(ctx, snapshotNID)
if err != nil {
return nil, err
}
return stateEntries, nil
}
// LoadCombinedStateAfterEvents loads a snapshot of the state after each of the events
// and combines those snapshots together into a single list. At this point it is
// possible to run into duplicate (type, state key) tuples.

View file

@ -166,4 +166,6 @@ type Database interface {
GetKnownRooms(ctx context.Context) ([]string, error)
// ForgetRoom sets a flag in the membership table, that the user wishes to forget a specific room
ForgetRoom(ctx context.Context, userID, roomID string, forget bool) error
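// GetHistoryVisibilityState returns the room state before the given event for use in
// history visibility checks, or tables.OptimisationNotSupportedError if the database
// engine does not support this optimisation.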
GetHistoryVisibilityState(ctx context.Context, roomInfo *types.RoomInfo, eventID string, domain string) ([]*gomatrixserverlib.Event, error)
}

View file

@ -15,32 +15,21 @@
package deltas
import (
"context"
"database/sql"
"fmt"
"github.com/matrix-org/dendrite/internal/sqlutil"
"github.com/pressly/goose"
)
func LoadFromGoose() {
goose.AddMigration(UpAddForgottenColumn, DownAddForgottenColumn)
goose.AddMigration(UpStateBlocksRefactor, DownStateBlocksRefactor)
}
func LoadAddForgottenColumn(m *sqlutil.Migrations) {
m.AddMigration(UpAddForgottenColumn, DownAddForgottenColumn)
}
func UpAddForgottenColumn(tx *sql.Tx) error {
_, err := tx.Exec(`ALTER TABLE roomserver_membership ADD COLUMN IF NOT EXISTS forgotten BOOLEAN NOT NULL DEFAULT false;`)
func UpAddForgottenColumn(ctx context.Context, tx *sql.Tx) error {
_, err := tx.ExecContext(ctx, `ALTER TABLE roomserver_membership ADD COLUMN IF NOT EXISTS forgotten BOOLEAN NOT NULL DEFAULT false;`)
if err != nil {
return fmt.Errorf("failed to execute upgrade: %w", err)
}
return nil
}
func DownAddForgottenColumn(tx *sql.Tx) error {
_, err := tx.Exec(`ALTER TABLE roomserver_membership DROP COLUMN IF EXISTS forgotten;`)
func DownAddForgottenColumn(ctx context.Context, tx *sql.Tx) error {
_, err := tx.ExecContext(ctx, `ALTER TABLE roomserver_membership DROP COLUMN IF EXISTS forgotten;`)
if err != nil {
return fmt.Errorf("failed to execute downgrade: %w", err)
}

View file

@ -15,11 +15,11 @@
package deltas
import (
"context"
"database/sql"
"fmt"
"github.com/lib/pq"
"github.com/matrix-org/dendrite/internal/sqlutil"
"github.com/matrix-org/dendrite/roomserver/types"
"github.com/matrix-org/util"
"github.com/sirupsen/logrus"
@ -36,48 +36,44 @@ type stateBlockData struct {
EventNIDs types.EventNIDs
}
func LoadStateBlocksRefactor(m *sqlutil.Migrations) {
m.AddMigration(UpStateBlocksRefactor, DownStateBlocksRefactor)
}
// nolint:gocyclo
func UpStateBlocksRefactor(tx *sql.Tx) error {
func UpStateBlocksRefactor(ctx context.Context, tx *sql.Tx) error {
logrus.Warn("Performing state storage upgrade. Please wait, this may take some time!")
defer logrus.Warn("State storage upgrade complete")
var snapshotcount int
var maxsnapshotid int
var maxblockid int
if err := tx.QueryRow(`SELECT COUNT(DISTINCT state_snapshot_nid) FROM roomserver_state_snapshots;`).Scan(&snapshotcount); err != nil {
return fmt.Errorf("tx.QueryRow.Scan (count snapshots): %w", err)
if err := tx.QueryRowContext(ctx, `SELECT COUNT(DISTINCT state_snapshot_nid) FROM roomserver_state_snapshots;`).Scan(&snapshotcount); err != nil {
return fmt.Errorf("tx.QueryRowContext.Scan (count snapshots): %w", err)
}
if err := tx.QueryRow(`SELECT COALESCE(MAX(state_snapshot_nid),0) FROM roomserver_state_snapshots;`).Scan(&maxsnapshotid); err != nil {
return fmt.Errorf("tx.QueryRow.Scan (count snapshots): %w", err)
if err := tx.QueryRowContext(ctx, `SELECT COALESCE(MAX(state_snapshot_nid),0) FROM roomserver_state_snapshots;`).Scan(&maxsnapshotid); err != nil {
return fmt.Errorf("tx.QueryRowContext.Scan (count snapshots): %w", err)
}
if err := tx.QueryRow(`SELECT COALESCE(MAX(state_block_nid),0) FROM roomserver_state_block;`).Scan(&maxblockid); err != nil {
return fmt.Errorf("tx.QueryRow.Scan (count snapshots): %w", err)
if err := tx.QueryRowContext(ctx, `SELECT COALESCE(MAX(state_block_nid),0) FROM roomserver_state_block;`).Scan(&maxblockid); err != nil {
return fmt.Errorf("tx.QueryRowContext.Scan (count snapshots): %w", err)
}
maxsnapshotid++
maxblockid++
if _, err := tx.Exec(`ALTER TABLE roomserver_state_block RENAME TO _roomserver_state_block;`); err != nil {
return fmt.Errorf("tx.Exec: %w", err)
if _, err := tx.ExecContext(ctx, `ALTER TABLE roomserver_state_block RENAME TO _roomserver_state_block;`); err != nil {
return fmt.Errorf("tx.ExecContext: %w", err)
}
if _, err := tx.Exec(`ALTER TABLE roomserver_state_snapshots RENAME TO _roomserver_state_snapshots;`); err != nil {
return fmt.Errorf("tx.Exec: %w", err)
if _, err := tx.ExecContext(ctx, `ALTER TABLE roomserver_state_snapshots RENAME TO _roomserver_state_snapshots;`); err != nil {
return fmt.Errorf("tx.ExecContext: %w", err)
}
// We create new sequences starting with the maximum state snapshot and block NIDs.
// This means that all newly created snapshots and blocks by the migration will have
// NIDs higher than these values, so that when we come to update the references to
// these NIDs using UPDATE statements, we can guarantee we are only ever updating old
// values and not accidentally overwriting new ones.
if _, err := tx.Exec(fmt.Sprintf(`CREATE SEQUENCE roomserver_state_block_nid_sequence START WITH %d;`, maxblockid)); err != nil {
return fmt.Errorf("tx.Exec: %w", err)
if _, err := tx.ExecContext(ctx, fmt.Sprintf(`CREATE SEQUENCE roomserver_state_block_nid_sequence START WITH %d;`, maxblockid)); err != nil {
return fmt.Errorf("tx.ExecContext: %w", err)
}
if _, err := tx.Exec(fmt.Sprintf(`CREATE SEQUENCE roomserver_state_snapshot_nid_sequence START WITH %d;`, maxsnapshotid)); err != nil {
return fmt.Errorf("tx.Exec: %w", err)
if _, err := tx.ExecContext(ctx, fmt.Sprintf(`CREATE SEQUENCE roomserver_state_snapshot_nid_sequence START WITH %d;`, maxsnapshotid)); err != nil {
return fmt.Errorf("tx.ExecContext: %w", err)
}
_, err := tx.Exec(`
_, err := tx.ExecContext(ctx, `
CREATE TABLE IF NOT EXISTS roomserver_state_block (
state_block_nid bigint PRIMARY KEY DEFAULT nextval('roomserver_state_block_nid_sequence'),
state_block_hash BYTEA UNIQUE,
@ -87,7 +83,7 @@ func UpStateBlocksRefactor(tx *sql.Tx) error {
if err != nil {
return fmt.Errorf("tx.Exec (create blocks table): %w", err)
}
_, err = tx.Exec(`
_, err = tx.ExecContext(ctx, `
CREATE TABLE IF NOT EXISTS roomserver_state_snapshots (
state_snapshot_nid bigint PRIMARY KEY DEFAULT nextval('roomserver_state_snapshot_nid_sequence'),
state_snapshot_hash BYTEA UNIQUE,
@ -104,7 +100,7 @@ func UpStateBlocksRefactor(tx *sql.Tx) error {
// in question a state snapshot NID of 0 to indicate 'no snapshot'.
// If we don't do this, we'll fail the assertions later on which try to ensure we didn't forget
// any snapshots.
_, err = tx.Exec(
_, err = tx.ExecContext(ctx,
`UPDATE roomserver_events SET state_snapshot_nid = 0 WHERE event_type_nid = $1 AND event_state_key_nid = $2`,
types.MRoomCreateNID, types.EmptyStateKeyNID,
)
@ -115,7 +111,7 @@ func UpStateBlocksRefactor(tx *sql.Tx) error {
batchsize := 100
for batchoffset := 0; batchoffset < snapshotcount; batchoffset += batchsize {
var snapshotrows *sql.Rows
snapshotrows, err = tx.Query(`
snapshotrows, err = tx.QueryContext(ctx, `
SELECT
state_snapshot_nid,
room_nid,
@ -146,7 +142,7 @@ func UpStateBlocksRefactor(tx *sql.Tx) error {
state_block_nid;
`, batchsize, batchoffset)
if err != nil {
return fmt.Errorf("tx.Query: %w", err)
return fmt.Errorf("tx.QueryContext: %w", err)
}
logrus.Warnf("Rewriting snapshots %d-%d of %d...", batchoffset, batchoffset+batchsize, snapshotcount)
@ -183,7 +179,7 @@ func UpStateBlocksRefactor(tx *sql.Tx) error {
// fill in bad create snapshots
for _, s := range badCreateSnapshots {
var createEventNID types.EventNID
err = tx.QueryRow(
err = tx.QueryRowContext(ctx,
`SELECT event_nid FROM roomserver_events WHERE state_snapshot_nid = $1 AND event_type_nid = 1`, s.StateSnapshotNID,
).Scan(&createEventNID)
if err == sql.ErrNoRows {
@ -208,7 +204,7 @@ func UpStateBlocksRefactor(tx *sql.Tx) error {
}
var blocknid types.StateBlockNID
err = tx.QueryRow(`
err = tx.QueryRowContext(ctx, `
INSERT INTO roomserver_state_block (state_block_hash, event_nids)
VALUES ($1, $2)
ON CONFLICT (state_block_hash) DO UPDATE SET event_nids=$2
@ -227,7 +223,7 @@ func UpStateBlocksRefactor(tx *sql.Tx) error {
}
var newNID types.StateSnapshotNID
err = tx.QueryRow(`
err = tx.QueryRowContext(ctx, `
INSERT INTO roomserver_state_snapshots (state_snapshot_hash, room_nid, state_block_nids)
VALUES ($1, $2, $3)
ON CONFLICT (state_snapshot_hash) DO UPDATE SET room_nid=$2
@ -237,12 +233,12 @@ func UpStateBlocksRefactor(tx *sql.Tx) error {
return fmt.Errorf("tx.QueryRow.Scan (insert new snapshot): %w", err)
}
if _, err = tx.Exec(`UPDATE roomserver_events SET state_snapshot_nid=$1 WHERE state_snapshot_nid=$2 AND state_snapshot_nid<$3`, newNID, snapshotdata.StateSnapshotNID, maxsnapshotid); err != nil {
return fmt.Errorf("tx.Exec (update events): %w", err)
if _, err = tx.ExecContext(ctx, `UPDATE roomserver_events SET state_snapshot_nid=$1 WHERE state_snapshot_nid=$2 AND state_snapshot_nid<$3`, newNID, snapshotdata.StateSnapshotNID, maxsnapshotid); err != nil {
return fmt.Errorf("tx.ExecContext (update events): %w", err)
}
if _, err = tx.Exec(`UPDATE roomserver_rooms SET state_snapshot_nid=$1 WHERE state_snapshot_nid=$2 AND state_snapshot_nid<$3`, newNID, snapshotdata.StateSnapshotNID, maxsnapshotid); err != nil {
return fmt.Errorf("tx.Exec (update rooms): %w", err)
if _, err = tx.ExecContext(ctx, `UPDATE roomserver_rooms SET state_snapshot_nid=$1 WHERE state_snapshot_nid=$2 AND state_snapshot_nid<$3`, newNID, snapshotdata.StateSnapshotNID, maxsnapshotid); err != nil {
return fmt.Errorf("tx.ExecContext (update rooms): %w", err)
}
}
}
@ -252,13 +248,13 @@ func UpStateBlocksRefactor(tx *sql.Tx) error {
// in roomserver_state_snapshots
var count int64
if err = tx.QueryRow(`SELECT COUNT(*) FROM roomserver_events WHERE state_snapshot_nid < $1 AND state_snapshot_nid != 0`, maxsnapshotid).Scan(&count); err != nil {
if err = tx.QueryRowContext(ctx, `SELECT COUNT(*) FROM roomserver_events WHERE state_snapshot_nid < $1 AND state_snapshot_nid != 0`, maxsnapshotid).Scan(&count); err != nil {
return fmt.Errorf("assertion query failed: %s", err)
}
if count > 0 {
var res sql.Result
var c int64
res, err = tx.Exec(`UPDATE roomserver_events SET state_snapshot_nid = 0 WHERE state_snapshot_nid < $1 AND state_snapshot_nid != 0`, maxsnapshotid)
res, err = tx.ExecContext(ctx, `UPDATE roomserver_events SET state_snapshot_nid = 0 WHERE state_snapshot_nid < $1 AND state_snapshot_nid != 0`, maxsnapshotid)
if err != nil && err != sql.ErrNoRows {
return fmt.Errorf("failed to reset invalid state snapshots: %w", err)
}
@ -268,13 +264,13 @@ func UpStateBlocksRefactor(tx *sql.Tx) error {
return fmt.Errorf("expected to reset %d event(s) but only updated %d event(s)", count, c)
}
}
if err = tx.QueryRow(`SELECT COUNT(*) FROM roomserver_rooms WHERE state_snapshot_nid < $1 AND state_snapshot_nid != 0`, maxsnapshotid).Scan(&count); err != nil {
if err = tx.QueryRowContext(ctx, `SELECT COUNT(*) FROM roomserver_rooms WHERE state_snapshot_nid < $1 AND state_snapshot_nid != 0`, maxsnapshotid).Scan(&count); err != nil {
return fmt.Errorf("assertion query failed: %s", err)
}
if count > 0 {
var debugRoomID string
var debugSnapNID, debugLastEventNID int64
err = tx.QueryRow(
err = tx.QueryRowContext(ctx,
`SELECT room_id, state_snapshot_nid, last_event_sent_nid FROM roomserver_rooms WHERE state_snapshot_nid < $1 AND state_snapshot_nid != 0`, maxsnapshotid,
).Scan(&debugRoomID, &debugSnapNID, &debugLastEventNID)
if err != nil {
@ -291,13 +287,13 @@ func UpStateBlocksRefactor(tx *sql.Tx) error {
return fmt.Errorf("%d rooms exist in roomserver_rooms which have not been converted to a new state_snapshot_nid; this is a bug, please report", count)
}
if _, err = tx.Exec(`
if _, err = tx.ExecContext(ctx, `
DROP TABLE _roomserver_state_snapshots;
DROP SEQUENCE roomserver_state_snapshot_nid_seq;
`); err != nil {
return fmt.Errorf("tx.Exec (delete old snapshot table): %w", err)
}
if _, err = tx.Exec(`
if _, err = tx.ExecContext(ctx, `
DROP TABLE _roomserver_state_block;
DROP SEQUENCE roomserver_state_block_nid_seq;
`); err != nil {
@ -307,6 +303,6 @@ func UpStateBlocksRefactor(tx *sql.Tx) error {
return nil
}
func DownStateBlocksRefactor(tx *sql.Tx) error {
func DownStateBlocksRefactor(ctx context.Context, tx *sql.Tx) error {
panic("Downgrading state storage is not supported")
}
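// Illustrative sketch, not part of the diff: with the context-aware signature the
// migration is registered through sqlutil.Migrator rather than goose, mirroring what
// the storage layer now does. The handle "db" and the sqlutil import are assumptions.
func exampleRegisterStateRefactor(ctx context.Context, db *sql.DB) error {
m := sqlutil.NewMigrator(db)
m.AddMigrations(sqlutil.Migration{
Version: "roomserver: state blocks refactor",
Up:      UpStateBlocksRefactor,
})
return m.Up(ctx)
}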

View file

@ -23,6 +23,7 @@ import (
"github.com/lib/pq"
"github.com/matrix-org/dendrite/internal"
"github.com/matrix-org/dendrite/internal/sqlutil"
"github.com/matrix-org/dendrite/roomserver/storage/postgres/deltas"
"github.com/matrix-org/dendrite/roomserver/storage/tables"
"github.com/matrix-org/dendrite/roomserver/types"
"github.com/matrix-org/gomatrixserverlib"
@ -86,24 +87,24 @@ const insertMembershipSQL = "" +
const selectMembershipFromRoomAndTargetSQL = "" +
"SELECT membership_nid, event_nid, forgotten FROM roomserver_membership" +
" WHERE room_nid = $1 AND target_nid = $2"
" WHERE room_nid = $1 AND event_nid != 0 AND target_nid = $2"
const selectMembershipsFromRoomAndMembershipSQL = "" +
"SELECT event_nid FROM roomserver_membership" +
" WHERE room_nid = $1 AND membership_nid = $2 and forgotten = false"
" WHERE room_nid = $1 AND event_nid != 0 AND membership_nid = $2 and forgotten = false"
const selectLocalMembershipsFromRoomAndMembershipSQL = "" +
"SELECT event_nid FROM roomserver_membership" +
" WHERE room_nid = $1 AND membership_nid = $2" +
" WHERE room_nid = $1 AND event_nid != 0 AND membership_nid = $2" +
" AND target_local = true and forgotten = false"
const selectMembershipsFromRoomSQL = "" +
"SELECT event_nid FROM roomserver_membership" +
" WHERE room_nid = $1 and forgotten = false"
" WHERE room_nid = $1 AND event_nid != 0 and forgotten = false"
const selectLocalMembershipsFromRoomSQL = "" +
"SELECT event_nid FROM roomserver_membership" +
" WHERE room_nid = $1" +
" WHERE room_nid = $1 AND event_nid != 0" +
" AND target_local = true and forgotten = false"
const selectMembershipForUpdateSQL = "" +
@ -118,6 +119,9 @@ const updateMembershipForgetRoom = "" +
"UPDATE roomserver_membership SET forgotten = $3" +
" WHERE room_nid = $1 AND target_nid = $2"
const deleteMembershipSQL = "" +
"DELETE FROM roomserver_membership WHERE room_nid = $1 AND target_nid = $2"
const selectRoomsWithMembershipSQL = "" +
"SELECT room_nid FROM roomserver_membership WHERE membership_nid = $1 AND target_nid = $2 and forgotten = false"
@ -165,11 +169,20 @@ type membershipStatements struct {
updateMembershipForgetRoomStmt *sql.Stmt
selectLocalServerInRoomStmt *sql.Stmt
selectServerInRoomStmt *sql.Stmt
deleteMembershipStmt *sql.Stmt
}
func CreateMembershipTable(db *sql.DB) error {
_, err := db.Exec(membershipSchema)
return err
if err != nil {
return err
}
m := sqlutil.NewMigrator(db)
m.AddMigrations(sqlutil.Migration{
Version: "roomserver: add forgotten column",
Up: deltas.UpAddForgottenColumn,
})
return m.Up(context.Background())
}
func PrepareMembershipTable(db *sql.DB) (tables.Membership, error) {
@ -191,6 +204,7 @@ func PrepareMembershipTable(db *sql.DB) (tables.Membership, error) {
{&s.updateMembershipForgetRoomStmt, updateMembershipForgetRoom},
{&s.selectLocalServerInRoomStmt, selectLocalServerInRoomSQL},
{&s.selectServerInRoomStmt, selectServerInRoomSQL},
{&s.deleteMembershipStmt, deleteMembershipSQL},
}.Prepare(db)
}
@ -412,3 +426,13 @@ func (s *membershipStatements) SelectServerInRoom(
}
return roomNID == nid, nil
}
func (s *membershipStatements) DeleteMembership(
ctx context.Context, txn *sql.Tx,
roomNID types.RoomNID, targetUserNID types.EventStateKeyNID,
) error {
_, err := sqlutil.TxStmt(txn, s.deleteMembershipStmt).ExecContext(
ctx, roomNID, targetUserNID,
)
return err
}

View file

@ -147,14 +147,16 @@ func (s *roomStatements) InsertRoomNID(
func (s *roomStatements) SelectRoomInfo(ctx context.Context, txn *sql.Tx, roomID string) (*types.RoomInfo, error) {
var info types.RoomInfo
var latestNIDs pq.Int64Array
var stateSnapshotNID types.StateSnapshotNID
stmt := sqlutil.TxStmt(txn, s.selectRoomInfoStmt)
err := stmt.QueryRowContext(ctx, roomID).Scan(
&info.RoomVersion, &info.RoomNID, &info.StateSnapshotNID, &latestNIDs,
&info.RoomVersion, &info.RoomNID, &stateSnapshotNID, &latestNIDs,
)
if err == sql.ErrNoRows {
return nil, nil
}
info.IsStub = len(latestNIDs) == 0
info.SetStateSnapshotNID(stateSnapshotNID)
info.SetIsStub(len(latestNIDs) == 0)
return &info, err
}

View file

@ -72,9 +72,35 @@ const bulkSelectStateBlockNIDsSQL = "" +
"SELECT state_snapshot_nid, state_block_nids FROM roomserver_state_snapshots" +
" WHERE state_snapshot_nid = ANY($1) ORDER BY state_snapshot_nid ASC"
// Looks up both the history visibility event and the relevant membership events
// for a given domain name from a given state snapshot. This is used to optimise the
// helpers.CheckServerAllowedToSeeEvent function.
// TODO: There's a sequence scan here because of the hash join strategy, which is
// probably O(n) on state key entries, so there must be a way to avoid that somehow.
// Event type NIDs are:
// - 5: m.room.member as per https://github.com/matrix-org/dendrite/blob/c7f7aec4d07d59120d37d5b16a900f6d608a75c4/roomserver/storage/postgres/event_types_table.go#L40
// - 7: m.room.history_visibility as per https://github.com/matrix-org/dendrite/blob/c7f7aec4d07d59120d37d5b16a900f6d608a75c4/roomserver/storage/postgres/event_types_table.go#L42
const bulkSelectStateForHistoryVisibilitySQL = `
SELECT event_nid FROM (
SELECT event_nid, event_type_nid, event_state_key_nid FROM roomserver_events
WHERE (event_type_nid = 5 OR event_type_nid = 7)
AND event_nid = ANY(
SELECT UNNEST(event_nids) FROM roomserver_state_block
WHERE state_block_nid = ANY(
SELECT UNNEST(state_block_nids) FROM roomserver_state_snapshots
WHERE state_snapshot_nid = $1
)
)
) AS roomserver_events
INNER JOIN roomserver_event_state_keys
ON roomserver_events.event_state_key_nid = roomserver_event_state_keys.event_state_key_nid
AND (event_type_nid = 7 OR event_state_key LIKE '%:' || $2);
`
type stateSnapshotStatements struct {
insertStateStmt *sql.Stmt
bulkSelectStateBlockNIDsStmt *sql.Stmt
insertStateStmt *sql.Stmt
bulkSelectStateBlockNIDsStmt *sql.Stmt
bulkSelectStateForHistoryVisibilityStmt *sql.Stmt
}
func CreateStateSnapshotTable(db *sql.DB) error {
@ -88,6 +114,7 @@ func PrepareStateSnapshotTable(db *sql.DB) (tables.StateSnapshot, error) {
return s, sqlutil.StatementList{
{&s.insertStateStmt, insertStateSQL},
{&s.bulkSelectStateBlockNIDsStmt, bulkSelectStateBlockNIDsSQL},
{&s.bulkSelectStateForHistoryVisibilityStmt, bulkSelectStateForHistoryVisibilitySQL},
}.Prepare(db)
}
@ -136,3 +163,23 @@ func (s *stateSnapshotStatements) BulkSelectStateBlockNIDs(
}
return results, nil
}
func (s *stateSnapshotStatements) BulkSelectStateForHistoryVisibility(
ctx context.Context, txn *sql.Tx, stateSnapshotNID types.StateSnapshotNID, domain string,
) ([]types.EventNID, error) {
stmt := sqlutil.TxStmt(txn, s.bulkSelectStateForHistoryVisibilityStmt)
rows, err := stmt.QueryContext(ctx, stateSnapshotNID, domain)
if err != nil {
return nil, err
}
defer rows.Close() // nolint: errcheck
results := make([]types.EventNID, 0, 16)
for rows.Next() {
var eventNID types.EventNID
if err = rows.Scan(&eventNID); err != nil {
return nil, err
}
results = append(results, eventNID)
}
return results, rows.Err()
}
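// Illustrative sketch, not part of the diff: the query above only pulls out event
// type NIDs 5 (m.room.member) and 7 (m.room.history_visibility), so the result stays
// small even for large rooms. The statements value "s" and the domain are assumptions.
func exampleVisibilityNIDs(ctx context.Context, s *stateSnapshotStatements, snapshot types.StateSnapshotNID) ([]types.EventNID, error) {
// Membership events are filtered to state keys ending in ":example.org".
return s.BulkSelectStateForHistoryVisibility(ctx, nil, snapshot, "example.org")
}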

View file

@ -21,7 +21,6 @@ import (
// Import the postgres database driver.
_ "github.com/lib/pq"
"github.com/matrix-org/dendrite/internal/caching"
"github.com/matrix-org/dendrite/internal/sqlutil"
"github.com/matrix-org/dendrite/roomserver/storage/postgres/deltas"
@ -45,17 +44,25 @@ func Open(base *base.BaseDendrite, dbProperties *config.DatabaseOptions, cache c
}
// Create the tables.
if err := d.create(db); err != nil {
if err = d.create(db); err != nil {
return nil, err
}
// Then execute the migrations. By this point the tables are created with the latest
// schemas.
m := sqlutil.NewMigrations()
deltas.LoadAddForgottenColumn(m)
deltas.LoadStateBlocksRefactor(m)
if err := m.RunDeltas(db, dbProperties); err != nil {
return nil, err
// Special case: since this migration touches several tables, it needs to be
// sure that all tables have been created first.
// TODO: Remove when we are sure there are no goose artefacts left in the db.
// The query below only succeeds against the old schema; the event_nid column was
// removed by the migration, so an error here means the migration has already run.
err = db.QueryRow("SELECT event_nid FROM roomserver_state_block LIMIT 1;").Scan()
if err == nil {
m := sqlutil.NewMigrator(db)
m.AddMigrations(sqlutil.Migration{
Version: "roomserver: state blocks refactor",
Up: deltas.UpStateBlocksRefactor,
})
if err := m.Up(base.Context()); err != nil {
return nil, err
}
}
// Then prepare the statements. Now that the migrations have run, any columns referred

View file

@ -15,7 +15,7 @@ type MembershipUpdater struct {
d *Database
roomNID types.RoomNID
targetUserNID types.EventStateKeyNID
membership tables.MembershipState
oldMembership tables.MembershipState
}
func NewMembershipUpdater(
@ -30,7 +30,6 @@ func NewMembershipUpdater(
if err != nil {
return err
}
targetUserNID, err = d.assignStateKeyNID(ctx, targetUserID)
if err != nil {
return err
@ -73,146 +72,62 @@ func (d *Database) membershipUpdaterTxn(
// IsInvite implements types.MembershipUpdater
func (u *MembershipUpdater) IsInvite() bool {
return u.membership == tables.MembershipStateInvite
return u.oldMembership == tables.MembershipStateInvite
}
// IsJoin implements types.MembershipUpdater
func (u *MembershipUpdater) IsJoin() bool {
return u.membership == tables.MembershipStateJoin
return u.oldMembership == tables.MembershipStateJoin
}
// IsLeave implements types.MembershipUpdater
func (u *MembershipUpdater) IsLeave() bool {
return u.membership == tables.MembershipStateLeaveOrBan
return u.oldMembership == tables.MembershipStateLeaveOrBan
}
// IsKnock implements types.MembershipUpdater
func (u *MembershipUpdater) IsKnock() bool {
return u.membership == tables.MembershipStateKnock
return u.oldMembership == tables.MembershipStateKnock
}
// SetToInvite implements types.MembershipUpdater
func (u *MembershipUpdater) SetToInvite(event *gomatrixserverlib.Event) (bool, error) {
var inserted bool
err := u.d.Writer.Do(u.d.DB, u.txn, func(txn *sql.Tx) error {
func (u *MembershipUpdater) Delete() error {
if _, err := u.d.InvitesTable.UpdateInviteRetired(u.ctx, u.txn, u.roomNID, u.targetUserNID); err != nil {
return err
}
return u.d.MembershipTable.DeleteMembership(u.ctx, u.txn, u.roomNID, u.targetUserNID)
}
func (u *MembershipUpdater) Update(newMembership tables.MembershipState, event *types.Event) (bool, []string, error) {
var inserted bool // Did the query result in a membership change?
var retired []string // Did we retire any updates in the process?
return inserted, retired, u.d.Writer.Do(u.d.DB, u.txn, func(txn *sql.Tx) error {
senderUserNID, err := u.d.assignStateKeyNID(u.ctx, event.Sender())
if err != nil {
return fmt.Errorf("u.d.AssignStateKeyNID: %w", err)
}
inserted, err = u.d.InvitesTable.InsertInviteEvent(
u.ctx, u.txn, event.EventID(), u.roomNID, u.targetUserNID, senderUserNID, event.JSON(),
)
inserted, err = u.d.MembershipTable.UpdateMembership(u.ctx, u.txn, u.roomNID, u.targetUserNID, senderUserNID, newMembership, event.EventNID, false)
if err != nil {
return fmt.Errorf("u.d.InvitesTable.InsertInviteEvent: %w", err)
return fmt.Errorf("u.d.MembershipTable.UpdateMembership: %w", err)
}
// Look up the NID of the invite event
nIDs, err := u.d.eventNIDs(u.ctx, u.txn, []string{event.EventID()}, false)
if err != nil {
return fmt.Errorf("u.d.EventNIDs: %w", err)
if !inserted {
return nil
}
if u.membership != tables.MembershipStateInvite {
if inserted, err = u.d.MembershipTable.UpdateMembership(u.ctx, u.txn, u.roomNID, u.targetUserNID, senderUserNID, tables.MembershipStateInvite, nIDs[event.EventID()], false); err != nil {
return fmt.Errorf("u.d.MembershipTable.UpdateMembership: %w", err)
switch {
case u.oldMembership != tables.MembershipStateInvite && newMembership == tables.MembershipStateInvite:
inserted, err = u.d.InvitesTable.InsertInviteEvent(
u.ctx, u.txn, event.EventID(), u.roomNID, u.targetUserNID, senderUserNID, event.JSON(),
)
if err != nil {
return fmt.Errorf("u.d.InvitesTable.InsertInviteEvent: %w", err)
}
}
return nil
})
return inserted, err
}
// SetToJoin implements types.MembershipUpdater
func (u *MembershipUpdater) SetToJoin(senderUserID string, eventID string, isUpdate bool) ([]string, error) {
var inviteEventIDs []string
err := u.d.Writer.Do(u.d.DB, u.txn, func(txn *sql.Tx) error {
senderUserNID, err := u.d.assignStateKeyNID(u.ctx, senderUserID)
if err != nil {
return fmt.Errorf("u.d.AssignStateKeyNID: %w", err)
}
// If this is a join event update, there is no invite to update
if !isUpdate {
inviteEventIDs, err = u.d.InvitesTable.UpdateInviteRetired(
case u.oldMembership == tables.MembershipStateInvite && newMembership != tables.MembershipStateInvite:
retired, err = u.d.InvitesTable.UpdateInviteRetired(
u.ctx, u.txn, u.roomNID, u.targetUserNID,
)
if err != nil {
return fmt.Errorf("u.d.InvitesTable.UpdateInviteRetired: %w", err)
}
}
// Look up the NID of the new join event
nIDs, err := u.d.eventNIDs(u.ctx, u.txn, []string{eventID}, false)
if err != nil {
return fmt.Errorf("u.d.EventNIDs: %w", err)
}
if u.membership != tables.MembershipStateJoin || isUpdate {
if _, err = u.d.MembershipTable.UpdateMembership(u.ctx, u.txn, u.roomNID, u.targetUserNID, senderUserNID, tables.MembershipStateJoin, nIDs[eventID], false); err != nil {
return fmt.Errorf("u.d.MembershipTable.UpdateMembership: %w", err)
}
}
return nil
})
return inviteEventIDs, err
}
// SetToLeave implements types.MembershipUpdater
func (u *MembershipUpdater) SetToLeave(senderUserID string, eventID string) ([]string, error) {
var inviteEventIDs []string
err := u.d.Writer.Do(u.d.DB, u.txn, func(txn *sql.Tx) error {
senderUserNID, err := u.d.assignStateKeyNID(u.ctx, senderUserID)
if err != nil {
return fmt.Errorf("u.d.AssignStateKeyNID: %w", err)
}
inviteEventIDs, err = u.d.InvitesTable.UpdateInviteRetired(
u.ctx, u.txn, u.roomNID, u.targetUserNID,
)
if err != nil {
return fmt.Errorf("u.d.InvitesTable.updateInviteRetired: %w", err)
}
// Look up the NID of the new leave event
nIDs, err := u.d.eventNIDs(u.ctx, u.txn, []string{eventID}, false)
if err != nil {
return fmt.Errorf("u.d.EventNIDs: %w", err)
}
if u.membership != tables.MembershipStateLeaveOrBan {
if _, err = u.d.MembershipTable.UpdateMembership(u.ctx, u.txn, u.roomNID, u.targetUserNID, senderUserNID, tables.MembershipStateLeaveOrBan, nIDs[eventID], false); err != nil {
return fmt.Errorf("u.d.MembershipTable.UpdateMembership: %w", err)
}
}
return nil
})
return inviteEventIDs, err
}
// SetToKnock implements types.MembershipUpdater
func (u *MembershipUpdater) SetToKnock(event *gomatrixserverlib.Event) (bool, error) {
var inserted bool
err := u.d.Writer.Do(u.d.DB, u.txn, func(txn *sql.Tx) error {
senderUserNID, err := u.d.assignStateKeyNID(u.ctx, event.Sender())
if err != nil {
return fmt.Errorf("u.d.AssignStateKeyNID: %w", err)
}
if u.membership != tables.MembershipStateKnock {
// Look up the NID of the new knock event
nIDs, err := u.d.eventNIDs(u.ctx, u.txn, []string{event.EventID()}, false)
if err != nil {
return fmt.Errorf("u.d.EventNIDs: %w", err)
}
if inserted, err = u.d.MembershipTable.UpdateMembership(u.ctx, u.txn, u.roomNID, u.targetUserNID, senderUserNID, tables.MembershipStateKnock, nIDs[event.EventID()], false); err != nil {
return fmt.Errorf("u.d.MembershipTable.UpdateMembership: %w", err)
}
}
return nil
})
return inserted, err
}
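// Illustrative sketch, not part of the diff: the separate SetToInvite/SetToJoin/
// SetToLeave/SetToKnock helpers are replaced by the single Update call (plus Delete
// for removing a membership row). The updater "u" and event values are assumptions.
func exampleApplyMembership(u *MembershipUpdater, ev *types.Event) error {
changed, retired, err := u.Update(tables.MembershipStateJoin, ev)
if err != nil {
return err
}
_ = changed // true if the stored membership actually changed
_ = retired // invite event IDs retired by the transition
return nil
}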

View file

@ -217,6 +217,14 @@ func (u *RoomUpdater) SetLatestEvents(
roomNID types.RoomNID, latest []types.StateAtEventAndReference, lastEventNIDSent types.EventNID,
currentStateSnapshotNID types.StateSnapshotNID,
) error {
switch {
case len(latest) == 0:
return fmt.Errorf("cannot set latest events with no latest event references")
case currentStateSnapshotNID == 0:
return fmt.Errorf("cannot set latest events with invalid state snapshot NID")
case lastEventNIDSent == 0:
return fmt.Errorf("cannot set latest events with invalid latest event NID")
}
eventNIDs := make([]types.EventNID, len(latest))
for i := range latest {
eventNIDs[i] = latest[i].EventNID
@ -229,8 +237,10 @@ func (u *RoomUpdater) SetLatestEvents(
// Since it's entirely possible that this types.RoomInfo came from the
// cache, we should make sure to update that entry so that the next run
// works from live data.
u.roomInfo.StateSnapshotNID = currentStateSnapshotNID
u.roomInfo.IsStub = false
if u.roomInfo != nil {
u.roomInfo.SetStateSnapshotNID(currentStateSnapshotNID)
u.roomInfo.SetIsStub(false)
}
return nil
})
}
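// Illustrative sketch, not part of the diff: the new guards make bad input fail fast
// instead of writing an unusable room entry. The updater "u" and room NID are
// assumptions for the example.
func exampleGuardedSetLatest(u *RoomUpdater, roomNID types.RoomNID) error {
// With no latest events, a zero snapshot NID and a zero last-sent event NID,
// this now returns an error rather than persisting anything.
return u.SetLatestEvents(roomNID, nil, 0, 0)
}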

View file

@ -156,13 +156,13 @@ func (d *Database) RoomInfo(ctx context.Context, roomID string) (*types.RoomInfo
}
func (d *Database) roomInfo(ctx context.Context, txn *sql.Tx, roomID string) (*types.RoomInfo, error) {
if roomInfo, ok := d.Cache.GetRoomInfo(roomID); ok && roomInfo != nil {
return roomInfo, nil
}
roomInfo, err := d.RoomsTable.SelectRoomInfo(ctx, txn, roomID)
if err == nil && roomInfo != nil {
if err != nil {
return nil, err
}
if roomInfo != nil {
d.Cache.StoreRoomServerRoomID(roomInfo.RoomNID, roomID)
d.Cache.StoreRoomInfo(roomID, roomInfo)
d.Cache.StoreRoomVersion(roomID, roomInfo.RoomVersion)
}
return roomInfo, err
}
@ -489,8 +489,8 @@ func (d *Database) events(
fetchNIDList := make([]types.RoomNID, 0, len(uniqueRoomNIDs))
for n := range uniqueRoomNIDs {
if roomID, ok := d.Cache.GetRoomServerRoomID(n); ok {
if roomInfo, ok := d.Cache.GetRoomInfo(roomID); ok {
roomVersions[n] = roomInfo.RoomVersion
if roomVersion, ok := d.Cache.GetRoomVersion(roomID); ok {
roomVersions[n] = roomVersion
continue
}
}
@ -676,7 +676,7 @@ func (d *Database) storeEvent(
succeeded := false
if updater == nil {
var roomInfo *types.RoomInfo
roomInfo, err = d.RoomInfo(ctx, event.RoomID())
roomInfo, err = d.roomInfo(ctx, txn, event.RoomID())
if err != nil {
return 0, 0, types.StateAtEvent{}, nil, "", fmt.Errorf("d.RoomInfo: %w", err)
}
@ -747,9 +747,6 @@ func (d *Database) MissingAuthPrevEvents(
func (d *Database) assignRoomNID(
ctx context.Context, roomID string, roomVersion gomatrixserverlib.RoomVersion,
) (types.RoomNID, error) {
if roomInfo, ok := d.Cache.GetRoomInfo(roomID); ok {
return roomInfo.RoomNID, nil
}
// Check if we already have a numeric ID in the database.
roomNID, err := d.RoomsTable.SelectRoomNID(ctx, nil, roomID)
if err == sql.ErrNoRows {
@ -822,8 +819,9 @@ func extractRoomVersionFromCreateEvent(event *gomatrixserverlib.Event) (
// "servers should not apply or send redactions to clients until both the redaction event and original event have been seen, and are valid."
// https://matrix.org/docs/spec/rooms/v3#authorization-rules-for-events
// These cases are:
// - This is a redaction event, redact the event it references if we know about it.
// - This is a normal event which may have been previously redacted.
// - This is a redaction event, redact the event it references if we know about it.
// - This is a normal event which may have been previously redacted.
//
// In the first case, check if we have the referenced event then apply the redaction, else store it
// in the redactions table with validated=FALSE. In the second case, check if there is a redaction for it:
// if there is then apply the redactions and set validated=TRUE.
@ -988,6 +986,38 @@ func (d *Database) loadEvent(ctx context.Context, eventID string) *types.Event {
return &evs[0]
}
func (d *Database) GetHistoryVisibilityState(ctx context.Context, roomInfo *types.RoomInfo, eventID string, domain string) ([]*gomatrixserverlib.Event, error) {
eventStates, err := d.EventsTable.BulkSelectStateAtEventByID(ctx, nil, []string{eventID})
if err != nil {
return nil, err
}
stateSnapshotNID := eventStates[0].BeforeStateSnapshotNID
if stateSnapshotNID == 0 {
return nil, nil
}
eventNIDs, err := d.StateSnapshotTable.BulkSelectStateForHistoryVisibility(ctx, nil, stateSnapshotNID, domain)
if err != nil {
return nil, err
}
eventIDs, err := d.EventsTable.BulkSelectEventID(ctx, nil, eventNIDs)
if err != nil {
// Best effort: fall back to empty event IDs rather than failing the whole lookup.
eventIDs = map[types.EventNID]string{}
}
events := make([]*gomatrixserverlib.Event, 0, len(eventNIDs))
for _, eventNID := range eventNIDs {
data, err := d.EventJSONTable.BulkSelectEventJSON(ctx, nil, []types.EventNID{eventNID})
if err != nil {
return nil, err
}
ev, err := gomatrixserverlib.NewEventFromTrustedJSONWithEventID(eventIDs[eventNID], data[0].EventJSON, false, roomInfo.RoomVersion)
if err != nil {
return nil, err
}
events = append(events, ev)
}
return events, nil
}
// GetStateEvent returns the current state event of a given type for a given room with a given state key
// If no event could be found, returns nil
// If there was an issue during the retrieval, returns an error
@ -1000,7 +1030,7 @@ func (d *Database) GetStateEvent(ctx context.Context, roomID, evType, stateKey s
return nil, fmt.Errorf("room %s doesn't exist", roomID)
}
// e.g invited rooms
if roomInfo.IsStub {
if roomInfo.IsStub() {
return nil, nil
}
eventTypeNID, err := d.EventTypesTable.SelectEventTypeNID(ctx, nil, evType)
@ -1019,7 +1049,7 @@ func (d *Database) GetStateEvent(ctx context.Context, roomID, evType, stateKey s
if err != nil {
return nil, err
}
entries, err := d.loadStateAtSnapshot(ctx, roomInfo.StateSnapshotNID)
entries, err := d.loadStateAtSnapshot(ctx, roomInfo.StateSnapshotNID())
if err != nil {
return nil, err
}
@ -1065,7 +1095,7 @@ func (d *Database) GetStateEventsWithEventType(ctx context.Context, roomID, evTy
return nil, fmt.Errorf("room %s doesn't exist", roomID)
}
// e.g invited rooms
if roomInfo.IsStub {
if roomInfo.IsStub() {
return nil, nil
}
eventTypeNID, err := d.EventTypesTable.SelectEventTypeNID(ctx, nil, evType)
@ -1076,7 +1106,7 @@ func (d *Database) GetStateEventsWithEventType(ctx context.Context, roomID, evTy
if err != nil {
return nil, err
}
entries, err := d.loadStateAtSnapshot(ctx, roomInfo.StateSnapshotNID)
entries, err := d.loadStateAtSnapshot(ctx, roomInfo.StateSnapshotNID())
if err != nil {
return nil, err
}
@ -1193,10 +1223,10 @@ func (d *Database) GetBulkStateContent(ctx context.Context, roomIDs []string, tu
return nil, fmt.Errorf("GetBulkStateContent: failed to load room info for room %s : %w", roomID, err2)
}
// for unknown rooms or rooms which we don't have the current state, skip them.
if roomInfo == nil || roomInfo.IsStub {
if roomInfo == nil || roomInfo.IsStub() {
continue
}
entries, err2 := d.loadStateAtSnapshot(ctx, roomInfo.StateSnapshotNID)
entries, err2 := d.loadStateAtSnapshot(ctx, roomInfo.StateSnapshotNID())
if err2 != nil {
return nil, fmt.Errorf("GetBulkStateContent: failed to load state for room %s : %w", roomID, err2)
}

View file

@ -15,24 +15,13 @@
package deltas
import (
"context"
"database/sql"
"fmt"
"github.com/matrix-org/dendrite/internal/sqlutil"
"github.com/pressly/goose"
)
func LoadFromGoose() {
goose.AddMigration(UpAddForgottenColumn, DownAddForgottenColumn)
goose.AddMigration(UpStateBlocksRefactor, DownStateBlocksRefactor)
}
func LoadAddForgottenColumn(m *sqlutil.Migrations) {
m.AddMigration(UpAddForgottenColumn, DownAddForgottenColumn)
}
func UpAddForgottenColumn(tx *sql.Tx) error {
_, err := tx.Exec(` ALTER TABLE roomserver_membership RENAME TO roomserver_membership_tmp;
func UpAddForgottenColumn(ctx context.Context, tx *sql.Tx) error {
_, err := tx.ExecContext(ctx, ` ALTER TABLE roomserver_membership RENAME TO roomserver_membership_tmp;
CREATE TABLE IF NOT EXISTS roomserver_membership (
room_nid INTEGER NOT NULL,
target_nid INTEGER NOT NULL,
@ -57,8 +46,8 @@ DROP TABLE roomserver_membership_tmp;`)
return nil
}
func DownAddForgottenColumn(tx *sql.Tx) error {
_, err := tx.Exec(` ALTER TABLE roomserver_membership RENAME TO roomserver_membership_tmp;
func DownAddForgottenColumn(ctx context.Context, tx *sql.Tx) error {
_, err := tx.ExecContext(ctx, ` ALTER TABLE roomserver_membership RENAME TO roomserver_membership_tmp;
CREATE TABLE IF NOT EXISTS roomserver_membership (
room_nid INTEGER NOT NULL,
target_nid INTEGER NOT NULL,

View file

@ -21,40 +21,35 @@ import (
"fmt"
"github.com/matrix-org/dendrite/internal"
"github.com/matrix-org/dendrite/internal/sqlutil"
"github.com/matrix-org/dendrite/roomserver/types"
"github.com/matrix-org/util"
"github.com/sirupsen/logrus"
)
func LoadStateBlocksRefactor(m *sqlutil.Migrations) {
m.AddMigration(UpStateBlocksRefactor, DownStateBlocksRefactor)
}
// nolint:gocyclo
func UpStateBlocksRefactor(tx *sql.Tx) error {
func UpStateBlocksRefactor(ctx context.Context, tx *sql.Tx) error {
logrus.Warn("Performing state storage upgrade. Please wait, this may take some time!")
defer logrus.Warn("State storage upgrade complete")
var maxsnapshotid int
var maxblockid int
if err := tx.QueryRow(`SELECT IFNULL(MAX(state_snapshot_nid),0) FROM roomserver_state_snapshots;`).Scan(&maxsnapshotid); err != nil {
return fmt.Errorf("tx.QueryRow.Scan (count snapshots): %w", err)
if err := tx.QueryRowContext(ctx, `SELECT IFNULL(MAX(state_snapshot_nid),0) FROM roomserver_state_snapshots;`).Scan(&maxsnapshotid); err != nil {
return fmt.Errorf("tx.QueryRowContext.Scan (count snapshots): %w", err)
}
if err := tx.QueryRow(`SELECT IFNULL(MAX(state_block_nid),0) FROM roomserver_state_block;`).Scan(&maxblockid); err != nil {
return fmt.Errorf("tx.QueryRow.Scan (count snapshots): %w", err)
if err := tx.QueryRowContext(ctx, `SELECT IFNULL(MAX(state_block_nid),0) FROM roomserver_state_block;`).Scan(&maxblockid); err != nil {
return fmt.Errorf("tx.QueryRowContext.Scan (count snapshots): %w", err)
}
maxsnapshotid++
maxblockid++
oldMaxSnapshotID := maxsnapshotid
if _, err := tx.Exec(`ALTER TABLE roomserver_state_block RENAME TO _roomserver_state_block;`); err != nil {
return fmt.Errorf("tx.Exec: %w", err)
if _, err := tx.ExecContext(ctx, `ALTER TABLE roomserver_state_block RENAME TO _roomserver_state_block;`); err != nil {
return fmt.Errorf("tx.ExecContext: %w", err)
}
if _, err := tx.Exec(`ALTER TABLE roomserver_state_snapshots RENAME TO _roomserver_state_snapshots;`); err != nil {
return fmt.Errorf("tx.Exec: %w", err)
if _, err := tx.ExecContext(ctx, `ALTER TABLE roomserver_state_snapshots RENAME TO _roomserver_state_snapshots;`); err != nil {
return fmt.Errorf("tx.ExecContext: %w", err)
}
_, err := tx.Exec(`
_, err := tx.ExecContext(ctx, `
CREATE TABLE IF NOT EXISTS roomserver_state_block (
state_block_nid INTEGER PRIMARY KEY AUTOINCREMENT,
state_block_hash BLOB UNIQUE,
@ -62,9 +57,9 @@ func UpStateBlocksRefactor(tx *sql.Tx) error {
);
`)
if err != nil {
return fmt.Errorf("tx.Exec: %w", err)
return fmt.Errorf("tx.ExecContext: %w", err)
}
_, err = tx.Exec(`
_, err = tx.ExecContext(ctx, `
CREATE TABLE IF NOT EXISTS roomserver_state_snapshots (
state_snapshot_nid INTEGER PRIMARY KEY AUTOINCREMENT,
state_snapshot_hash BLOB UNIQUE,
@ -73,11 +68,11 @@ func UpStateBlocksRefactor(tx *sql.Tx) error {
);
`)
if err != nil {
return fmt.Errorf("tx.Exec: %w", err)
return fmt.Errorf("tx.ExecContext: %w", err)
}
snapshotrows, err := tx.Query(`SELECT state_snapshot_nid, room_nid, state_block_nids FROM _roomserver_state_snapshots;`)
snapshotrows, err := tx.QueryContext(ctx, `SELECT state_snapshot_nid, room_nid, state_block_nids FROM _roomserver_state_snapshots;`)
if err != nil {
return fmt.Errorf("tx.Query: %w", err)
return fmt.Errorf("tx.QueryContext: %w", err)
}
defer internal.CloseAndLogIfError(context.TODO(), snapshotrows, "rows.close() failed")
for snapshotrows.Next() {
@ -99,7 +94,7 @@ func UpStateBlocksRefactor(tx *sql.Tx) error {
// in question a state snapshot NID of 0 to indicate 'no snapshot'.
// If we don't do this, we'll fail the assertions later on which try to ensure we didn't forget
// any snapshots.
_, err = tx.Exec(
_, err = tx.ExecContext(ctx,
`UPDATE roomserver_events SET state_snapshot_nid = 0 WHERE event_type_nid = $1 AND event_state_key_nid = $2 AND state_snapshot_nid = $3`,
types.MRoomCreateNID, types.EmptyStateKeyNID, snapshot,
)
@ -109,9 +104,9 @@ func UpStateBlocksRefactor(tx *sql.Tx) error {
}
for _, block := range blocks {
if err = func() error {
blockrows, berr := tx.Query(`SELECT event_nid FROM _roomserver_state_block WHERE state_block_nid = $1`, block)
blockrows, berr := tx.QueryContext(ctx, `SELECT event_nid FROM _roomserver_state_block WHERE state_block_nid = $1`, block)
if berr != nil {
return fmt.Errorf("tx.Query (event nids from old block): %w", berr)
return fmt.Errorf("tx.QueryContext (event nids from old block): %w", berr)
}
defer internal.CloseAndLogIfError(context.TODO(), blockrows, "rows.close() failed")
events := types.EventNIDs{}
@ -129,14 +124,14 @@ func UpStateBlocksRefactor(tx *sql.Tx) error {
}
var blocknid types.StateBlockNID
err = tx.QueryRow(`
err = tx.QueryRowContext(ctx, `
INSERT INTO roomserver_state_block (state_block_nid, state_block_hash, event_nids)
VALUES ($1, $2, $3)
ON CONFLICT (state_block_hash) DO UPDATE SET event_nids=$3
RETURNING state_block_nid
`, maxblockid, events.Hash(), eventjson).Scan(&blocknid)
if err != nil {
return fmt.Errorf("tx.QueryRow.Scan (insert new block): %w", err)
return fmt.Errorf("tx.QueryRowContext.Scan (insert new block): %w", err)
}
maxblockid++
newblocks = append(newblocks, blocknid)
@ -151,22 +146,22 @@ func UpStateBlocksRefactor(tx *sql.Tx) error {
}
var newsnapshot types.StateSnapshotNID
err = tx.QueryRow(`
err = tx.QueryRowContext(ctx, `
INSERT INTO roomserver_state_snapshots (state_snapshot_nid, state_snapshot_hash, room_nid, state_block_nids)
VALUES ($1, $2, $3, $4)
ON CONFLICT (state_snapshot_hash) DO UPDATE SET room_nid=$3
RETURNING state_snapshot_nid
`, maxsnapshotid, newblocks.Hash(), room, newblocksjson).Scan(&newsnapshot)
if err != nil {
return fmt.Errorf("tx.QueryRow.Scan (insert new snapshot): %w", err)
return fmt.Errorf("tx.QueryRowContext.Scan (insert new snapshot): %w", err)
}
maxsnapshotid++
_, err = tx.Exec(`UPDATE roomserver_events SET state_snapshot_nid=$1 WHERE state_snapshot_nid=$2 AND state_snapshot_nid<$3`, newsnapshot, snapshot, maxsnapshotid)
_, err = tx.ExecContext(ctx, `UPDATE roomserver_events SET state_snapshot_nid=$1 WHERE state_snapshot_nid=$2 AND state_snapshot_nid<$3`, newsnapshot, snapshot, maxsnapshotid)
if err != nil {
return fmt.Errorf("tx.Exec (update events): %w", err)
return fmt.Errorf("tx.ExecContext (update events): %w", err)
}
if _, err = tx.Exec(`UPDATE roomserver_rooms SET state_snapshot_nid=$1 WHERE state_snapshot_nid=$2 AND state_snapshot_nid<$3`, newsnapshot, snapshot, maxsnapshotid); err != nil {
return fmt.Errorf("tx.Exec (update rooms): %w", err)
if _, err = tx.ExecContext(ctx, `UPDATE roomserver_rooms SET state_snapshot_nid=$1 WHERE state_snapshot_nid=$2 AND state_snapshot_nid<$3`, newsnapshot, snapshot, maxsnapshotid); err != nil {
return fmt.Errorf("tx.ExecContext (update rooms): %w", err)
}
}
}
@ -175,13 +170,13 @@ func UpStateBlocksRefactor(tx *sql.Tx) error {
// If we do, this is a problem if Dendrite tries to load the snapshot as it will not exist
// in roomserver_state_snapshots
var count int64
if err = tx.QueryRow(`SELECT COUNT(*) FROM roomserver_events WHERE state_snapshot_nid < $1 AND state_snapshot_nid != 0`, oldMaxSnapshotID).Scan(&count); err != nil {
if err = tx.QueryRowContext(ctx, `SELECT COUNT(*) FROM roomserver_events WHERE state_snapshot_nid < $1 AND state_snapshot_nid != 0`, oldMaxSnapshotID).Scan(&count); err != nil {
return fmt.Errorf("assertion query failed: %s", err)
}
if count > 0 {
var res sql.Result
var c int64
res, err = tx.Exec(`UPDATE roomserver_events SET state_snapshot_nid = 0 WHERE state_snapshot_nid < $1 AND state_snapshot_nid != 0`, oldMaxSnapshotID)
res, err = tx.ExecContext(ctx, `UPDATE roomserver_events SET state_snapshot_nid = 0 WHERE state_snapshot_nid < $1 AND state_snapshot_nid != 0`, oldMaxSnapshotID)
if err != nil && err != sql.ErrNoRows {
return fmt.Errorf("failed to reset invalid state snapshots: %w", err)
}
@ -191,23 +186,23 @@ func UpStateBlocksRefactor(tx *sql.Tx) error {
return fmt.Errorf("expected to reset %d event(s) but only updated %d event(s)", count, c)
}
}
if err = tx.QueryRow(`SELECT COUNT(*) FROM roomserver_rooms WHERE state_snapshot_nid < $1 AND state_snapshot_nid != 0`, oldMaxSnapshotID).Scan(&count); err != nil {
if err = tx.QueryRowContext(ctx, `SELECT COUNT(*) FROM roomserver_rooms WHERE state_snapshot_nid < $1 AND state_snapshot_nid != 0`, oldMaxSnapshotID).Scan(&count); err != nil {
return fmt.Errorf("assertion query failed: %s", err)
}
if count > 0 {
return fmt.Errorf("%d rooms exist in roomserver_rooms which have not been converted to a new state_snapshot_nid; this is a bug, please report", count)
}
if _, err = tx.Exec(`DROP TABLE _roomserver_state_snapshots;`); err != nil {
if _, err = tx.ExecContext(ctx, `DROP TABLE _roomserver_state_snapshots;`); err != nil {
return fmt.Errorf("tx.Exec (delete old snapshot table): %w", err)
}
if _, err = tx.Exec(`DROP TABLE _roomserver_state_block;`); err != nil {
if _, err = tx.ExecContext(ctx, `DROP TABLE _roomserver_state_block;`); err != nil {
return fmt.Errorf("tx.Exec (delete old block table): %w", err)
}
return nil
}
func DownStateBlocksRefactor(tx *sql.Tx) error {
func DownStateBlocksRefactor(ctx context.Context, tx *sql.Tx) error {
panic("Downgrading state storage is not supported")
}

View file

@ -23,6 +23,7 @@ import (
"github.com/matrix-org/dendrite/internal"
"github.com/matrix-org/dendrite/internal/sqlutil"
"github.com/matrix-org/dendrite/roomserver/storage/sqlite3/deltas"
"github.com/matrix-org/dendrite/roomserver/storage/tables"
"github.com/matrix-org/dendrite/roomserver/types"
"github.com/matrix-org/gomatrixserverlib"
@ -62,24 +63,24 @@ const insertMembershipSQL = "" +
const selectMembershipFromRoomAndTargetSQL = "" +
"SELECT membership_nid, event_nid, forgotten FROM roomserver_membership" +
" WHERE room_nid = $1 AND target_nid = $2"
" WHERE room_nid = $1 AND event_nid != 0 AND target_nid = $2"
const selectMembershipsFromRoomAndMembershipSQL = "" +
"SELECT event_nid FROM roomserver_membership" +
" WHERE room_nid = $1 AND membership_nid = $2 and forgotten = false"
" WHERE room_nid = $1 AND event_nid != 0 AND membership_nid = $2 and forgotten = false"
const selectLocalMembershipsFromRoomAndMembershipSQL = "" +
"SELECT event_nid FROM roomserver_membership" +
" WHERE room_nid = $1 AND membership_nid = $2" +
" WHERE room_nid = $1 AND event_nid != 0 AND membership_nid = $2" +
" AND target_local = true and forgotten = false"
const selectMembershipsFromRoomSQL = "" +
"SELECT event_nid FROM roomserver_membership" +
" WHERE room_nid = $1 and forgotten = false"
" WHERE room_nid = $1 AND event_nid != 0 and forgotten = false"
const selectLocalMembershipsFromRoomSQL = "" +
"SELECT event_nid FROM roomserver_membership" +
" WHERE room_nid = $1" +
" WHERE room_nid = $1 AND event_nid != 0" +
" AND target_local = true and forgotten = false"
const selectMembershipForUpdateSQL = "" +
@ -125,6 +126,9 @@ const selectServerInRoomSQL = "" +
" JOIN roomserver_event_state_keys ON roomserver_membership.target_nid = roomserver_event_state_keys.event_state_key_nid" +
" WHERE membership_nid = $1 AND room_nid = $2 AND event_state_key LIKE '%:' || $3 LIMIT 1"
const deleteMembershipSQL = "" +
"DELETE FROM roomserver_membership WHERE room_nid = $1 AND target_nid = $2"
type membershipStatements struct {
db *sql.DB
insertMembershipStmt *sql.Stmt
@ -140,11 +144,20 @@ type membershipStatements struct {
updateMembershipForgetRoomStmt *sql.Stmt
selectLocalServerInRoomStmt *sql.Stmt
selectServerInRoomStmt *sql.Stmt
deleteMembershipStmt *sql.Stmt
}
func CreateMembershipTable(db *sql.DB) error {
_, err := db.Exec(membershipSchema)
return err
if err != nil {
return err
}
m := sqlutil.NewMigrator(db)
m.AddMigrations(sqlutil.Migration{
Version: "roomserver: add forgotten column",
Up: deltas.UpAddForgottenColumn,
})
return m.Up(context.Background())
}
func PrepareMembershipTable(db *sql.DB) (tables.Membership, error) {
@ -166,6 +179,7 @@ func PrepareMembershipTable(db *sql.DB) (tables.Membership, error) {
{&s.updateMembershipForgetRoomStmt, updateMembershipForgetRoom},
{&s.selectLocalServerInRoomStmt, selectLocalServerInRoomSQL},
{&s.selectServerInRoomStmt, selectServerInRoomSQL},
{&s.deleteMembershipStmt, deleteMembershipSQL},
}.Prepare(db)
}
@ -383,3 +397,13 @@ func (s *membershipStatements) SelectServerInRoom(ctx context.Context, txn *sql.
}
return roomNID == nid, nil
}
func (s *membershipStatements) DeleteMembership(
ctx context.Context, txn *sql.Tx,
roomNID types.RoomNID, targetUserNID types.EventStateKeyNID,
) error {
_, err := sqlutil.TxStmt(txn, s.deleteMembershipStmt).ExecContext(
ctx, roomNID, targetUserNID,
)
return err
}

View file

@ -129,9 +129,10 @@ func (s *roomStatements) SelectRoomIDsWithEvents(ctx context.Context, txn *sql.T
func (s *roomStatements) SelectRoomInfo(ctx context.Context, txn *sql.Tx, roomID string) (*types.RoomInfo, error) {
var info types.RoomInfo
var latestNIDsJSON string
var stateSnapshotNID types.StateSnapshotNID
stmt := sqlutil.TxStmt(txn, s.selectRoomInfoStmt)
err := stmt.QueryRowContext(ctx, roomID).Scan(
&info.RoomVersion, &info.RoomNID, &info.StateSnapshotNID, &latestNIDsJSON,
&info.RoomVersion, &info.RoomNID, &stateSnapshotNID, &latestNIDsJSON,
)
if err != nil {
if err == sql.ErrNoRows {
@ -143,7 +144,8 @@ func (s *roomStatements) SelectRoomInfo(ctx context.Context, txn *sql.Tx, roomID
if err = json.Unmarshal([]byte(latestNIDsJSON), &latestNIDs); err != nil {
return nil, err
}
info.IsStub = len(latestNIDs) == 0
info.SetStateSnapshotNID(stateSnapshotNID)
info.SetIsStub(len(latestNIDs) == 0)
return &info, err
}

View file

@ -140,3 +140,9 @@ func (s *stateSnapshotStatements) BulkSelectStateBlockNIDs(
}
return results, nil
}
func (s *stateSnapshotStatements) BulkSelectStateForHistoryVisibility(
ctx context.Context, txn *sql.Tx, stateSnapshotNID types.StateSnapshotNID, domain string,
) ([]types.EventNID, error) {
return nil, tables.OptimisationNotSupportedError
}

View file

@ -54,17 +54,25 @@ func Open(base *base.BaseDendrite, dbProperties *config.DatabaseOptions, cache c
// db.SetMaxOpenConns(20)
// Create the tables.
if err := d.create(db); err != nil {
if err = d.create(db); err != nil {
return nil, err
}
// Then execute the migrations. By this point the tables are created with the latest
// schemas.
m := sqlutil.NewMigrations()
deltas.LoadAddForgottenColumn(m)
deltas.LoadStateBlocksRefactor(m)
if err := m.RunDeltas(db, dbProperties); err != nil {
return nil, err
// Special case: since this migration touches several tables, it needs to be
// sure that all tables have been created first.
// TODO: Remove when we are sure there are no goose artefacts left in the db.
// The query below only succeeds against the old schema; the event_nid column was
// removed by the migration, so an error here means the migration has already run.
err = db.QueryRow("SELECT event_nid FROM roomserver_state_block LIMIT 1;").Scan()
if err == nil {
m := sqlutil.NewMigrator(db)
m.AddMigrations(sqlutil.Migration{
Version: "roomserver: state blocks refactor",
Up: deltas.UpStateBlocksRefactor,
})
if err := m.Up(base.Context()); err != nil {
return nil, err
}
}
// Then prepare the statements. Now that the migrations have run, any columns referred

View file

@ -3,12 +3,15 @@ package tables
import (
"context"
"database/sql"
"errors"
"github.com/matrix-org/dendrite/roomserver/types"
"github.com/matrix-org/gomatrixserverlib"
"github.com/tidwall/gjson"
)
var OptimisationNotSupportedError = errors.New("optimisation not supported")
type EventJSONPair struct {
EventNID types.EventNID
EventJSON []byte
@ -80,6 +83,10 @@ type Rooms interface {
type StateSnapshot interface {
InsertState(ctx context.Context, txn *sql.Tx, roomNID types.RoomNID, stateBlockNIDs types.StateBlockNIDs) (stateNID types.StateSnapshotNID, err error)
BulkSelectStateBlockNIDs(ctx context.Context, txn *sql.Tx, stateNIDs []types.StateSnapshotNID) ([]types.StateBlockNIDList, error)
// BulkSelectStateForHistoryVisibility is a PostgreSQL-only optimisation for finding
// which users are in a room faster than having to load the entire room state. In the
// case of SQLite, this will return tables.OptimisationNotSupportedError.
BulkSelectStateForHistoryVisibility(ctx context.Context, txn *sql.Tx, stateSnapshotNID types.StateSnapshotNID, domain string) ([]types.EventNID, error)
}
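// Illustrative sketch, not part of the diff: a caller can treat the SQLite error as
// a signal to fall back to loading the full room state. The snapshot NID and domain
// values are assumptions for the example.
func exampleHistoryVisibilityFallback(ctx context.Context, tab StateSnapshot, snapshot types.StateSnapshotNID, domain string) ([]types.EventNID, bool, error) {
nids, err := tab.BulkSelectStateForHistoryVisibility(ctx, nil, snapshot, domain)
if err == OptimisationNotSupportedError {
return nil, false, nil // not supported here; load the full state instead
}
return nids, true, err
}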
type StateBlock interface {
@ -133,6 +140,7 @@ type Membership interface {
UpdateForgetMembership(ctx context.Context, txn *sql.Tx, roomNID types.RoomNID, targetUserNID types.EventStateKeyNID, forget bool) error
SelectLocalServerInRoom(ctx context.Context, txn *sql.Tx, roomNID types.RoomNID) (bool, error)
SelectServerInRoom(ctx context.Context, txn *sql.Tx, roomNID types.RoomNID, serverName gomatrixserverlib.ServerName) (bool, error)
DeleteMembership(ctx context.Context, txn *sql.Tx, roomNID types.RoomNID, targetUserNID types.EventStateKeyNID) error
}
type Published interface {

View file

@ -60,6 +60,9 @@ func TestMembershipTable(t *testing.T) {
// This inserts a left user to the room
err = tab.InsertMembership(ctx, nil, 1, stateKeyNID, true)
assert.NoError(t, err)
// We must update the membership with a non-zero event NID or it will get filtered out in later queries
_, err = tab.UpdateMembership(ctx, nil, 1, stateKeyNID, userNIDs[0], tables.MembershipStateLeaveOrBan, 1, false)
assert.NoError(t, err)
}
// ... so this should be false

View file

@ -63,12 +63,12 @@ func TestRoomsTable(t *testing.T) {
roomInfo, err := tab.SelectRoomInfo(ctx, nil, room.ID)
assert.NoError(t, err)
assert.Equal(t, &types.RoomInfo{
RoomNID: wantRoomNID,
RoomVersion: room.Version,
StateSnapshotNID: 0,
IsStub: true, // there are no latestEventNIDs
}, roomInfo)
expected := &types.RoomInfo{
RoomNID: wantRoomNID,
RoomVersion: room.Version,
}
expected.SetIsStub(true) // there are no latestEventNIDs
assert.Equal(t, expected, roomInfo)
roomInfo, err = tab.SelectRoomInfo(ctx, nil, "!doesnotexist:localhost")
assert.NoError(t, err)
@ -103,12 +103,12 @@ func TestRoomsTable(t *testing.T) {
roomInfo, err = tab.SelectRoomInfo(ctx, nil, room.ID)
assert.NoError(t, err)
assert.Equal(t, &types.RoomInfo{
RoomNID: wantRoomNID,
RoomVersion: room.Version,
StateSnapshotNID: 1,
IsStub: false,
}, roomInfo)
expected = &types.RoomInfo{
RoomNID: wantRoomNID,
RoomVersion: room.Version,
}
expected.SetStateSnapshotNID(1)
assert.Equal(t, expected, roomInfo)
eventNIDs, snapshotNID, err := tab.SelectLatestEventNIDs(ctx, nil, wantRoomNID)
assert.NoError(t, err)

View file

@ -23,6 +23,15 @@ func mustCreateStateSnapshotTable(t *testing.T, dbType test.DBType) (tab tables.
assert.NoError(t, err)
switch dbType {
case test.DBTypePostgres:
// for the PostgreSQL history visibility optimisation to work,
// we also need some other tables to exist
err = postgres.CreateEventStateKeysTable(db)
assert.NoError(t, err)
err = postgres.CreateEventsTable(db)
assert.NoError(t, err)
err = postgres.CreateStateBlockTable(db)
assert.NoError(t, err)
// ... and then the snapshot table itself
err = postgres.CreateStateSnapshotTable(db)
assert.NoError(t, err)
tab, err = postgres.PrepareStateSnapshotTable(db)

View file

@ -19,6 +19,7 @@ import (
"encoding/json"
"sort"
"strings"
"sync"
"github.com/matrix-org/gomatrixserverlib"
"github.com/matrix-org/util"
@ -279,8 +280,46 @@ func (e RejectedError) Error() string { return string(e) }
// RoomInfo contains metadata about a room
type RoomInfo struct {
mu sync.RWMutex
RoomNID RoomNID
RoomVersion gomatrixserverlib.RoomVersion
StateSnapshotNID StateSnapshotNID
IsStub bool
stateSnapshotNID StateSnapshotNID
isStub bool
}
func (r *RoomInfo) StateSnapshotNID() StateSnapshotNID {
r.mu.RLock()
defer r.mu.RUnlock()
return r.stateSnapshotNID
}
func (r *RoomInfo) IsStub() bool {
r.mu.RLock()
defer r.mu.RUnlock()
return r.isStub
}
func (r *RoomInfo) SetStateSnapshotNID(nid StateSnapshotNID) {
r.mu.Lock()
defer r.mu.Unlock()
r.stateSnapshotNID = nid
}
func (r *RoomInfo) SetIsStub(isStub bool) {
r.mu.Lock()
defer r.mu.Unlock()
r.isStub = isStub
}
func (r *RoomInfo) CopyFrom(r2 *RoomInfo) {
r.mu.Lock()
defer r.mu.Unlock()
r2.mu.RLock()
defer r2.mu.RUnlock()
r.RoomNID = r2.RoomNID
r.RoomVersion = r2.RoomVersion
r.stateSnapshotNID = r2.stateSnapshotNID
r.isStub = r2.isStub
}
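// Illustrative sketch, not part of the diff: with the fields unexported, callers go
// through the accessors, which keeps concurrent readers and the room updater from
// racing on the same RoomInfo. The values below are assumptions for the example.
func exampleRoomInfoUpdate(info *RoomInfo, snapshot StateSnapshotNID) StateSnapshotNID {
if info.IsStub() {
info.SetStateSnapshotNID(snapshot)
info.SetIsStub(false)
}
copied := &RoomInfo{}
copied.CopyFrom(info) // takes both locks, so the copy is consistent
return copied.StateSnapshotNID()
}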

View file

@ -369,6 +369,25 @@ func (b *BaseDendrite) CreateFederationClient() *gomatrixserverlib.FederationCli
return client
}
func (b *BaseDendrite) configureHTTPErrors() {
notAllowedHandler := func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusMethodNotAllowed)
_, _ = w.Write([]byte(fmt.Sprintf("405 %s not allowed on this endpoint", r.Method)))
}
notFoundCORSHandler := httputil.WrapHandlerInCORS(http.NotFoundHandler())
notAllowedCORSHandler := httputil.WrapHandlerInCORS(http.HandlerFunc(notAllowedHandler))
for _, router := range []*mux.Router{
b.PublicClientAPIMux, b.PublicMediaAPIMux,
b.DendriteAdminMux, b.SynapseAdminMux,
b.PublicWellKnownAPIMux,
} {
router.NotFoundHandler = notFoundCORSHandler
router.MethodNotAllowedHandler = notAllowedCORSHandler
}
}
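// Illustrative sketch, not part of the diff: with the handlers above installed, an
// unknown route on a public router now gets a CORS-wrapped 404 and a known route hit
// with the wrong method gets a 405. The httptest usage below is an assumption.
func exampleNotFoundResponse(b *BaseDendrite) int {
req := httptest.NewRequest(http.MethodGet, "/_matrix/client/unknown", nil)
rec := httptest.NewRecorder()
b.PublicClientAPIMux.ServeHTTP(rec, req)
return rec.Code // 404, with CORS headers attached by the wrapper
}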
// SetupAndServeHTTP sets up the HTTP server to serve endpoints registered on
// ApiMux under /api/ and adds a prometheus handler under /metrics.
func (b *BaseDendrite) SetupAndServeHTTP(
@ -409,6 +428,8 @@ func (b *BaseDendrite) SetupAndServeHTTP(
}
}
b.configureHTTPErrors()
internalRouter.PathPrefix(httputil.InternalPathPrefix).Handler(b.InternalAPIMux)
if b.Cfg.Global.Metrics.Enabled {
internalRouter.Handle("/metrics", httputil.WrapHandlerInBasicAuth(promhttp.Handler(), b.Cfg.Global.Metrics.BasicAuth))

View file

@ -46,6 +46,9 @@ type Global struct {
// The server name to delegate server-server communications to, with optional port
WellKnownServerName string `yaml:"well_known_server_name"`
// The server name to delegate client-server communications to, with optional port
WellKnownClientName string `yaml:"well_known_client_name"`
// Disables federation. Dendrite will not be able to make any outbound HTTP requests
// to other servers and the federation API will not be exposed.
DisableFederation bool `yaml:"disable_federation"`

View file

@ -17,6 +17,10 @@ type JetStream struct {
TopicPrefix string `yaml:"topic_prefix"`
// Keep all storage in memory. This is mostly useful for unit tests.
InMemory bool `yaml:"in_memory"`
// Disable logging. This is mostly useful for unit tests.
NoLog bool `yaml:"-"`
// Disables TLS validation. This should NOT be used in production
DisableTLSValidation bool `yaml:"disable_tls_validation"`
}
func (c *JetStream) Prefixed(name string) string {
@ -32,6 +36,8 @@ func (c *JetStream) Defaults(generate bool) {
c.TopicPrefix = "Dendrite"
if generate {
c.StoragePath = Path("./")
c.NoLog = true
c.DisableTLSValidation = true
}
}
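// Illustrative sketch, not part of the diff: a unit test can now ask for a quiet,
// in-memory NATS setup. The helper below is an assumption, not code from the repo.
func exampleTestJetStreamConfig() JetStream {
var c JetStream
c.Defaults(true) // "generate" also turns on NoLog and DisableTLSValidation
c.InMemory = true
return c
}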

View file

@ -42,6 +42,7 @@ global:
key_id: ed25519:auto
key_validity_period: 168h0m0s
well_known_server_name: "localhost:443"
well_known_client_name: "localhost:443"
trusted_third_party_id_servers:
- matrix.org
- vector.im

View file

@ -1,6 +1,7 @@
package jetstream
import (
"crypto/tls"
"fmt"
"reflect"
"strings"
@ -45,6 +46,7 @@ func (s *NATSInstance) Prepare(process *process.ProcessContext, cfg *config.JetS
NoSystemAccount: true,
MaxPayload: 16 * 1024 * 1024,
NoSigs: true,
NoLog: cfg.NoLog,
})
if err != nil {
panic(err)
@ -75,7 +77,13 @@ func (s *NATSInstance) Prepare(process *process.ProcessContext, cfg *config.JetS
func setupNATS(process *process.ProcessContext, cfg *config.JetStream, nc *natsclient.Conn) (natsclient.JetStreamContext, *natsclient.Conn) {
if nc == nil {
var err error
nc, err = natsclient.Connect(strings.Join(cfg.Addresses, ","))
opts := []nats.Option{}
if cfg.DisableTLSValidation {
opts = append(opts, nats.Secure(&tls.Config{
InsecureSkipVerify: true,
}))
}
nc, err = natsclient.Connect(strings.Join(cfg.Addresses, ","), opts...)
if err != nil {
logrus.WithError(err).Panic("Unable to connect to NATS")
return nil, nil
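
This change threads the new DisableTLSValidation option into the NATS client by appending a nats.Secure option with InsecureSkipVerify before dialling. A simplified, self-contained version of that connection logic might look like this (the address and flag values are placeholders):

```go
package main

import (
	"crypto/tls"
	"log"
	"strings"

	"github.com/nats-io/nats.go"
)

func connect(addresses []string, disableTLSValidation bool) (*nats.Conn, error) {
	opts := []nats.Option{}
	if disableTLSValidation {
		// Skips certificate verification; acceptable for tests, never for production.
		opts = append(opts, nats.Secure(&tls.Config{InsecureSkipVerify: true}))
	}
	return nats.Connect(strings.Join(addresses, ","), opts...)
}

func main() {
	nc, err := connect([]string{"tls://localhost:4222"}, true)
	if err != nil {
		log.Fatalf("unable to connect to NATS: %v", err)
	}
	defer nc.Close()
}
```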

View file

@@ -732,7 +732,6 @@ func stripped(ev *gomatrixserverlib.Event) *gomatrixserverlib.MSC2946StrippedEve
StateKey: *ev.StateKey(),
Content: ev.Content(),
Sender: ev.Sender(),
RoomID: ev.RoomID(),
OriginServerTS: ev.OriginServerTS(),
}
}

View file

@@ -365,7 +365,7 @@ func (s *OutputRoomEventConsumer) onNewInviteEvent(
"event": string(msg.Event.JSON()),
"pdupos": pduPos,
log.ErrorKey: err,
}).Panicf("roomserver output log: write invite failure")
}).Errorf("roomserver output log: write invite failure")
return
}
@@ -385,7 +385,7 @@ func (s *OutputRoomEventConsumer) onRetireInviteEvent(
log.WithFields(log.Fields{
"event_id": msg.EventID,
log.ErrorKey: err,
}).Panicf("roomserver output log: remove invite failure")
}).Errorf("roomserver output log: remove invite failure")
return
}
@@ -403,7 +403,7 @@ func (s *OutputRoomEventConsumer) onNewPeek(
// panic rather than continue with an inconsistent database
log.WithFields(log.Fields{
log.ErrorKey: err,
}).Panicf("roomserver output log: write peek failure")
}).Errorf("roomserver output log: write peek failure")
return
}
@@ -422,7 +422,7 @@ func (s *OutputRoomEventConsumer) onRetirePeek(
// panic rather than continue with an inconsistent database
log.WithFields(log.Fields{
log.ErrorKey: err,
}).Panicf("roomserver output log: write peek failure")
}).Errorf("roomserver output log: write peek failure")
return
}
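
These hunks downgrade write failures for invites and peeks from Panicf to Errorf, so one bad event no longer takes the sync API down; the consumer logs the failure and returns. A generic sketch of that log-and-continue pattern with logrus, using an invented writeToDatabase helper:

```go
package main

import (
	"errors"

	log "github.com/sirupsen/logrus"
)

// writeToDatabase is a hypothetical helper standing in for the sync API's
// database writes; here it always fails so the error path is exercised.
func writeToDatabase(eventID string) error {
	return errors.New("simulated database failure")
}

func handleEvent(eventID string) {
	if err := writeToDatabase(eventID); err != nil {
		// Log and return rather than panicking: the stream keeps flowing and
		// the failure remains visible in the logs for investigation.
		log.WithFields(log.Fields{
			"event_id":   eventID,
			log.ErrorKey: err,
		}).Errorf("output log: write failure")
		return
	}
}

func main() {
	handleEvent("$abc123:example.org")
}
```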

View file

@@ -25,10 +25,9 @@ import (
"github.com/matrix-org/dendrite/syncapi/types"
"github.com/matrix-org/gomatrixserverlib"
"github.com/matrix-org/util"
"github.com/sirupsen/logrus"
)
const DeviceListLogName = "dl"
// DeviceOTKCounts adds one-time key counts to the /sync response
func DeviceOTKCounts(ctx context.Context, keyAPI keyapi.SyncKeyAPI, userID, deviceID string, res *types.Response) error {
var queryRes keyapi.QueryOneTimeKeysResponse
@@ -93,18 +92,13 @@ func DeviceListCatchup(
queryRes.UserIDs = append(queryRes.UserIDs, joinUserIDs...)
queryRes.UserIDs = append(queryRes.UserIDs, leaveUserIDs...)
queryRes.UserIDs = util.UniqueStrings(queryRes.UserIDs)
var sharedUsersMap map[string]int
sharedUsersMap, queryRes.UserIDs = filterSharedUsers(ctx, db, userID, queryRes.UserIDs)
util.GetLogger(ctx).Debugf(
"QueryKeyChanges request off=%d,to=%d response off=%d uids=%v",
offset, toOffset, queryRes.Offset, queryRes.UserIDs,
)
sharedUsersMap := filterSharedUsers(ctx, db, userID, queryRes.UserIDs)
userSet := make(map[string]bool)
for _, userID := range res.DeviceLists.Changed {
userSet[userID] = true
}
for _, userID := range queryRes.UserIDs {
if !userSet[userID] {
for userID, count := range sharedUsersMap {
if !userSet[userID] && count > 0 {
res.DeviceLists.Changed = append(res.DeviceLists.Changed, userID)
hasNew = true
userSet[userID] = true
@@ -113,7 +107,7 @@ func DeviceListCatchup(
// Finally, add in users who have joined or left.
// TODO: This is sub-optimal because we will add users to `changed` even if we already shared a room with them.
for _, userID := range joinUserIDs {
if !userSet[userID] {
if !userSet[userID] && sharedUsersMap[userID] > 0 {
res.DeviceLists.Changed = append(res.DeviceLists.Changed, userID)
hasNew = true
userSet[userID] = true
@@ -126,6 +120,13 @@ func DeviceListCatchup(
}
}
util.GetLogger(ctx).WithFields(logrus.Fields{
"user_id": userID,
"from": offset,
"to": toOffset,
"response_offset": queryRes.Offset,
}).Debugf("QueryKeyChanges request result: %+v", res.DeviceLists)
return types.StreamPosition(queryRes.Offset), hasNew, nil
}
@@ -220,24 +221,27 @@ func TrackChangedUsers(
// it down to include only users who the requesting user shares a room with.
func filterSharedUsers(
ctx context.Context, db storage.SharedUsers, userID string, usersWithChangedKeys []string,
) (map[string]int, []string) {
) map[string]int {
sharedUsersMap := make(map[string]int, len(usersWithChangedKeys))
for _, userID := range usersWithChangedKeys {
sharedUsersMap[userID] = 0
for _, changedUserID := range usersWithChangedKeys {
sharedUsersMap[changedUserID] = 0
if changedUserID == userID {
// We forcibly put ourselves in this list because we should be notified about our own device updates
// and if we are in 0 rooms then we don't technically share any room with ourselves so we wouldn't
// be notified about key changes.
sharedUsersMap[userID] = 1
}
}
sharedUsers, err := db.SharedUsers(ctx, userID, usersWithChangedKeys)
if err != nil {
util.GetLogger(ctx).WithError(err).Errorf("db.SharedUsers failed: %s", err)
// default to all users so we do needless queries rather than miss some important device update
return nil, usersWithChangedKeys
return sharedUsersMap
}
for _, userID := range sharedUsers {
sharedUsersMap[userID]++
}
// We forcibly put ourselves in this list because we should be notified about our own device updates
// and if we are in 0 rooms then we don't technically share any room with ourselves so we wouldn't
// be notified about key changes.
sharedUsersMap[userID] = 1
return sharedUsersMap, sharedUsers
return sharedUsersMap
}
func joinedRooms(res *types.Response, userID string) []string {
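
filterSharedUsers now returns a map from user ID to the number of rooms shared with the requesting user, and callers only add a user to device_lists.changed when that count is positive, with the requesting user always counted once so their own device updates still appear. A simplified, self-contained sketch of that filtering step (the sharedWith input stands in for the database query):

```go
package main

import "fmt"

// sharedRoomCounts stands in for a query such as db.SharedUsers: given the
// users whose keys changed, it reports how many rooms each shares with userID.
func sharedRoomCounts(userID string, changed []string, sharedWith map[string]int) map[string]int {
	counts := make(map[string]int, len(changed))
	for _, changedUserID := range changed {
		counts[changedUserID] = sharedWith[changedUserID]
		if changedUserID == userID {
			// Always include ourselves so our own device updates are reported
			// even if we currently share no rooms with anyone.
			counts[userID] = 1
		}
	}
	return counts
}

func main() {
	counts := sharedRoomCounts(
		"@alice:example.org",
		[]string{"@alice:example.org", "@bob:example.org", "@eve:example.org"},
		map[string]int{"@bob:example.org": 2}, // Eve shares no rooms with Alice.
	)

	changed := []string{}
	for userID, n := range counts {
		if n > 0 {
			changed = append(changed, userID)
		}
	}
	fmt.Println(changed) // Alice and Bob, but not Eve.
}
```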

View file

@@ -129,6 +129,7 @@ type wantCatchup struct {
}
func assertCatchup(t *testing.T, hasNew bool, syncResponse *types.Response, want wantCatchup) {
t.Helper()
if hasNew != want.hasNew {
t.Errorf("got hasNew=%v want %v", hasNew, want.hasNew)
}

View file

@@ -23,6 +23,7 @@ import (
"github.com/lib/pq"
"github.com/matrix-org/dendrite/internal"
"github.com/matrix-org/dendrite/internal/sqlutil"
"github.com/matrix-org/dendrite/syncapi/storage/postgres/deltas"
"github.com/matrix-org/dendrite/syncapi/storage/tables"
"github.com/matrix-org/dendrite/syncapi/types"
"github.com/matrix-org/gomatrixserverlib"
@@ -111,7 +112,7 @@ const selectEventsWithEventIDsSQL = "" +
const selectSharedUsersSQL = "" +
"SELECT state_key FROM syncapi_current_room_state WHERE room_id = ANY(" +
" SELECT room_id FROM syncapi_current_room_state WHERE state_key = $1 AND membership='join'" +
") AND state_key = ANY($2) AND membership='join';"
") AND state_key = ANY($2) AND membership IN ('join', 'invite');"
type currentRoomStateStatements struct {
upsertRoomStateStmt *sql.Stmt
@@ -133,6 +134,17 @@ func NewPostgresCurrentRoomStateTable(db *sql.DB) (tables.CurrentRoomState, erro
if err != nil {
return nil, err
}
m := sqlutil.NewMigrator(db)
m.AddMigrations(sqlutil.Migration{
Version: "syncapi: add history visibility column (current_room_state)",
Up: deltas.UpAddHistoryVisibilityColumnCurrentRoomState,
})
err = m.Up(context.Background())
if err != nil {
return nil, err
}
if s.upsertRoomStateStmt, err = db.Prepare(upsertRoomStateSQL); err != nil {
return nil, err
}
@@ -395,7 +407,7 @@ func (s *currentRoomStateStatements) SelectSharedUsers(
ctx context.Context, txn *sql.Tx, userID string, otherUserIDs []string,
) ([]string, error) {
stmt := sqlutil.TxStmt(txn, s.selectSharedUsersStmt)
rows, err := stmt.QueryContext(ctx, userID, otherUserIDs)
rows, err := stmt.QueryContext(ctx, userID, pq.Array(otherUserIDs))
if err != nil {
return nil, err
}
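
The SelectSharedUsers fix wraps the slice in pq.Array so that lib/pq binds it as a proper Postgres array for the = ANY($2) clause; passing a bare []string would fail to encode. A hedged standalone example of the same binding technique, with a placeholder DSN and a query reduced to a single parameter:

```go
package main

import (
	"context"
	"database/sql"
	"fmt"
	"log"

	"github.com/lib/pq"
)

const selectSharedUsersSQL = `
SELECT state_key FROM syncapi_current_room_state
 WHERE state_key = ANY($1) AND membership IN ('join', 'invite');`

func main() {
	// Placeholder DSN; point this at a real sync API database to try it.
	db, err := sql.Open("postgres", "postgres://dendrite:secret@localhost/syncapi?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	otherUserIDs := []string{"@bob:example.org", "@eve:example.org"}

	// pq.Array wraps the slice so lib/pq encodes it as a Postgres array parameter.
	rows, err := db.QueryContext(context.Background(), selectSharedUsersSQL, pq.Array(otherUserIDs))
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	for rows.Next() {
		var stateKey string
		if err := rows.Scan(&stateKey); err != nil {
			log.Fatal(err)
		}
		fmt.Println(stateKey)
	}
}
```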

View file

@@ -15,24 +15,13 @@
package deltas
import (
"context"
"database/sql"
"fmt"
"github.com/matrix-org/dendrite/internal/sqlutil"
"github.com/pressly/goose"
)
func LoadFromGoose() {
goose.AddMigration(UpFixSequences, DownFixSequences)
goose.AddMigration(UpRemoveSendToDeviceSentColumn, DownRemoveSendToDeviceSentColumn)
}
func LoadFixSequences(m *sqlutil.Migrations) {
m.AddMigration(UpFixSequences, DownFixSequences)
}
func UpFixSequences(tx *sql.Tx) error {
_, err := tx.Exec(`
func UpFixSequences(ctx context.Context, tx *sql.Tx) error {
_, err := tx.ExecContext(ctx, `
-- We need to delete all of the existing receipts because the indexes
-- will be wrong, and we'll get primary key violations if we try to
-- reuse existing stream IDs from a different sequence.
@@ -49,8 +38,8 @@ func UpFixSequences(tx *sql.Tx) error {
return nil
}
func DownFixSequences(tx *sql.Tx) error {
_, err := tx.Exec(`
func DownFixSequences(ctx context.Context, tx *sql.Tx) error {
_, err := tx.ExecContext(ctx, `
-- We need to delete all of the existing receipts because the indexes
-- will be wrong, and we'll get primary key violations if we try to
-- reuse existing stream IDs from a different sequence.
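
The final hunk drops the goose registration and rewrites the migration functions to accept a context.Context and use ExecContext, which appears to be the signature the new sqlutil.Migrator expects. A minimal sketch of declaring and applying such a context-aware migration, using a hand-rolled stand-in for the migrator and a placeholder DSN:

```go
package main

import (
	"context"
	"database/sql"
	"log"

	_ "github.com/lib/pq"
)

// upCreateExampleTable follows the context-aware migration signature from the
// diff: the migrator hands it a context and an open transaction, and the
// statement runs through ExecContext so it can be cancelled.
func upCreateExampleTable(ctx context.Context, tx *sql.Tx) error {
	_, err := tx.ExecContext(ctx, `CREATE TABLE IF NOT EXISTS example_migration_target (id BIGINT PRIMARY KEY);`)
	return err
}

// runMigration is a stand-in for a migrator such as sqlutil.Migrator: it wraps
// a single migration in a transaction and rolls back if the migration fails.
func runMigration(ctx context.Context, db *sql.DB, up func(context.Context, *sql.Tx) error) error {
	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		return err
	}
	if err := up(ctx, tx); err != nil {
		_ = tx.Rollback()
		return err
	}
	return tx.Commit()
}

func main() {
	// Placeholder DSN for illustration only.
	db, err := sql.Open("postgres", "postgres://dendrite:secret@localhost/syncapi?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := runMigration(context.Background(), db, upCreateExampleTable); err != nil {
		log.Fatal(err)
	}
}
```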

Some files were not shown because too many files have changed in this diff.