2020-02-13 11:27:33 -06:00
// Copyright 2017-2018 New Vector Ltd
// Copyright 2019-2020 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sqlite3
import (
"context"
"database/sql"
"encoding/json"
2021-01-19 12:00:42 -06:00
"fmt"
2020-02-13 11:27:33 -06:00
"sort"
2022-03-11 06:48:45 -06:00
"strings"
2020-02-13 11:27:33 -06:00
2020-06-12 08:55:57 -05:00
"github.com/matrix-org/dendrite/internal"
2020-02-13 11:27:33 -06:00
"github.com/matrix-org/dendrite/roomserver/api"
2023-04-27 06:54:20 -05:00
rstypes "github.com/matrix-org/dendrite/roomserver/types"
2022-07-25 04:39:22 -05:00
"github.com/matrix-org/dendrite/syncapi/storage/sqlite3/deltas"
2020-05-14 03:53:55 -05:00
"github.com/matrix-org/dendrite/syncapi/storage/tables"
2023-04-04 12:16:53 -05:00
"github.com/matrix-org/dendrite/syncapi/synctypes"
2020-02-13 11:27:33 -06:00
"github.com/matrix-org/dendrite/syncapi/types"
"github.com/matrix-org/gomatrixserverlib"
2022-10-19 07:05:39 -05:00
"github.com/matrix-org/dendrite/internal/sqlutil"
2020-02-13 11:27:33 -06:00
)
const outputRoomEventsSchema = `
-- Stores output room events received from the roomserver.
CREATE TABLE IF NOT EXISTS syncapi_output_room_events (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  event_id TEXT NOT NULL UNIQUE,
  room_id TEXT NOT NULL,
  headered_event_json TEXT NOT NULL,
  type TEXT NOT NULL,
  sender TEXT NOT NULL,
  contains_url BOOL NOT NULL,
  add_state_ids TEXT, -- JSON encoded string array
  remove_state_ids TEXT, -- JSON encoded string array
  session_id BIGINT,
  transaction_id TEXT,
  exclude_from_sync BOOL NOT NULL DEFAULT FALSE,
  history_visibility SMALLINT NOT NULL DEFAULT 2 -- The history visibility before this event (1 - world_readable; 2 - shared; 3 - invited; 4 - joined)
);

CREATE INDEX IF NOT EXISTS syncapi_output_room_events_type_idx ON syncapi_output_room_events (type);
CREATE INDEX IF NOT EXISTS syncapi_output_room_events_sender_idx ON syncapi_output_room_events (sender);
CREATE INDEX IF NOT EXISTS syncapi_output_room_events_room_id_idx ON syncapi_output_room_events (room_id);
CREATE INDEX IF NOT EXISTS syncapi_output_room_events_exclude_from_sync_idx ON syncapi_output_room_events (exclude_from_sync);
-- Expression indexes so "has any state delta" checks don't need a table scan.
CREATE INDEX IF NOT EXISTS syncapi_output_room_events_add_state_ids_idx ON syncapi_output_room_events ((add_state_ids IS NOT NULL));
CREATE INDEX IF NOT EXISTS syncapi_output_room_events_remove_state_ids_idx ON syncapi_output_room_events ((remove_state_ids IS NOT NULL));
`
// Upserts an event; on conflict the row is kept but exclude_from_sync may be
// cleared (AND with $14) so an event later seen via sync is no longer hidden.
const insertEventSQL = "" +
	"INSERT INTO syncapi_output_room_events (" +
	"id, room_id, event_id, headered_event_json, type, sender, contains_url, add_state_ids, remove_state_ids, session_id, transaction_id, exclude_from_sync, history_visibility" +
	") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13) " +
	"ON CONFLICT (event_id) DO UPDATE SET exclude_from_sync = (excluded.exclude_from_sync AND $14)"

const selectEventsSQL = "" +
	"SELECT event_id, id, headered_event_json, session_id, exclude_from_sync, transaction_id, history_visibility FROM syncapi_output_room_events WHERE event_id IN ($1)"

const selectRecentEventsSQL = "" +
	"SELECT event_id, id, headered_event_json, session_id, exclude_from_sync, transaction_id, history_visibility FROM syncapi_output_room_events" +
	" WHERE room_id = $1 AND id > $2 AND id <= $3"

// WHEN, ORDER BY and LIMIT are appended by prepareWithFilters

const selectRecentEventsForSyncSQL = "" +
	"SELECT event_id, id, headered_event_json, session_id, exclude_from_sync, transaction_id, history_visibility FROM syncapi_output_room_events" +
	" WHERE room_id = $1 AND id > $2 AND id <= $3 AND exclude_from_sync = FALSE"

// WHEN, ORDER BY and LIMIT are appended by prepareWithFilters

const selectMaxEventIDSQL = "" +
	"SELECT MAX(id) FROM syncapi_output_room_events"

const updateEventJSONSQL = "" +
	"UPDATE syncapi_output_room_events SET headered_event_json=$1 WHERE event_id=$2"

const selectStateInRangeSQL = "" +
	"SELECT event_id, id, headered_event_json, exclude_from_sync, add_state_ids, remove_state_ids, history_visibility" +
	" FROM syncapi_output_room_events" +
	" WHERE (id > $1 AND id <= $2)" +
	" AND room_id IN ($3)" +
	" AND ((add_state_ids IS NOT NULL AND add_state_ids != '') OR (remove_state_ids IS NOT NULL AND remove_state_ids != ''))"

// WHEN, ORDER BY and LIMIT are appended by prepareWithFilters

const deleteEventsForRoomSQL = "" +
	"DELETE FROM syncapi_output_room_events WHERE room_id = $1"

const selectContextEventSQL = "" +
	"SELECT id, headered_event_json, history_visibility FROM syncapi_output_room_events WHERE room_id = $1 AND event_id = $2"

const selectContextBeforeEventSQL = "" +
	"SELECT headered_event_json, history_visibility FROM syncapi_output_room_events WHERE room_id = $1 AND id < $2"

// WHEN, ORDER BY and LIMIT are appended by prepareWithFilters
const selectContextAfterEventSQL = "" +
	"SELECT id, headered_event_json, history_visibility FROM syncapi_output_room_events WHERE room_id = $1 AND id > $2"

// WHEN, ORDER BY and LIMIT are appended by prepareWithFilters

const selectSearchSQL = "SELECT id, event_id, headered_event_json FROM syncapi_output_room_events WHERE id > $1 AND type IN ($2)"

const purgeEventsSQL = "" +
	"DELETE FROM syncapi_output_room_events WHERE room_id = $1"
type outputRoomEventsStatements struct {
2022-02-21 10:12:22 -06:00
db * sql . DB
2022-04-08 11:53:24 -05:00
streamIDStatements * StreamIDStatements
2022-02-21 10:12:22 -06:00
insertEventStmt * sql . Stmt
selectMaxEventIDStmt * sql . Stmt
updateEventJSONStmt * sql . Stmt
deleteEventsForRoomStmt * sql . Stmt
selectContextEventStmt * sql . Stmt
selectContextBeforeEventStmt * sql . Stmt
selectContextAfterEventStmt * sql . Stmt
2023-01-19 14:02:32 -06:00
purgeEventsStmt * sql . Stmt
2022-09-27 11:06:49 -05:00
//selectSearchStmt *sql.Stmt - prepared at runtime
2020-02-13 11:27:33 -06:00
}
2022-04-08 11:53:24 -05:00
func NewSqliteEventsTable ( db * sql . DB , streamID * StreamIDStatements ) ( tables . Events , error ) {
2020-05-14 03:53:55 -05:00
s := & outputRoomEventsStatements {
2020-07-21 09:48:21 -05:00
db : db ,
2020-05-14 03:53:55 -05:00
streamIDStatements : streamID ,
}
_ , err := db . Exec ( outputRoomEventsSchema )
2020-02-13 11:27:33 -06:00
if err != nil {
2020-05-14 03:53:55 -05:00
return nil , err
2020-02-13 11:27:33 -06:00
}
2022-07-25 04:39:22 -05:00
m := sqlutil . NewMigrator ( db )
2022-08-11 11:23:35 -05:00
m . AddMigrations (
sqlutil . Migration {
Version : "syncapi: add history visibility column (output_room_events)" ,
Up : deltas . UpAddHistoryVisibilityColumnOutputRoomEvents ,
} ,
)
2022-07-25 04:39:22 -05:00
err = m . Up ( context . Background ( ) )
if err != nil {
return nil , err
}
2022-02-21 10:12:22 -06:00
return s , sqlutil . StatementList {
{ & s . insertEventStmt , insertEventSQL } ,
{ & s . selectMaxEventIDStmt , selectMaxEventIDSQL } ,
{ & s . updateEventJSONStmt , updateEventJSONSQL } ,
{ & s . deleteEventsForRoomStmt , deleteEventsForRoomSQL } ,
{ & s . selectContextEventStmt , selectContextEventSQL } ,
{ & s . selectContextBeforeEventStmt , selectContextBeforeEventSQL } ,
{ & s . selectContextAfterEventStmt , selectContextAfterEventSQL } ,
2023-01-19 14:02:32 -06:00
{ & s . purgeEventsStmt , purgeEventsSQL } ,
2022-09-27 11:06:49 -05:00
//{&s.selectSearchStmt, selectSearchSQL}, - prepared at runtime
2022-02-21 10:12:22 -06:00
} . Prepare ( db )
2020-02-13 11:27:33 -06:00
}
2023-04-27 06:54:20 -05:00
func ( s * outputRoomEventsStatements ) UpdateEventJSON ( ctx context . Context , txn * sql . Tx , event * rstypes . HeaderedEvent ) error {
2020-07-08 11:45:39 -05:00
headeredJSON , err := json . Marshal ( event )
if err != nil {
return err
}
2022-09-28 04:18:03 -05:00
_ , err = sqlutil . TxStmt ( txn , s . updateEventJSONStmt ) . ExecContext ( ctx , headeredJSON , event . EventID ( ) )
2020-08-21 04:42:08 -05:00
return err
2020-07-08 11:45:39 -05:00
}
2020-02-13 11:27:33 -06:00
// selectStateInRange returns the state events between the two given PDU stream positions, exclusive of oldPos, inclusive of newPos.
// Results are bucketed based on the room ID. If the same state is overwritten multiple times between the
// two positions, only the most recent state is returned.
2020-05-14 03:53:55 -05:00
func ( s * outputRoomEventsStatements ) SelectStateInRange (
2020-05-15 03:41:12 -05:00
ctx context . Context , txn * sql . Tx , r types . Range ,
2023-04-04 12:16:53 -05:00
stateFilter * synctypes . StateFilter , roomIDs [ ] string ,
2020-02-13 11:27:33 -06:00
) ( map [ string ] map [ string ] bool , map [ string ] types . StreamEvent , error ) {
2022-03-11 06:48:45 -06:00
stmtSQL := strings . Replace ( selectStateInRangeSQL , "($3)" , sqlutil . QueryVariadicOffset ( len ( roomIDs ) , 2 ) , 1 )
inputParams := [ ] interface { } {
r . Low ( ) , r . High ( ) ,
}
for _ , roomID := range roomIDs {
inputParams = append ( inputParams , roomID )
}
2022-10-19 07:05:39 -05:00
var (
stmt * sql . Stmt
params [ ] any
err error
2020-02-13 11:27:33 -06:00
)
2022-10-19 07:05:39 -05:00
if stateFilter != nil {
stmt , params , err = prepareWithFilters (
s . db , txn , stmtSQL , inputParams ,
stateFilter . Senders , stateFilter . NotSenders ,
stateFilter . Types , stateFilter . NotTypes ,
2022-11-02 04:34:19 -05:00
nil , stateFilter . ContainsURL , 0 , FilterOrderAsc ,
2022-10-19 07:05:39 -05:00
)
} else {
stmt , params , err = prepareWithFilters (
s . db , txn , stmtSQL , inputParams ,
nil , nil ,
nil , nil ,
nil , nil , int ( r . High ( ) - r . Low ( ) ) , FilterOrderAsc ,
)
}
2021-01-19 12:00:42 -06:00
if err != nil {
return nil , nil , fmt . Errorf ( "s.prepareWithFilters: %w" , err )
}
2022-10-19 07:05:39 -05:00
defer internal . CloseAndLogIfError ( ctx , stmt , "selectStateInRange: stmt.close() failed" )
2021-01-19 12:00:42 -06:00
rows , err := stmt . QueryContext ( ctx , params ... )
2020-02-13 11:27:33 -06:00
if err != nil {
return nil , nil , err
}
2022-10-19 07:05:39 -05:00
defer internal . CloseAndLogIfError ( ctx , rows , "selectStateInRange: rows.close() failed" )
2020-02-13 11:27:33 -06:00
// Fetch all the state change events for all rooms between the two positions then loop each event and:
// - Keep a cache of the event by ID (99% of state change events are for the event itself)
// - For each room ID, build up an array of event IDs which represents cumulative adds/removes
// For each room, map cumulative event IDs to events and return. This may need to a batch SELECT based on event ID
// if they aren't in the event ID cache. We don't handle state deletion yet.
eventIDToEvent := make ( map [ string ] types . StreamEvent )
// RoomID => A set (map[string]bool) of state event IDs which are between the two positions
stateNeeded := make ( map [ string ] map [ string ] bool )
for rows . Next ( ) {
var (
2022-07-18 07:46:15 -05:00
eventID string
streamPos types . StreamPosition
eventBytes [ ] byte
excludeFromSync bool
addIDsJSON string
delIDsJSON string
historyVisibility gomatrixserverlib . HistoryVisibility
2020-02-13 11:27:33 -06:00
)
2022-07-18 07:46:15 -05:00
if err := rows . Scan ( & eventID , & streamPos , & eventBytes , & excludeFromSync , & addIDsJSON , & delIDsJSON , & historyVisibility ) ; err != nil {
2020-02-13 11:27:33 -06:00
return nil , nil , err
}
Add peer-to-peer support into Dendrite via libp2p and fetch (#880)
* Use a fork of pq which supports userCurrent on wasm
* Use sqlite3_js driver when running in JS
* Add cmd/dendritejs to pull in sqlite3_js driver for wasm only
* Update to latest go-sqlite-js version
* Replace prometheus with a stub. sigh
* Hard-code a config and don't use opentracing
* Latest go-sqlite3-js version
* Generate a key for now
* Listen for fetch traffic rather than HTTP
* Latest hacks for js
* libp2p support
* More libp2p
* Fork gjson to allow us to enforce auth checks as before
Previously, all events would come down redacted because the hash
checks would fail. They would fail because sjson.DeleteBytes didn't
remove keys not used for hashing. This didn't work because of a build
tag which included a file which no-oped the index returned.
See https://github.com/tidwall/gjson/issues/157
When it's resolved, let's go back to mainline.
* Use gjson@1.6.0 as it fixes https://github.com/tidwall/gjson/issues/157
* Use latest gomatrixserverlib for sig checks
* Fix a bug which could cause exclude_from_sync to not be set
Caused when sending events over federation.
* Use query variadic to make lookups actually work!
* Latest gomatrixserverlib
* Add notes on getting p2p up and running
Partly so I don't forget myself!
* refactor: Move p2p specific stuff to cmd/dendritejs
This is important or else the normal build of dendrite will fail
because the p2p libraries depend on syscall/js which doesn't work
on normal builds.
Also, clean up main.go to read a bit better.
* Update ho-http-js-libp2p to return errors from RoundTrip
* Add an LRU cache around the key DB
We actually need this for P2P because otherwise we can *segfault*
with things like: "runtime: unexpected return pc for runtime.handleEvent"
where the event is a `syscall/js` event, caused by spamming sql.js
caused by "Checking event signatures for 14 events of room state" which
hammers the key DB repeatedly in quick succession.
Using a cache fixes this, though the underlying cause is probably a bug
in the version of Go I'm on (1.13.7)
* breaking: Add Tracing.Enabled to toggle whether we do opentracing
Defaults to false, which is why this is a breaking change. We need
this flag because WASM builds cannot do opentracing.
* Start adding conditional builds for wasm to handle lib/pq
The general idea here is to have the wasm build have a `NewXXXDatabase`
that doesn't import any postgres package and hence we never import
`lib/pq`, which doesn't work under WASM (undefined `userCurrent`).
* Remove lib/pq for wasm for syncapi
* Add conditional building to remaining storage APIs
* Update build script to set env vars correctly for dendritejs
* sqlite bug fixes
* Docs
* Add a no-op main for dendritejs when not building under wasm
* Use the real prometheus, even for WASM
Instead, the dendrite-sw.js must mock out `process.pid` and
`fs.stat` - which must invoke the callback with an error (e.g `EINVAL`)
in order for it to work:
```
global.process = {
pid: 1,
};
global.fs.stat = function(path, cb) {
cb({
code: "EINVAL",
});
}
```
* Linting
2020-03-06 04:23:55 -06:00
addIDs , delIDs , err := unmarshalStateIDs ( addIDsJSON , delIDsJSON )
if err != nil {
return nil , nil , err
}
2020-02-13 11:27:33 -06:00
// TODO: Handle redacted events
2023-04-27 06:54:20 -05:00
var ev rstypes . HeaderedEvent
if err := json . Unmarshal ( eventBytes , & ev ) ; err != nil {
2020-02-13 11:27:33 -06:00
return nil , nil , err
}
needSet := stateNeeded [ ev . RoomID ( ) ]
if needSet == nil { // make set if required
needSet = make ( map [ string ] bool )
}
for _ , id := range delIDs {
needSet [ id ] = false
}
for _ , id := range addIDs {
needSet [ id ] = true
}
stateNeeded [ ev . RoomID ( ) ] = needSet
2022-07-18 07:46:15 -05:00
ev . Visibility = historyVisibility
2020-02-13 11:27:33 -06:00
2021-11-03 04:53:37 -05:00
eventIDToEvent [ eventID ] = types . StreamEvent {
2020-11-16 09:44:53 -06:00
HeaderedEvent : & ev ,
2020-02-13 11:27:33 -06:00
StreamPosition : streamPos ,
ExcludeFromSync : excludeFromSync ,
}
}
return stateNeeded , eventIDToEvent , nil
}
// MaxID returns the ID of the last inserted event in this table. 'txn' is optional. If it is not supplied,
// then this function should only ever be used at startup, as it will race with inserting events if it is
// done afterwards. If there are no inserted events, 0 is returned.
2020-05-14 03:53:55 -05:00
func ( s * outputRoomEventsStatements ) SelectMaxEventID (
2020-02-13 11:27:33 -06:00
ctx context . Context , txn * sql . Tx ,
) ( id int64 , err error ) {
var nullableID sql . NullInt64
2020-06-12 08:55:57 -05:00
stmt := sqlutil . TxStmt ( txn , s . selectMaxEventIDStmt )
2022-10-19 07:05:39 -05:00
defer internal . CloseAndLogIfError ( ctx , stmt , "SelectMaxEventID: stmt.close() failed" )
2020-02-13 11:27:33 -06:00
err = stmt . QueryRowContext ( ctx ) . Scan ( & nullableID )
if nullableID . Valid {
id = nullableID . Int64
}
return
}
// InsertEvent into the output_room_events table. addState and removeState are an optional list of state event IDs. Returns the position
// of the inserted event.
2020-05-14 03:53:55 -05:00
func ( s * outputRoomEventsStatements ) InsertEvent (
2020-02-13 11:27:33 -06:00
ctx context . Context , txn * sql . Tx ,
2023-04-27 06:54:20 -05:00
event * rstypes . HeaderedEvent , addState , removeState [ ] string ,
2022-07-18 07:46:15 -05:00
transactionID * api . TransactionID , excludeFromSync bool , historyVisibility gomatrixserverlib . HistoryVisibility ,
2020-07-21 09:48:21 -05:00
) ( types . StreamPosition , error ) {
2020-02-13 11:27:33 -06:00
var txnID * string
var sessionID * int64
if transactionID != nil {
sessionID = & transactionID . SessionID
txnID = & transactionID . TransactionID
}
// Parse content as JSON and search for an "url" key
containsURL := false
var content map [ string ] interface { }
2022-04-13 06:16:02 -05:00
if json . Unmarshal ( event . Content ( ) , & content ) == nil {
2020-02-13 11:27:33 -06:00
// Set containsURL to true if url is present
_ , containsURL = content [ "url" ]
}
2020-03-19 07:07:01 -05:00
var headeredJSON [ ] byte
2020-07-21 09:48:21 -05:00
headeredJSON , err := json . Marshal ( event )
2020-02-13 11:27:33 -06:00
if err != nil {
2020-07-21 09:48:21 -05:00
return 0 , err
2020-02-13 11:27:33 -06:00
}
2021-01-19 12:00:42 -06:00
var addStateJSON , removeStateJSON [ ] byte
if len ( addState ) > 0 {
addStateJSON , err = json . Marshal ( addState )
}
Add peer-to-peer support into Dendrite via libp2p and fetch (#880)
* Use a fork of pq which supports userCurrent on wasm
* Use sqlite3_js driver when running in JS
* Add cmd/dendritejs to pull in sqlite3_js driver for wasm only
* Update to latest go-sqlite-js version
* Replace prometheus with a stub. sigh
* Hard-code a config and don't use opentracing
* Latest go-sqlite3-js version
* Generate a key for now
* Listen for fetch traffic rather than HTTP
* Latest hacks for js
* libp2p support
* More libp2p
* Fork gjson to allow us to enforce auth checks as before
Previously, all events would come down redacted because the hash
checks would fail. They would fail because sjson.DeleteBytes didn't
remove keys not used for hashing. This didn't work because of a build
tag which included a file which no-oped the index returned.
See https://github.com/tidwall/gjson/issues/157
When it's resolved, let's go back to mainline.
* Use gjson@1.6.0 as it fixes https://github.com/tidwall/gjson/issues/157
* Use latest gomatrixserverlib for sig checks
* Fix a bug which could cause exclude_from_sync to not be set
Caused when sending events over federation.
* Use query variadic to make lookups actually work!
* Latest gomatrixserverlib
* Add notes on getting p2p up and running
Partly so I don't forget myself!
* refactor: Move p2p specific stuff to cmd/dendritejs
This is important or else the normal build of dendrite will fail
because the p2p libraries depend on syscall/js which doesn't work
on normal builds.
Also, clean up main.go to read a bit better.
* Update ho-http-js-libp2p to return errors from RoundTrip
* Add an LRU cache around the key DB
We actually need this for P2P because otherwise we can *segfault*
with things like: "runtime: unexpected return pc for runtime.handleEvent"
where the event is a `syscall/js` event, caused by spamming sql.js
caused by "Checking event signatures for 14 events of room state" which
hammers the key DB repeatedly in quick succession.
Using a cache fixes this, though the underlying cause is probably a bug
in the version of Go I'm on (1.13.7)
* breaking: Add Tracing.Enabled to toggle whether we do opentracing
Defaults to false, which is why this is a breaking change. We need
this flag because WASM builds cannot do opentracing.
* Start adding conditional builds for wasm to handle lib/pq
The general idea here is to have the wasm build have a `NewXXXDatabase`
that doesn't import any postgres package and hence we never import
`lib/pq`, which doesn't work under WASM (undefined `userCurrent`).
* Remove lib/pq for wasm for syncapi
* Add conditional building to remaining storage APIs
* Update build script to set env vars correctly for dendritejs
* sqlite bug fixes
* Docs
* Add a no-op main for dendritejs when not building under wasm
* Use the real prometheus, even for WASM
Instead, the dendrite-sw.js must mock out `process.pid` and
`fs.stat` - which must invoke the callback with an error (e.g `EINVAL`)
in order for it to work:
```
global.process = {
pid: 1,
};
global.fs.stat = function(path, cb) {
cb({
code: "EINVAL",
});
}
```
* Linting
2020-03-06 04:23:55 -06:00
if err != nil {
2021-01-19 12:00:42 -06:00
return 0 , fmt . Errorf ( "json.Marshal(addState): %w" , err )
}
if len ( removeState ) > 0 {
removeStateJSON , err = json . Marshal ( removeState )
Add peer-to-peer support into Dendrite via libp2p and fetch (#880)
* Use a fork of pq which supports userCurrent on wasm
* Use sqlite3_js driver when running in JS
* Add cmd/dendritejs to pull in sqlite3_js driver for wasm only
* Update to latest go-sqlite-js version
* Replace prometheus with a stub. sigh
* Hard-code a config and don't use opentracing
* Latest go-sqlite3-js version
* Generate a key for now
* Listen for fetch traffic rather than HTTP
* Latest hacks for js
* libp2p support
* More libp2p
* Fork gjson to allow us to enforce auth checks as before
Previously, all events would come down redacted because the hash
checks would fail. They would fail because sjson.DeleteBytes didn't
remove keys not used for hashing. This didn't work because of a build
tag which included a file which no-oped the index returned.
See https://github.com/tidwall/gjson/issues/157
When it's resolved, let's go back to mainline.
* Use gjson@1.6.0 as it fixes https://github.com/tidwall/gjson/issues/157
* Use latest gomatrixserverlib for sig checks
* Fix a bug which could cause exclude_from_sync to not be set
Caused when sending events over federation.
* Use query variadic to make lookups actually work!
* Latest gomatrixserverlib
* Add notes on getting p2p up and running
Partly so I don't forget myself!
* refactor: Move p2p specific stuff to cmd/dendritejs
This is important or else the normal build of dendrite will fail
because the p2p libraries depend on syscall/js which doesn't work
on normal builds.
Also, clean up main.go to read a bit better.
* Update ho-http-js-libp2p to return errors from RoundTrip
* Add an LRU cache around the key DB
We actually need this for P2P because otherwise we can *segfault*
with things like: "runtime: unexpected return pc for runtime.handleEvent"
where the event is a `syscall/js` event, caused by spamming sql.js
caused by "Checking event signatures for 14 events of room state" which
hammers the key DB repeatedly in quick succession.
Using a cache fixes this, though the underlying cause is probably a bug
in the version of Go I'm on (1.13.7)
* breaking: Add Tracing.Enabled to toggle whether we do opentracing
Defaults to false, which is why this is a breaking change. We need
this flag because WASM builds cannot do opentracing.
* Start adding conditional builds for wasm to handle lib/pq
The general idea here is to have the wasm build have a `NewXXXDatabase`
that doesn't import any postgres package and hence we never import
`lib/pq`, which doesn't work under WASM (undefined `userCurrent`).
* Remove lib/pq for wasm for syncapi
* Add conditional building to remaining storage APIs
* Update build script to set env vars correctly for dendritejs
* sqlite bug fixes
* Docs
* Add a no-op main for dendritejs when not building under wasm
* Use the real prometheus, even for WASM
Instead, the dendrite-sw.js must mock out `process.pid` and
`fs.stat` - which must invoke the callback with an error (e.g `EINVAL`)
in order for it to work:
```
global.process = {
pid: 1,
};
global.fs.stat = function(path, cb) {
cb({
code: "EINVAL",
});
}
```
* Linting
2020-03-06 04:23:55 -06:00
}
if err != nil {
2021-01-19 12:00:42 -06:00
return 0 , fmt . Errorf ( "json.Marshal(removeState): %w" , err )
Add peer-to-peer support into Dendrite via libp2p and fetch (#880)
* Use a fork of pq which supports userCurrent on wasm
* Use sqlite3_js driver when running in JS
* Add cmd/dendritejs to pull in sqlite3_js driver for wasm only
* Update to latest go-sqlite-js version
* Replace prometheus with a stub. sigh
* Hard-code a config and don't use opentracing
* Latest go-sqlite3-js version
* Generate a key for now
* Listen for fetch traffic rather than HTTP
* Latest hacks for js
* libp2p support
* More libp2p
* Fork gjson to allow us to enforce auth checks as before
Previously, all events would come down redacted because the hash
checks would fail. They would fail because sjson.DeleteBytes didn't
remove keys not used for hashing. This didn't work because of a build
tag which included a file which no-oped the index returned.
See https://github.com/tidwall/gjson/issues/157
When it's resolved, let's go back to mainline.
* Use gjson@1.6.0 as it fixes https://github.com/tidwall/gjson/issues/157
* Use latest gomatrixserverlib for sig checks
* Fix a bug which could cause exclude_from_sync to not be set
Caused when sending events over federation.
* Use query variadic to make lookups actually work!
* Latest gomatrixserverlib
* Add notes on getting p2p up and running
Partly so I don't forget myself!
* refactor: Move p2p specific stuff to cmd/dendritejs
This is important or else the normal build of dendrite will fail
because the p2p libraries depend on syscall/js which doesn't work
on normal builds.
Also, clean up main.go to read a bit better.
* Update ho-http-js-libp2p to return errors from RoundTrip
* Add an LRU cache around the key DB
We actually need this for P2P because otherwise we can *segfault*
with things like: "runtime: unexpected return pc for runtime.handleEvent"
where the event is a `syscall/js` event, caused by spamming sql.js
caused by "Checking event signatures for 14 events of room state" which
hammers the key DB repeatedly in quick succession.
Using a cache fixes this, though the underlying cause is probably a bug
in the version of Go I'm on (1.13.7)
* breaking: Add Tracing.Enabled to toggle whether we do opentracing
Defaults to false, which is why this is a breaking change. We need
this flag because WASM builds cannot do opentracing.
* Start adding conditional builds for wasm to handle lib/pq
The general idea here is to have the wasm build have a `NewXXXDatabase`
that doesn't import any postgres package and hence we never import
`lib/pq`, which doesn't work under WASM (undefined `userCurrent`).
* Remove lib/pq for wasm for syncapi
* Add conditional building to remaining storage APIs
* Update build script to set env vars correctly for dendritejs
* sqlite bug fixes
* Docs
* Add a no-op main for dendritejs when not building under wasm
* Use the real prometheus, even for WASM
Instead, the dendrite-sw.js must mock out `process.pid` and
`fs.stat` - which must invoke the callback with an error (e.g `EINVAL`)
in order for it to work:
```
global.process = {
pid: 1,
};
global.fs.stat = function(path, cb) {
cb({
code: "EINVAL",
});
}
```
* Linting
2020-03-06 04:23:55 -06:00
}
2021-01-19 12:00:42 -06:00
streamPos , err := s . streamIDStatements . nextPDUID ( ctx , txn )
2020-08-21 04:42:08 -05:00
if err != nil {
return 0 , err
}
insertStmt := sqlutil . TxStmt ( txn , s . insertEventStmt )
2022-10-19 07:05:39 -05:00
defer internal . CloseAndLogIfError ( ctx , insertStmt , "InsertEvent: stmt.close() failed" )
2020-08-21 04:42:08 -05:00
_ , err = insertStmt . ExecContext (
ctx ,
streamPos ,
event . RoomID ( ) ,
event . EventID ( ) ,
headeredJSON ,
event . Type ( ) ,
2023-06-06 15:55:18 -05:00
event . SenderID ( ) ,
2020-08-21 04:42:08 -05:00
containsURL ,
string ( addStateJSON ) ,
string ( removeStateJSON ) ,
sessionID ,
txnID ,
excludeFromSync ,
2022-07-18 07:46:15 -05:00
historyVisibility ,
2020-08-21 04:42:08 -05:00
excludeFromSync ,
)
2020-07-21 09:48:21 -05:00
return streamPos , err
2020-02-13 11:27:33 -06:00
}
2020-05-14 03:53:55 -05:00
func ( s * outputRoomEventsStatements ) SelectRecentEvents (
2020-02-13 11:27:33 -06:00
ctx context . Context , txn * sql . Tx ,
2023-04-04 12:16:53 -05:00
roomIDs [ ] string , r types . Range , eventFilter * synctypes . RoomEventFilter ,
2020-02-13 11:27:33 -06:00
chronologicalOrder bool , onlySyncEvents bool ,
2023-02-07 07:31:23 -06:00
) ( map [ string ] types . RecentEvents , error ) {
2021-01-19 12:00:42 -06:00
var query string
2020-02-13 11:27:33 -06:00
if onlySyncEvents {
2021-01-19 12:00:42 -06:00
query = selectRecentEventsForSyncSQL
2020-02-13 11:27:33 -06:00
} else {
2021-01-19 12:00:42 -06:00
query = selectRecentEventsSQL
2020-02-13 11:27:33 -06:00
}
2023-02-07 07:31:23 -06:00
result := make ( map [ string ] types . RecentEvents , len ( roomIDs ) )
for _ , roomID := range roomIDs {
stmt , params , err := prepareWithFilters (
s . db , txn , query ,
[ ] interface { } {
roomID , r . Low ( ) , r . High ( ) ,
} ,
eventFilter . Senders , eventFilter . NotSenders ,
eventFilter . Types , eventFilter . NotTypes ,
nil , eventFilter . ContainsURL , eventFilter . Limit + 1 , FilterOrderDesc ,
)
if err != nil {
return nil , fmt . Errorf ( "s.prepareWithFilters: %w" , err )
}
defer internal . CloseAndLogIfError ( ctx , stmt , "selectRecentEvents: stmt.close() failed" )
2021-01-19 12:00:42 -06:00
2023-02-07 07:31:23 -06:00
rows , err := stmt . QueryContext ( ctx , params ... )
if err != nil {
return nil , err
}
defer internal . CloseAndLogIfError ( ctx , rows , "selectRecentEvents: rows.close() failed" )
events , err := rowsToStreamEvents ( rows )
if err != nil {
return nil , err
}
2020-06-26 09:34:41 -05:00
if chronologicalOrder {
2023-02-07 07:31:23 -06:00
// The events need to be returned from oldest to latest, which isn't
// necessary the way the SQL query returns them, so a sort is necessary to
// ensure the events are in the right order in the slice.
sort . SliceStable ( events , func ( i int , j int ) bool {
return events [ i ] . StreamPosition < events [ j ] . StreamPosition
} )
}
res := types . RecentEvents { }
// we queried for 1 more than the limit, so if we returned one more mark limited=true
if len ( events ) > eventFilter . Limit {
res . Limited = true
// re-slice the extra (oldest) event out: in chronological order this is the first entry, else the last.
if chronologicalOrder {
events = events [ 1 : ]
} else {
events = events [ : len ( events ) - 1 ]
}
2020-06-26 09:34:41 -05:00
}
2023-02-07 07:31:23 -06:00
res . Events = events
result [ roomID ] = res
2020-06-26 09:34:41 -05:00
}
2023-02-07 07:31:23 -06:00
return result , nil
2020-02-13 11:27:33 -06:00
}
// selectEvents returns the events for the given event IDs. If an event is
// missing from the database, it will be omitted.
2020-05-14 03:53:55 -05:00
func ( s * outputRoomEventsStatements ) SelectEvents (
2023-04-04 12:16:53 -05:00
ctx context . Context , txn * sql . Tx , eventIDs [ ] string , filter * synctypes . RoomEventFilter , preserveOrder bool ,
2020-02-13 11:27:33 -06:00
) ( [ ] types . StreamEvent , error ) {
2022-04-08 11:53:24 -05:00
iEventIDs := make ( [ ] interface { } , len ( eventIDs ) )
for i := range eventIDs {
iEventIDs [ i ] = eventIDs [ i ]
}
selectSQL := strings . Replace ( selectEventsSQL , "($1)" , sqlutil . QueryVariadic ( len ( eventIDs ) ) , 1 )
2022-04-13 06:16:02 -05:00
if filter == nil {
2023-04-04 12:16:53 -05:00
filter = & synctypes . RoomEventFilter { Limit : 20 }
2022-04-13 06:16:02 -05:00
}
stmt , params , err := prepareWithFilters (
s . db , txn , selectSQL , iEventIDs ,
filter . Senders , filter . NotSenders ,
filter . Types , filter . NotTypes ,
nil , filter . ContainsURL , filter . Limit , FilterOrderAsc ,
)
if err != nil {
return nil , err
2022-04-08 11:53:24 -05:00
}
2022-10-19 07:05:39 -05:00
defer internal . CloseAndLogIfError ( ctx , stmt , "SelectEvents: stmt.close() failed" )
2022-04-13 06:16:02 -05:00
rows , err := stmt . QueryContext ( ctx , params ... )
2022-04-08 11:53:24 -05:00
if err != nil {
return nil , err
}
defer internal . CloseAndLogIfError ( ctx , rows , "selectEvents: rows.close() failed" )
streamEvents , err := rowsToStreamEvents ( rows )
if err != nil {
return nil , err
}
if preserveOrder {
var returnEvents [ ] types . StreamEvent
eventMap := make ( map [ string ] types . StreamEvent )
for _ , ev := range streamEvents {
eventMap [ ev . EventID ( ) ] = ev
2020-02-13 11:27:33 -06:00
}
2022-04-08 11:53:24 -05:00
for _ , eventID := range eventIDs {
ev , ok := eventMap [ eventID ]
if ok {
returnEvents = append ( returnEvents , ev )
}
2020-02-13 11:27:33 -06:00
}
2022-04-08 11:53:24 -05:00
return returnEvents , nil
2020-02-13 11:27:33 -06:00
}
2022-04-08 11:53:24 -05:00
return streamEvents , nil
2020-02-13 11:27:33 -06:00
}
2020-09-15 05:17:46 -05:00
// DeleteEventsForRoom removes all output room events stored for the given
// room ID.
func (s *outputRoomEventsStatements) DeleteEventsForRoom(
	ctx context.Context, txn *sql.Tx, roomID string,
) (err error) {
	deleteStmt := sqlutil.TxStmt(txn, s.deleteEventsForRoomStmt)
	if _, err = deleteStmt.ExecContext(ctx, roomID); err != nil {
		return err
	}
	return nil
}
2020-02-13 11:27:33 -06:00
func rowsToStreamEvents ( rows * sql . Rows ) ( [ ] types . StreamEvent , error ) {
var result [ ] types . StreamEvent
for rows . Next ( ) {
var (
2022-07-18 07:46:15 -05:00
eventID string
streamPos types . StreamPosition
eventBytes [ ] byte
excludeFromSync bool
sessionID * int64
txnID * string
transactionID * api . TransactionID
historyVisibility gomatrixserverlib . HistoryVisibility
2020-02-13 11:27:33 -06:00
)
2022-07-18 07:46:15 -05:00
if err := rows . Scan ( & eventID , & streamPos , & eventBytes , & sessionID , & excludeFromSync , & txnID , & historyVisibility ) ; err != nil {
2020-02-13 11:27:33 -06:00
return nil , err
}
// TODO: Handle redacted events
2023-04-27 06:54:20 -05:00
var ev rstypes . HeaderedEvent
if err := json . Unmarshal ( eventBytes , & ev ) ; err != nil {
2020-02-13 11:27:33 -06:00
return nil , err
}
if sessionID != nil && txnID != nil {
transactionID = & api . TransactionID {
SessionID : * sessionID ,
TransactionID : * txnID ,
}
}
2022-07-18 07:46:15 -05:00
ev . Visibility = historyVisibility
2020-02-13 11:27:33 -06:00
result = append ( result , types . StreamEvent {
2020-11-16 09:44:53 -06:00
HeaderedEvent : & ev ,
2020-02-13 11:27:33 -06:00
StreamPosition : streamPos ,
TransactionID : transactionID ,
ExcludeFromSync : excludeFromSync ,
} )
}
return result , nil
}
2022-02-21 10:12:22 -06:00
func ( s * outputRoomEventsStatements ) SelectContextEvent (
ctx context . Context , txn * sql . Tx , roomID , eventID string ,
2023-04-27 06:54:20 -05:00
) ( id int , evt rstypes . HeaderedEvent , err error ) {
2022-02-21 10:12:22 -06:00
row := sqlutil . TxStmt ( txn , s . selectContextEventStmt ) . QueryRowContext ( ctx , roomID , eventID )
var eventAsString string
2022-07-18 07:46:15 -05:00
var historyVisibility gomatrixserverlib . HistoryVisibility
if err = row . Scan ( & id , & eventAsString , & historyVisibility ) ; err != nil {
2022-02-21 10:12:22 -06:00
return 0 , evt , err
}
if err = json . Unmarshal ( [ ] byte ( eventAsString ) , & evt ) ; err != nil {
return 0 , evt , err
}
2022-07-18 07:46:15 -05:00
evt . Visibility = historyVisibility
2022-02-21 10:12:22 -06:00
return id , evt , nil
}
func ( s * outputRoomEventsStatements ) SelectContextBeforeEvent (
2023-04-04 12:16:53 -05:00
ctx context . Context , txn * sql . Tx , id int , roomID string , filter * synctypes . RoomEventFilter ,
2023-04-27 06:54:20 -05:00
) ( evts [ ] * rstypes . HeaderedEvent , err error ) {
2022-02-21 10:12:22 -06:00
stmt , params , err := prepareWithFilters (
s . db , txn , selectContextBeforeEventSQL ,
[ ] interface { } {
roomID , id ,
} ,
filter . Senders , filter . NotSenders ,
filter . Types , filter . NotTypes ,
2022-04-13 06:16:02 -05:00
nil , filter . ContainsURL , filter . Limit , FilterOrderDesc ,
2022-02-21 10:12:22 -06:00
)
2022-10-19 07:05:39 -05:00
if err != nil {
return
}
defer internal . CloseAndLogIfError ( ctx , stmt , "SelectContextBeforeEvent: stmt.close() failed" )
2022-02-21 10:12:22 -06:00
rows , err := stmt . QueryContext ( ctx , params ... )
if err != nil {
return
}
2022-03-24 05:03:22 -05:00
defer internal . CloseAndLogIfError ( ctx , rows , "rows.close() failed" )
2022-02-21 10:12:22 -06:00
for rows . Next ( ) {
var (
2022-07-18 07:46:15 -05:00
eventBytes [ ] byte
2023-04-27 06:54:20 -05:00
evt * rstypes . HeaderedEvent
2022-07-18 07:46:15 -05:00
historyVisibility gomatrixserverlib . HistoryVisibility
2022-02-21 10:12:22 -06:00
)
2022-07-18 07:46:15 -05:00
if err = rows . Scan ( & eventBytes , & historyVisibility ) ; err != nil {
2022-02-21 10:12:22 -06:00
return evts , err
}
if err = json . Unmarshal ( eventBytes , & evt ) ; err != nil {
return evts , err
}
2022-07-18 07:46:15 -05:00
evt . Visibility = historyVisibility
2022-02-21 10:12:22 -06:00
evts = append ( evts , evt )
}
return evts , rows . Err ( )
}
func ( s * outputRoomEventsStatements ) SelectContextAfterEvent (
2023-04-04 12:16:53 -05:00
ctx context . Context , txn * sql . Tx , id int , roomID string , filter * synctypes . RoomEventFilter ,
2023-04-27 06:54:20 -05:00
) ( lastID int , evts [ ] * rstypes . HeaderedEvent , err error ) {
2022-02-21 10:12:22 -06:00
stmt , params , err := prepareWithFilters (
s . db , txn , selectContextAfterEventSQL ,
[ ] interface { } {
roomID , id ,
} ,
filter . Senders , filter . NotSenders ,
filter . Types , filter . NotTypes ,
2022-04-13 06:16:02 -05:00
nil , filter . ContainsURL , filter . Limit , FilterOrderAsc ,
2022-02-21 10:12:22 -06:00
)
2022-10-19 07:05:39 -05:00
if err != nil {
return
}
defer internal . CloseAndLogIfError ( ctx , stmt , "SelectContextAfterEvent: stmt.close() failed" )
2022-02-21 10:12:22 -06:00
rows , err := stmt . QueryContext ( ctx , params ... )
if err != nil {
return
}
2022-03-24 05:03:22 -05:00
defer internal . CloseAndLogIfError ( ctx , rows , "rows.close() failed" )
2022-02-21 10:12:22 -06:00
for rows . Next ( ) {
var (
2022-07-18 07:46:15 -05:00
eventBytes [ ] byte
2023-04-27 06:54:20 -05:00
evt * rstypes . HeaderedEvent
2022-07-18 07:46:15 -05:00
historyVisibility gomatrixserverlib . HistoryVisibility
2022-02-21 10:12:22 -06:00
)
2022-07-18 07:46:15 -05:00
if err = rows . Scan ( & lastID , & eventBytes , & historyVisibility ) ; err != nil {
2022-02-21 10:12:22 -06:00
return 0 , evts , err
}
if err = json . Unmarshal ( eventBytes , & evt ) ; err != nil {
return 0 , evts , err
}
2022-07-18 07:46:15 -05:00
evt . Visibility = historyVisibility
2022-02-21 10:12:22 -06:00
evts = append ( evts , evt )
}
return lastID , evts , rows . Err ( )
}
Add peer-to-peer support into Dendrite via libp2p and fetch (#880)
* Use a fork of pq which supports userCurrent on wasm
* Use sqlite3_js driver when running in JS
* Add cmd/dendritejs to pull in sqlite3_js driver for wasm only
* Update to latest go-sqlite-js version
* Replace prometheus with a stub. sigh
* Hard-code a config and don't use opentracing
* Latest go-sqlite3-js version
* Generate a key for now
* Listen for fetch traffic rather than HTTP
* Latest hacks for js
* libp2p support
* More libp2p
* Fork gjson to allow us to enforce auth checks as before
Previously, all events would come down redacted because the hash
checks would fail. They would fail because sjson.DeleteBytes didn't
remove keys not used for hashing. This didn't work because of a build
tag which included a file which no-oped the index returned.
See https://github.com/tidwall/gjson/issues/157
When it's resolved, let's go back to mainline.
* Use gjson@1.6.0 as it fixes https://github.com/tidwall/gjson/issues/157
* Use latest gomatrixserverlib for sig checks
* Fix a bug which could cause exclude_from_sync to not be set
Caused when sending events over federation.
* Use query variadic to make lookups actually work!
* Latest gomatrixserverlib
* Add notes on getting p2p up and running
Partly so I don't forget myself!
* refactor: Move p2p specific stuff to cmd/dendritejs
This is important or else the normal build of dendrite will fail
because the p2p libraries depend on syscall/js which doesn't work
on normal builds.
Also, clean up main.go to read a bit better.
* Update ho-http-js-libp2p to return errors from RoundTrip
* Add an LRU cache around the key DB
We actually need this for P2P because otherwise we can *segfault*
with things like: "runtime: unexpected return pc for runtime.handleEvent"
where the event is a `syscall/js` event, caused by spamming sql.js
caused by "Checking event signatures for 14 events of room state" which
hammers the key DB repeatedly in quick succession.
Using a cache fixes this, though the underlying cause is probably a bug
in the version of Go I'm on (1.13.7)
* breaking: Add Tracing.Enabled to toggle whether we do opentracing
Defaults to false, which is why this is a breaking change. We need
this flag because WASM builds cannot do opentracing.
* Start adding conditional builds for wasm to handle lib/pq
The general idea here is to have the wasm build have a `NewXXXDatabase`
that doesn't import any postgres package and hence we never import
`lib/pq`, which doesn't work under WASM (undefined `userCurrent`).
* Remove lib/pq for wasm for syncapi
* Add conditional building to remaining storage APIs
* Update build script to set env vars correctly for dendritejs
* sqlite bug fixes
* Docs
* Add a no-op main for dendritejs when not building under wasm
* Use the real prometheus, even for WASM
Instead, the dendrite-sw.js must mock out `process.pid` and
`fs.stat` - which must invoke the callback with an error (e.g `EINVAL`)
in order for it to work:
```
global.process = {
pid: 1,
};
global.fs.stat = function(path, cb) {
cb({
code: "EINVAL",
});
}
```
* Linting
2020-03-06 04:23:55 -06:00
// unmarshalStateIDs decodes the JSON-encoded lists of added and removed
// state event IDs. An empty input string yields a nil slice rather than a
// decode error.
func unmarshalStateIDs(addIDsJSON, delIDsJSON string) (addIDs []string, delIDs []string, err error) {
	if addIDsJSON != "" {
		err = json.Unmarshal([]byte(addIDsJSON), &addIDs)
		if err != nil {
			return
		}
	}
	if delIDsJSON != "" {
		err = json.Unmarshal([]byte(delIDsJSON), &delIDs)
		if err != nil {
			return
		}
	}
	return
}
2022-09-27 11:06:49 -05:00
2023-01-19 14:02:32 -06:00
// PurgeEvents deletes all output room events for the given room ID.
func (s *outputRoomEventsStatements) PurgeEvents(
	ctx context.Context, txn *sql.Tx, roomID string,
) error {
	purgeStmt := sqlutil.TxStmt(txn, s.purgeEventsStmt)
	if _, err := purgeStmt.ExecContext(ctx, roomID); err != nil {
		return err
	}
	return nil
}
2023-04-27 06:54:20 -05:00
func ( s * outputRoomEventsStatements ) ReIndex ( ctx context . Context , txn * sql . Tx , limit , afterID int64 , types [ ] string ) ( map [ int64 ] rstypes . HeaderedEvent , error ) {
2023-04-17 04:25:33 -05:00
params := make ( [ ] interface { } , len ( types ) + 1 )
params [ 0 ] = afterID
2022-09-27 11:06:49 -05:00
for i := range types {
2023-04-17 04:25:33 -05:00
params [ i + 1 ] = types [ i ]
2022-09-27 11:06:49 -05:00
}
2023-04-17 04:25:33 -05:00
selectSQL := strings . Replace ( selectSearchSQL , "($2)" , sqlutil . QueryVariadicOffset ( len ( types ) , 1 ) , 1 )
stmt , params , err := prepareWithFilters ( s . db , txn , selectSQL , params , nil , nil , nil , nil , nil , nil , int ( limit ) , FilterOrderAsc )
2022-09-27 11:06:49 -05:00
if err != nil {
return nil , err
}
2023-04-17 04:25:33 -05:00
2022-09-27 11:06:49 -05:00
defer internal . CloseAndLogIfError ( ctx , stmt , "selectEvents: stmt.close() failed" )
2022-09-28 04:18:03 -05:00
rows , err := sqlutil . TxStmt ( txn , stmt ) . QueryContext ( ctx , params ... )
2022-09-27 11:06:49 -05:00
if err != nil {
return nil , err
}
defer internal . CloseAndLogIfError ( ctx , rows , "rows.close() failed" )
var eventID string
var id int64
2023-04-27 06:54:20 -05:00
result := make ( map [ int64 ] rstypes . HeaderedEvent )
2022-09-27 11:06:49 -05:00
for rows . Next ( ) {
2023-04-27 06:54:20 -05:00
var ev rstypes . HeaderedEvent
2022-09-27 11:06:49 -05:00
var eventBytes [ ] byte
if err = rows . Scan ( & id , & eventID , & eventBytes ) ; err != nil {
return nil , err
}
2023-04-27 06:54:20 -05:00
if err = json . Unmarshal ( eventBytes , & ev ) ; err != nil {
2022-09-27 11:06:49 -05:00
return nil , err
}
result [ id ] = ev
}
return result , rows . Err ( )
}