Mirror of https://github.com/matrix-org/dendrite.git (synced 2026-01-16 18:43:10 -06:00)

Commit 2ca3774841: Merge branch 'main' into neilalexander/purgeroom

CHANGES.md: 17 lines changed
@@ -1,5 +1,22 @@
 # Changelog
 
+## Dendrite 0.10.3 (2022-10-14)
+
+### Features
+
+* Event relations are now tracked and support for the `/room/{roomID}/relations/...` client API endpoints has been added
+* Support has been added for private read receipts
+* The built-in NATS Server has been updated to version 2.9.3
+
+### Fixes
+
+* The `unread_notifications` are now always populated in joined room responses
+* The `/get_missing_events` federation API endpoint should now work correctly for rooms with `joined` and `invited` visibility settings, returning redacted events for events that other servers are not allowed to see
+* The `/event` client API endpoint now applies history visibility correctly
+* Read markers should now be updated much more reliably
+* A rare bug in the sync API which could cause some `join` memberships to be incorrectly overwritten by other memberships when working out which rooms to populate has been fixed
+* The federation API now correctly updates the joined hosts table during a state rewrite
+
 ## Dendrite 0.10.2 (2022-10-07)
 
 ### Features
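The private read receipts feature mentioned above corresponds to MSC2285, where a client marks an event as read without broadcasting the receipt to other users. A minimal, illustrative Go client call is sketched below; the homeserver URL, room ID, event ID and access token are placeholders, and the endpoint shape follows the Matrix client-server spec rather than anything specific to this commit.

// Illustrative only: marks an event as read without broadcasting the receipt,
// using the m.read.private receipt type from MSC2285. All values are placeholders.
package main

import (
	"bytes"
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	homeserver := "https://matrix.example.org" // placeholder
	roomID := "!roomid:example.org"            // placeholder
	eventID := "$eventid"                      // placeholder
	accessToken := "ACCESS_TOKEN"              // placeholder

	endpoint := fmt.Sprintf("%s/_matrix/client/v3/rooms/%s/receipt/m.read.private/%s",
		homeserver, url.PathEscape(roomID), url.PathEscape(eventID))

	req, err := http.NewRequest(http.MethodPost, endpoint, bytes.NewBufferString("{}"))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer "+accessToken)
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}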
@@ -19,6 +19,8 @@ import (
 	"net/http"
 	"time"
 
+	"github.com/matrix-org/gomatrixserverlib"
+
 	appserviceAPI "github.com/matrix-org/dendrite/appservice/api"
 	"github.com/matrix-org/dendrite/clientapi/auth/authtypes"
 	"github.com/matrix-org/dendrite/clientapi/httputil"
@@ -27,7 +29,6 @@ import (
 	"github.com/matrix-org/dendrite/roomserver/api"
 	"github.com/matrix-org/dendrite/setup/config"
 	userapi "github.com/matrix-org/dendrite/userapi/api"
-	"github.com/matrix-org/gomatrixserverlib"
 
 	"github.com/matrix-org/gomatrix"
 	"github.com/matrix-org/util"
@@ -126,20 +127,6 @@ func SetAvatarURL(
 		}
 	}
 
-	res := &userapi.QueryProfileResponse{}
-	err = profileAPI.QueryProfile(req.Context(), &userapi.QueryProfileRequest{
-		UserID: userID,
-	}, res)
-	if err != nil {
-		util.GetLogger(req.Context()).WithError(err).Error("profileAPI.QueryProfile failed")
-		return jsonerror.InternalServerError()
-	}
-	oldProfile := &authtypes.Profile{
-		Localpart:   localpart,
-		DisplayName: res.DisplayName,
-		AvatarURL:   res.AvatarURL,
-	}
-
 	setRes := &userapi.PerformSetAvatarURLResponse{}
 	if err = profileAPI.SetAvatarURL(req.Context(), &userapi.PerformSetAvatarURLRequest{
 		Localpart: localpart,
@@ -148,41 +135,17 @@ func SetAvatarURL(
 		util.GetLogger(req.Context()).WithError(err).Error("profileAPI.SetAvatarURL failed")
 		return jsonerror.InternalServerError()
 	}
-
-	var roomsRes api.QueryRoomsForUserResponse
-	err = rsAPI.QueryRoomsForUser(req.Context(), &api.QueryRoomsForUserRequest{
-		UserID:         device.UserID,
-		WantMembership: "join",
-	}, &roomsRes)
-	if err != nil {
-		util.GetLogger(req.Context()).WithError(err).Error("QueryRoomsForUser failed")
-		return jsonerror.InternalServerError()
-	}
-
-	newProfile := authtypes.Profile{
-		Localpart:   localpart,
-		DisplayName: oldProfile.DisplayName,
-		AvatarURL:   r.AvatarURL,
-	}
-
-	events, err := buildMembershipEvents(
-		req.Context(), roomsRes.RoomIDs, newProfile, userID, cfg, evTime, rsAPI,
-	)
-	switch e := err.(type) {
-	case nil:
-	case gomatrixserverlib.BadJSONError:
+	// No need to build new membership events, since nothing changed
+	if !setRes.Changed {
 		return util.JSONResponse{
-			Code: http.StatusBadRequest,
-			JSON: jsonerror.BadJSON(e.Error()),
+			Code: http.StatusOK,
+			JSON: struct{}{},
 		}
-	default:
-		util.GetLogger(req.Context()).WithError(err).Error("buildMembershipEvents failed")
-		return jsonerror.InternalServerError()
 	}
 
-	if err := api.SendEvents(req.Context(), rsAPI, api.KindNew, events, cfg.Matrix.ServerName, cfg.Matrix.ServerName, nil, true); err != nil {
-		util.GetLogger(req.Context()).WithError(err).Error("SendEvents failed")
-		return jsonerror.InternalServerError()
+	response, err := updateProfile(req.Context(), rsAPI, device, setRes.Profile, userID, cfg, evTime)
+	if err != nil {
+		return response
 	}
 
 	return util.JSONResponse{
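The early return above relies on the user API reporting whether the avatar actually changed. A standalone sketch of that pattern, with illustrative types rather than Dendrite's real userapi structs:

// Sketch of the "changed" short-circuit used above: the setter reports whether
// the stored value actually changed, so the caller can skip rebuilding
// membership events when it did not. Types and names here are illustrative.
package main

import "fmt"

type Profile struct {
	Localpart   string
	DisplayName string
	AvatarURL   string
}

type store struct {
	profiles map[string]*Profile
}

// setAvatarURL updates the avatar and reports whether anything changed.
func (s *store) setAvatarURL(localpart, avatarURL string) (p *Profile, changed bool) {
	p = s.profiles[localpart]
	if p.AvatarURL == avatarURL {
		return p, false // nothing to do, caller can return 200 immediately
	}
	p.AvatarURL = avatarURL
	return p, true
}

func main() {
	s := &store{profiles: map[string]*Profile{"alice": {Localpart: "alice"}}}
	if _, changed := s.setAvatarURL("alice", "mxc://example.org/abc"); !changed {
		fmt.Println("unchanged: skip membership event rebuild")
		return
	}
	fmt.Println("changed: rebuild m.room.member events in joined rooms")
}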
@@ -255,47 +218,51 @@ func SetDisplayName(
 		}
 	}
 
-	pRes := &userapi.QueryProfileResponse{}
-	err = profileAPI.QueryProfile(req.Context(), &userapi.QueryProfileRequest{
-		UserID: userID,
-	}, pRes)
-	if err != nil {
-		util.GetLogger(req.Context()).WithError(err).Error("profileAPI.QueryProfile failed")
-		return jsonerror.InternalServerError()
-	}
-	oldProfile := &authtypes.Profile{
-		Localpart:   localpart,
-		DisplayName: pRes.DisplayName,
-		AvatarURL:   pRes.AvatarURL,
-	}
-
+	profileRes := &userapi.PerformUpdateDisplayNameResponse{}
 	err = profileAPI.SetDisplayName(req.Context(), &userapi.PerformUpdateDisplayNameRequest{
 		Localpart:   localpart,
 		DisplayName: r.DisplayName,
-	}, &struct{}{})
+	}, profileRes)
 	if err != nil {
 		util.GetLogger(req.Context()).WithError(err).Error("profileAPI.SetDisplayName failed")
 		return jsonerror.InternalServerError()
 	}
+	// No need to build new membership events, since nothing changed
+	if !profileRes.Changed {
+		return util.JSONResponse{
+			Code: http.StatusOK,
+			JSON: struct{}{},
+		}
+	}
 
-	var res api.QueryRoomsForUserResponse
-	err = rsAPI.QueryRoomsForUser(req.Context(), &api.QueryRoomsForUserRequest{
+	response, err := updateProfile(req.Context(), rsAPI, device, profileRes.Profile, userID, cfg, evTime)
+	if err != nil {
+		return response
+	}
+
+	return util.JSONResponse{
+		Code: http.StatusOK,
+		JSON: struct{}{},
+	}
+}
+
+func updateProfile(
+	ctx context.Context, rsAPI api.ClientRoomserverAPI, device *userapi.Device,
+	profile *authtypes.Profile,
+	userID string, cfg *config.ClientAPI, evTime time.Time,
+) (util.JSONResponse, error) {
+	var res api.QueryRoomsForUserResponse
+	err := rsAPI.QueryRoomsForUser(ctx, &api.QueryRoomsForUserRequest{
 		UserID:         device.UserID,
 		WantMembership: "join",
 	}, &res)
 	if err != nil {
-		util.GetLogger(req.Context()).WithError(err).Error("QueryRoomsForUser failed")
-		return jsonerror.InternalServerError()
-	}
-
-	newProfile := authtypes.Profile{
-		Localpart:   localpart,
-		DisplayName: r.DisplayName,
-		AvatarURL:   oldProfile.AvatarURL,
+		util.GetLogger(ctx).WithError(err).Error("QueryRoomsForUser failed")
+		return jsonerror.InternalServerError(), err
 	}
 
 	events, err := buildMembershipEvents(
-		req.Context(), res.RoomIDs, newProfile, userID, cfg, evTime, rsAPI,
+		ctx, res.RoomIDs, *profile, userID, cfg, evTime, rsAPI,
 	)
 	switch e := err.(type) {
 	case nil:
@@ -303,21 +270,17 @@ func SetDisplayName(
 		return util.JSONResponse{
 			Code: http.StatusBadRequest,
 			JSON: jsonerror.BadJSON(e.Error()),
-		}
+		}, e
 	default:
-		util.GetLogger(req.Context()).WithError(err).Error("buildMembershipEvents failed")
-		return jsonerror.InternalServerError()
+		util.GetLogger(ctx).WithError(err).Error("buildMembershipEvents failed")
+		return jsonerror.InternalServerError(), e
 	}
 
-	if err := api.SendEvents(req.Context(), rsAPI, api.KindNew, events, cfg.Matrix.ServerName, cfg.Matrix.ServerName, nil, true); err != nil {
-		util.GetLogger(req.Context()).WithError(err).Error("SendEvents failed")
-		return jsonerror.InternalServerError()
+	if err := api.SendEvents(ctx, rsAPI, api.KindNew, events, cfg.Matrix.ServerName, cfg.Matrix.ServerName, nil, true); err != nil {
+		util.GetLogger(ctx).WithError(err).Error("SendEvents failed")
+		return jsonerror.InternalServerError(), err
 	}
 
-	return util.JSONResponse{
-		Code: http.StatusOK,
-		JSON: struct{}{},
-	}
+	return util.JSONResponse{}, nil
 }
 
 // getProfile gets the full profile of a user by querying the database or a
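The reason a profile change triggers buildMembershipEvents at all is that a member's display name and avatar are denormalised into each joined room's m.room.member state event, so every room needs a fresh membership event carrying the new values. A small illustration of that event content (field names per the Matrix spec, values are placeholders):

// Why a profile change fans out to every joined room: the member's display name
// and avatar are copied into each room's m.room.member state event, so the
// server re-sends that event with the new values.
package main

import (
	"encoding/json"
	"fmt"
)

type memberContent struct {
	Membership  string `json:"membership"`
	DisplayName string `json:"displayname,omitempty"`
	AvatarURL   string `json:"avatar_url,omitempty"`
}

func main() {
	content := memberContent{
		Membership:  "join",
		DisplayName: "Alice",
		AvatarURL:   "mxc://example.org/new-avatar",
	}
	b, _ := json.MarshalIndent(content, "", "  ")
	fmt.Println(string(b))
}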
@@ -19,6 +19,9 @@ import (
 	"net/http"
 	"time"
 
+	"github.com/matrix-org/gomatrixserverlib"
+	"github.com/matrix-org/util"
+
 	"github.com/matrix-org/dendrite/clientapi/httputil"
 	"github.com/matrix-org/dendrite/clientapi/jsonerror"
 	"github.com/matrix-org/dendrite/internal/eventutil"
@@ -26,8 +29,6 @@ import (
 	roomserverAPI "github.com/matrix-org/dendrite/roomserver/api"
 	"github.com/matrix-org/dendrite/setup/config"
 	userapi "github.com/matrix-org/dendrite/userapi/api"
-	"github.com/matrix-org/gomatrixserverlib"
-	"github.com/matrix-org/util"
 )
 
 type redactionContent struct {
@@ -51,7 +52,7 @@ func SendRedaction(
 
 	if txnID != nil {
 		// Try to fetch response from transactionsCache
-		if res, ok := txnCache.FetchTransaction(device.AccessToken, *txnID); ok {
+		if res, ok := txnCache.FetchTransaction(device.AccessToken, *txnID, req.URL); ok {
 			return *res
 		}
 	}
@@ -144,7 +145,7 @@ func SendRedaction(
 
 	// Add response to transactionsCache
 	if txnID != nil {
-		txnCache.AddTransaction(device.AccessToken, *txnID, &res)
+		txnCache.AddTransaction(device.AccessToken, *txnID, req.URL, &res)
 	}
 
 	return res
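Passing req.URL into FetchTransaction and AddTransaction widens the idempotency-cache key from (access token, transaction ID) to also include the endpoint, since the same txnId may legitimately be reused against different endpoints. A sketch of the idea, not Dendrite's actual transactions.Cache API:

// Illustration of why the cache key now includes the request URL: the same
// txnId sent to two different endpoints (e.g. /send and /redact) must not
// collide in the idempotency cache.
package main

import (
	"fmt"
	"net/url"
)

type cacheKey struct {
	accessToken string
	txnID       string
	endpoint    string // path of the request URL
}

func main() {
	send, _ := url.Parse("/_matrix/client/v3/rooms/!r:example.org/send/m.room.message/abc123")
	redact, _ := url.Parse("/_matrix/client/v3/rooms/!r:example.org/redact/$event/abc123")

	cache := map[cacheKey]string{}
	cache[cacheKey{"token", "abc123", send.Path}] = "response for /send"
	cache[cacheKey{"token", "abc123", redact.Path}] = "response for /redact"

	// Same token and txnID, but different endpoints now map to different entries.
	fmt.Println(len(cache)) // 2
}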
@@ -70,6 +70,7 @@ func Setup(
 
 	unstableFeatures := map[string]bool{
 		"org.matrix.e2e_cross_signing": true,
+		"org.matrix.msc2285.stable":    true,
 	}
 	for _, msc := range cfg.MSCs.MSCs {
 		unstableFeatures["org.matrix."+msc] = true
@@ -183,7 +184,7 @@ func Setup(
 	// server notifications
 	if cfg.Matrix.ServerNotices.Enabled {
 		logrus.Info("Enabling server notices at /_synapse/admin/v1/send_server_notice")
-		serverNotificationSender, err := getSenderDevice(context.Background(), userAPI, cfg)
+		serverNotificationSender, err := getSenderDevice(context.Background(), rsAPI, userAPI, cfg)
 		if err != nil {
 			logrus.WithError(err).Fatal("unable to get account for sending sending server notices")
 		}
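The new org.matrix.msc2285.stable entry is advertised to clients through the unstable_features object of GET /_matrix/client/versions, which is how clients discover that private read receipts are supported. An illustrative rendering of such a response body (the versions list below is made up, not what Dendrite actually advertises):

// Sketch of how the feature map above surfaces in the /versions response body.
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	unstableFeatures := map[string]bool{
		"org.matrix.e2e_cross_signing": true,
		"org.matrix.msc2285.stable":    true,
	}
	body, _ := json.MarshalIndent(map[string]interface{}{
		"versions":          []string{"v1.1", "v1.2"}, // illustrative only
		"unstable_features": unstableFeatures,
	}, "", "  ")
	fmt.Println(string(body))
}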
@@ -86,7 +86,7 @@ func SendEvent(
 
 	if txnID != nil {
 		// Try to fetch response from transactionsCache
-		if res, ok := txnCache.FetchTransaction(device.AccessToken, *txnID); ok {
+		if res, ok := txnCache.FetchTransaction(device.AccessToken, *txnID, req.URL); ok {
 			return *res
 		}
 	}
@@ -206,7 +206,7 @@ func SendEvent(
 	}
 	// Add response to transactionsCache
 	if txnID != nil {
-		txnCache.AddTransaction(device.AccessToken, *txnID, &res)
+		txnCache.AddTransaction(device.AccessToken, *txnID, req.URL, &res)
 	}
 
 	// Take a note of how long it took to generate the event vs submit
@@ -16,12 +16,13 @@ import (
 	"encoding/json"
 	"net/http"
 
+	"github.com/matrix-org/util"
+
 	"github.com/matrix-org/dendrite/clientapi/httputil"
 	"github.com/matrix-org/dendrite/clientapi/jsonerror"
 	"github.com/matrix-org/dendrite/clientapi/producers"
 	"github.com/matrix-org/dendrite/internal/transactions"
 	userapi "github.com/matrix-org/dendrite/userapi/api"
-	"github.com/matrix-org/util"
 )
 
 // SendToDevice handles PUT /_matrix/client/r0/sendToDevice/{eventType}/{txnId}
@@ -33,7 +34,7 @@ func SendToDevice(
 	eventType string, txnID *string,
 ) util.JSONResponse {
 	if txnID != nil {
-		if res, ok := txnCache.FetchTransaction(device.AccessToken, *txnID); ok {
+		if res, ok := txnCache.FetchTransaction(device.AccessToken, *txnID, req.URL); ok {
 			return *res
 		}
 	}
@@ -63,7 +64,7 @@ func SendToDevice(
 	}
 
 	if txnID != nil {
-		txnCache.AddTransaction(device.AccessToken, *txnID, &res)
+		txnCache.AddTransaction(device.AccessToken, *txnID, req.URL, &res)
 	}
 
 	return res
@@ -21,7 +21,6 @@ import (
 	"net/http"
 	"time"
 
-	"github.com/matrix-org/dendrite/roomserver/version"
 	"github.com/matrix-org/gomatrix"
 	"github.com/matrix-org/gomatrixserverlib"
 	"github.com/matrix-org/gomatrixserverlib/tokens"
@@ -29,6 +28,8 @@ import (
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/sirupsen/logrus"
 
+	"github.com/matrix-org/dendrite/roomserver/version"
+
 	appserviceAPI "github.com/matrix-org/dendrite/appservice/api"
 	"github.com/matrix-org/dendrite/clientapi/httputil"
 	"github.com/matrix-org/dendrite/clientapi/jsonerror"
@@ -73,7 +74,7 @@ func SendServerNotice(
 
 	if txnID != nil {
 		// Try to fetch response from transactionsCache
-		if res, ok := txnCache.FetchTransaction(device.AccessToken, *txnID); ok {
+		if res, ok := txnCache.FetchTransaction(device.AccessToken, *txnID, req.URL); ok {
 			return *res
 		}
 	}
@@ -251,7 +252,7 @@ func SendServerNotice(
 	}
 	// Add response to transactionsCache
 	if txnID != nil {
-		txnCache.AddTransaction(device.AccessToken, *txnID, &res)
+		txnCache.AddTransaction(device.AccessToken, *txnID, req.URL, &res)
 	}
 
 	// Take a note of how long it took to generate the event vs submit
@@ -276,6 +277,7 @@ func (r sendServerNoticeRequest) valid() (ok bool) {
 // It returns an userapi.Device, which is used for building the event
 func getSenderDevice(
 	ctx context.Context,
+	rsAPI api.ClientRoomserverAPI,
 	userAPI userapi.ClientUserAPI,
 	cfg *config.ClientAPI,
 ) (*userapi.Device, error) {
@@ -290,16 +292,32 @@ func getSenderDevice(
 		return nil, err
 	}
 
-	// set the avatarurl for the user
-	res := &userapi.PerformSetAvatarURLResponse{}
+	// Set the avatarurl for the user
+	avatarRes := &userapi.PerformSetAvatarURLResponse{}
 	if err = userAPI.SetAvatarURL(ctx, &userapi.PerformSetAvatarURLRequest{
 		Localpart: cfg.Matrix.ServerNotices.LocalPart,
 		AvatarURL: cfg.Matrix.ServerNotices.AvatarURL,
-	}, res); err != nil {
+	}, avatarRes); err != nil {
 		util.GetLogger(ctx).WithError(err).Error("userAPI.SetAvatarURL failed")
 		return nil, err
 	}
 
+	profile := avatarRes.Profile
+
+	// Set the displayname for the user
+	displayNameRes := &userapi.PerformUpdateDisplayNameResponse{}
+	if err = userAPI.SetDisplayName(ctx, &userapi.PerformUpdateDisplayNameRequest{
+		Localpart:   cfg.Matrix.ServerNotices.LocalPart,
+		DisplayName: cfg.Matrix.ServerNotices.DisplayName,
+	}, displayNameRes); err != nil {
+		util.GetLogger(ctx).WithError(err).Error("userAPI.SetDisplayName failed")
+		return nil, err
+	}
+
+	if displayNameRes.Changed {
+		profile.DisplayName = cfg.Matrix.ServerNotices.DisplayName
+	}
+
 	// Check if we got existing devices
 	deviceRes := &userapi.QueryDevicesResponse{}
 	err = userAPI.QueryDevices(ctx, &userapi.QueryDevicesRequest{
@@ -309,7 +327,15 @@ func getSenderDevice(
 		return nil, err
 	}
 
+	// We've got an existing account, return the first device of it
 	if len(deviceRes.Devices) > 0 {
+		// If there were changes to the profile, create a new membership event
+		if displayNameRes.Changed || avatarRes.Changed {
+			_, err = updateProfile(ctx, rsAPI, &deviceRes.Devices[0], profile, accRes.Account.UserID, cfg, time.Now())
+			if err != nil {
+				return nil, err
+			}
+		}
 		return &deviceRes.Devices[0], nil
 	}
@@ -179,7 +179,10 @@ func sharedSecretRegister(sharedSecret, serverURL, localpart, password string, a
 		body, _ = io.ReadAll(regResp.Body)
 		return "", fmt.Errorf(gjson.GetBytes(body, "error").Str)
 	}
-	r, _ := io.ReadAll(regResp.Body)
+	r, err := io.ReadAll(regResp.Body)
+	if err != nil {
+		return "", fmt.Errorf("failed to read response body (HTTP %d): %w", regResp.StatusCode, err)
+	}
+
 	return gjson.GetBytes(r, "access_token").Str, nil
 }
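The fix above stops discarding the io.ReadAll error and wraps it with %w, so the HTTP status appears in the message while the underlying cause remains inspectable. A minimal demonstration of that wrapping behaviour:

// The %w verb keeps the original error in the chain, so errors.Is/errors.As
// still work on the wrapped value. Values here are stand-ins.
package main

import (
	"errors"
	"fmt"
	"io"
)

func main() {
	readErr := io.ErrUnexpectedEOF // stand-in for a failed body read
	wrapped := fmt.Errorf("failed to read response body (HTTP %d): %w", 200, readErr)

	fmt.Println(wrapped)
	fmt.Println(errors.Is(wrapped, io.ErrUnexpectedEOF)) // true: the cause survives wrapping
}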
@@ -231,9 +231,9 @@ GEM
     jekyll-seo-tag (~> 2.1)
     minitest (5.15.0)
     multipart-post (2.1.1)
-    nokogiri (1.13.6-arm64-darwin)
+    nokogiri (1.13.9-arm64-darwin)
       racc (~> 1.4)
-    nokogiri (1.13.6-x86_64-linux)
+    nokogiri (1.13.9-x86_64-linux)
       racc (~> 1.4)
     octokit (4.22.0)
       faraday (>= 0.9)
@@ -116,17 +116,14 @@ func NewInternalAPI(
 		_ = federationDB.RemoveAllServersFromBlacklist()
 	}
 
-	stats := &statistics.Statistics{
-		DB:                     federationDB,
-		FailuresUntilBlacklist: cfg.FederationMaxRetries,
-	}
+	stats := statistics.NewStatistics(federationDB, cfg.FederationMaxRetries+1)
 
 	js, _ := base.NATS.Prepare(base.ProcessContext, &cfg.Matrix.JetStream)
 
 	queues := queue.NewOutgoingQueues(
 		federationDB, base.ProcessContext,
 		cfg.Matrix.DisableFederation,
-		cfg.Matrix.ServerName, federation, rsAPI, stats,
+		cfg.Matrix.ServerName, federation, rsAPI, &stats,
 		&queue.SigningInfo{
 			KeyID:      cfg.Matrix.KeyID,
 			PrivateKey: cfg.Matrix.PrivateKey,
@@ -183,5 +180,5 @@ func NewInternalAPI(
 	}
 	time.AfterFunc(time.Minute, cleanExpiredEDUs)
 
-	return internal.NewFederationInternalAPI(federationDB, cfg, rsAPI, federation, stats, caches, queues, keyRing)
+	return internal.NewFederationInternalAPI(federationDB, cfg, rsAPI, federation, &stats, caches, queues, keyRing)
 }
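Switching from a struct literal to statistics.NewStatistics matters because the constructor can initialise internal state (such as the backoff timer map added later in this diff) that a bare literal would leave as a nil map. A generic sketch of the hazard, not the real Statistics type:

// A zero-value map panics on write, so types with internal maps are safer to
// build through a constructor than through a struct literal.
package main

import "fmt"

type stats struct {
	failuresUntilBlacklist uint32
	timers                 map[string]int
}

func newStats(failures uint32) stats {
	return stats{
		failuresUntilBlacklist: failures,
		timers:                 make(map[string]int), // allocated up front
	}
}

func main() {
	bad := stats{failuresUntilBlacklist: 16} // literal: timers is nil
	_ = bad
	// bad.timers["example.org"] = 1 // would panic: assignment to entry in nil map

	good := newStats(16)
	good.timers["example.org"] = 1
	fmt.Println(good.timers)
}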
@@ -21,21 +21,22 @@ import (
 	"sync"
 	"time"
 
+	"github.com/matrix-org/gomatrix"
+	"github.com/matrix-org/gomatrixserverlib"
+	"github.com/sirupsen/logrus"
+	"go.uber.org/atomic"
+
 	fedapi "github.com/matrix-org/dendrite/federationapi/api"
 	"github.com/matrix-org/dendrite/federationapi/statistics"
 	"github.com/matrix-org/dendrite/federationapi/storage"
 	"github.com/matrix-org/dendrite/federationapi/storage/shared"
 	"github.com/matrix-org/dendrite/roomserver/api"
 	"github.com/matrix-org/dendrite/setup/process"
-	"github.com/matrix-org/gomatrix"
-	"github.com/matrix-org/gomatrixserverlib"
-	"github.com/sirupsen/logrus"
-	"go.uber.org/atomic"
 )
 
 const (
 	maxPDUsPerTransaction = 50
-	maxEDUsPerTransaction = 50
+	maxEDUsPerTransaction = 100
 	maxPDUsInMemory       = 128
 	maxEDUsInMemory       = 128
 	queueIdleTimeout      = time.Second * 30
@@ -64,7 +65,6 @@ type destinationQueue struct {
 	pendingPDUs      []*queuedPDU  // PDUs waiting to be sent
 	pendingEDUs      []*queuedEDU  // EDUs waiting to be sent
 	pendingMutex     sync.RWMutex  // protects pendingPDUs and pendingEDUs
-	interruptBackoff chan bool     // interrupts backoff
 }
 
 // Send event adds the event to the pending queue for the destination.
@@ -75,6 +75,7 @@ func (oq *destinationQueue) sendEvent(event *gomatrixserverlib.HeaderedEvent, re
 		logrus.Errorf("attempt to send nil PDU with destination %q", oq.destination)
 		return
 	}
+
 	// Create a database entry that associates the given PDU NID with
 	// this destination queue. We'll then be able to retrieve the PDU
 	// later.
@@ -102,12 +103,12 @@ func (oq *destinationQueue) sendEvent(event *gomatrixserverlib.HeaderedEvent, re
 			oq.overflowed.Store(true)
 		}
 		oq.pendingMutex.Unlock()
-		// Wake up the queue if it's asleep.
-		oq.wakeQueueIfNeeded()
-		select {
-		case oq.notify <- struct{}{}:
-		default:
+
+		if !oq.backingOff.Load() {
+			oq.wakeQueueAndNotify()
 		}
+	} else {
+		oq.overflowed.Store(true)
 	}
 }
@@ -147,12 +148,37 @@ func (oq *destinationQueue) sendEDU(event *gomatrixserverlib.EDU, receipt *share
 			oq.overflowed.Store(true)
 		}
 		oq.pendingMutex.Unlock()
-		// Wake up the queue if it's asleep.
-		oq.wakeQueueIfNeeded()
-		select {
-		case oq.notify <- struct{}{}:
-		default:
+
+		if !oq.backingOff.Load() {
+			oq.wakeQueueAndNotify()
 		}
+	} else {
+		oq.overflowed.Store(true)
+	}
+}
+
+// handleBackoffNotifier is registered as the backoff notification
+// callback with Statistics. It will wakeup and notify the queue
+// if the queue is currently backing off.
+func (oq *destinationQueue) handleBackoffNotifier() {
+	// Only wake up the queue if it is backing off.
+	// Otherwise there is no pending work for the queue to handle
+	// so waking the queue would be a waste of resources.
+	if oq.backingOff.Load() {
+		oq.wakeQueueAndNotify()
+	}
+}
+
+// wakeQueueAndNotify ensures the destination queue is running and notifies it
+// that there is pending work.
+func (oq *destinationQueue) wakeQueueAndNotify() {
+	// Wake up the queue if it's asleep.
+	oq.wakeQueueIfNeeded()
+
+	// Notify the queue that there are events ready to send.
+	select {
+	case oq.notify <- struct{}{}:
+	default:
 	}
 }
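wakeQueueAndNotify relies on a common Go pattern: a buffered channel of capacity one combined with a select/default send, so any number of producers can signal the worker without blocking and duplicate signals coalesce. A self-contained sketch:

// Redundant pokes collapse into a single pending signal because the channel
// holds at most one value and extra sends fall through to default.
package main

import (
	"fmt"
	"time"
)

func main() {
	notify := make(chan struct{}, 1)

	poke := func() {
		select {
		case notify <- struct{}{}: // queued a wakeup
		default: // a wakeup is already pending; drop this one
		}
	}

	go func() {
		for range notify {
			fmt.Println("worker woke up")
			time.Sleep(10 * time.Millisecond) // simulate a batch of work
		}
	}()

	for i := 0; i < 5; i++ {
		poke() // five pokes, but the worker may only wake once or twice
	}
	time.Sleep(100 * time.Millisecond)
}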
@@ -161,10 +187,11 @@ func (oq *destinationQueue) wakeQueueIfNeeded() {
 // then we will interrupt the backoff, causing any federation
 // requests to retry.
 func (oq *destinationQueue) wakeQueueIfNeeded() {
-	// If we are backing off then interrupt the backoff.
+	// Clear the backingOff flag and update the backoff metrics if it was set.
 	if oq.backingOff.CompareAndSwap(true, false) {
-		oq.interruptBackoff <- true
+		destinationQueueBackingOff.Dec()
 	}
 
 	// If we aren't running then wake up the queue.
 	if !oq.running.Load() {
 		// Start the queue.
@@ -196,38 +223,54 @@ func (oq *destinationQueue) getPendingFromDatabase() {
 		gotEDUs[edu.receipt.String()] = struct{}{}
 	}
 
+	overflowed := false
 	if pduCapacity := maxPDUsInMemory - len(oq.pendingPDUs); pduCapacity > 0 {
 		// We have room in memory for some PDUs - let's request no more than that.
-		if pdus, err := oq.db.GetPendingPDUs(ctx, oq.destination, pduCapacity); err == nil {
+		if pdus, err := oq.db.GetPendingPDUs(ctx, oq.destination, maxPDUsInMemory); err == nil {
+			if len(pdus) == maxPDUsInMemory {
+				overflowed = true
+			}
 			for receipt, pdu := range pdus {
 				if _, ok := gotPDUs[receipt.String()]; ok {
 					continue
 				}
 				oq.pendingPDUs = append(oq.pendingPDUs, &queuedPDU{receipt, pdu})
 				retrieved = true
+				if len(oq.pendingPDUs) == maxPDUsInMemory {
+					break
+				}
 			}
 		} else {
 			logrus.WithError(err).Errorf("Failed to get pending PDUs for %q", oq.destination)
 		}
 	}
 
 	if eduCapacity := maxEDUsInMemory - len(oq.pendingEDUs); eduCapacity > 0 {
 		// We have room in memory for some EDUs - let's request no more than that.
-		if edus, err := oq.db.GetPendingEDUs(ctx, oq.destination, eduCapacity); err == nil {
+		if edus, err := oq.db.GetPendingEDUs(ctx, oq.destination, maxEDUsInMemory); err == nil {
+			if len(edus) == maxEDUsInMemory {
+				overflowed = true
+			}
 			for receipt, edu := range edus {
 				if _, ok := gotEDUs[receipt.String()]; ok {
 					continue
 				}
 				oq.pendingEDUs = append(oq.pendingEDUs, &queuedEDU{receipt, edu})
 				retrieved = true
+				if len(oq.pendingEDUs) == maxEDUsInMemory {
+					break
+				}
 			}
 		} else {
 			logrus.WithError(err).Errorf("Failed to get pending EDUs for %q", oq.destination)
 		}
 	}
 
 	// If we've retrieved all of the events from the database with room to spare
 	// in memory then we'll no longer consider this queue to be overflowed.
-	if len(oq.pendingPDUs) < maxPDUsInMemory && len(oq.pendingEDUs) < maxEDUsInMemory {
+	if !overflowed {
 		oq.overflowed.Store(false)
+	} else {
 	}
 	// If we've retrieved some events then notify the destination queue goroutine.
 	if retrieved {
@@ -238,6 +281,24 @@ func (oq *destinationQueue) getPendingFromDatabase() {
 	}
 }
 
+// checkNotificationsOnClose checks for any remaining notifications
+// and starts a new backgroundSend goroutine if any exist.
+func (oq *destinationQueue) checkNotificationsOnClose() {
+	// NOTE : If we are stopping the queue due to blacklist then it
+	// doesn't matter if we have been notified of new work since
+	// this queue instance will be deleted anyway.
+	if !oq.statistics.Blacklisted() {
+		select {
+		case <-oq.notify:
+			// We received a new notification in between the
+			// idle timeout firing and stopping the goroutine.
+			// Immediately restart the queue.
+			oq.wakeQueueAndNotify()
+		default:
+		}
+	}
+}
+
 // backgroundSend is the worker goroutine for sending events.
 func (oq *destinationQueue) backgroundSend() {
 	// Check if a worker is already running, and if it isn't, then
@@ -245,10 +306,17 @@ func (oq *destinationQueue) backgroundSend() {
 	if !oq.running.CompareAndSwap(false, true) {
 		return
 	}
 
+	// Register queue cleanup functions.
+	// NOTE : The ordering here is very intentional.
+	defer oq.checkNotificationsOnClose()
+	defer oq.running.Store(false)
+
 	destinationQueueRunning.Inc()
 	defer destinationQueueRunning.Dec()
-	defer oq.queues.clearQueue(oq)
-	defer oq.running.Store(false)
+
+	idleTimeout := time.NewTimer(queueIdleTimeout)
+	defer idleTimeout.Stop()
 
 	// Mark the queue as overflowed, so we will consult the database
 	// to see if there's anything new to send.
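The "ordering here is very intentional" note most likely leans on Go's rule that deferred calls run in reverse order: oq.running.Store(false), deferred last, executes before oq.checkNotificationsOnClose(), so the close-time check is able to start a replacement goroutine. The LIFO rule itself is easy to demonstrate:

// Go runs deferred calls in last-in, first-out order.
package main

import "fmt"

func main() {
	defer fmt.Println("1: checkNotificationsOnClose (deferred first, runs last)")
	defer fmt.Println("2: running.Store(false) (deferred last, runs first)")
	fmt.Println("worker exiting")
}

// Output:
// worker exiting
// 2: running.Store(false) (deferred last, runs first)
// 1: checkNotificationsOnClose (deferred first, runs last)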
@@ -261,59 +329,33 @@ func (oq *destinationQueue) backgroundSend() {
 			oq.getPendingFromDatabase()
 		}
 
+		// Reset the queue idle timeout.
+		if !idleTimeout.Stop() {
+			select {
+			case <-idleTimeout.C:
+			default:
+			}
+		}
+		idleTimeout.Reset(queueIdleTimeout)
+
 		// If we have nothing to do then wait either for incoming events, or
 		// until we hit an idle timeout.
 		select {
 		case <-oq.notify:
 			// There's work to do, either because getPendingFromDatabase
-			// told us there is, or because a new event has come in via
-			// sendEvent/sendEDU.
-		case <-time.After(queueIdleTimeout):
+			// told us there is, a new event has come in via sendEvent/sendEDU,
+			// or we are backing off and it is time to retry.
+		case <-idleTimeout.C:
 			// The worker is idle so stop the goroutine. It'll get
 			// restarted automatically the next time we have an event to
 			// send.
 			return
 		case <-oq.process.Context().Done():
 			// The parent process is shutting down, so stop.
+			oq.statistics.ClearBackoff()
 			return
 		}
 
-		// If we are backing off this server then wait for the
-		// backoff duration to complete first, or until explicitly
-		// told to retry.
-		until, blacklisted := oq.statistics.BackoffInfo()
-		if blacklisted {
-			// It's been suggested that we should give up because the backoff
-			// has exceeded a maximum allowable value. Clean up the in-memory
-			// buffers at this point. The PDU clean-up is already on a defer.
-			logrus.Warnf("Blacklisting %q due to exceeding backoff threshold", oq.destination)
-			oq.pendingMutex.Lock()
-			for i := range oq.pendingPDUs {
-				oq.pendingPDUs[i] = nil
-			}
-			for i := range oq.pendingEDUs {
-				oq.pendingEDUs[i] = nil
-			}
-			oq.pendingPDUs = nil
-			oq.pendingEDUs = nil
-			oq.pendingMutex.Unlock()
-			return
-		}
-		if until != nil && until.After(time.Now()) {
-			// We haven't backed off yet, so wait for the suggested amount of
-			// time.
-			duration := time.Until(*until)
-			logrus.Debugf("Backing off %q for %s", oq.destination, duration)
-			oq.backingOff.Store(true)
-			destinationQueueBackingOff.Inc()
-			select {
-			case <-time.After(duration):
-			case <-oq.interruptBackoff:
-			}
-			destinationQueueBackingOff.Dec()
-			oq.backingOff.Store(false)
-		}
-
 		// Work out which PDUs/EDUs to include in the next transaction.
 		oq.pendingMutex.RLock()
 		pduCount := len(oq.pendingPDUs)
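The new idleTimeout handling uses the standard Stop/drain/Reset idiom for time.Timer: if the timer already fired, the stale value must be drained from its channel before Reset, otherwise the next receive would return immediately. Shown in isolation:

// If Stop returns false the timer already fired, so the stale expiry has to be
// drained from the channel before the timer is reused.
package main

import (
	"fmt"
	"time"
)

func main() {
	idle := time.NewTimer(50 * time.Millisecond)
	defer idle.Stop()

	time.Sleep(100 * time.Millisecond) // let the timer fire so the drain path is exercised

	if !idle.Stop() {
		select {
		case <-idle.C: // drain the stale expiry
		default:
		}
	}
	idle.Reset(50 * time.Millisecond)

	start := time.Now()
	<-idle.C
	fmt.Println("fired after", time.Since(start).Round(10*time.Millisecond))
}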
@@ -328,99 +370,52 @@ func (oq *destinationQueue) backgroundSend() {
 		toSendEDUs := oq.pendingEDUs[:eduCount]
 		oq.pendingMutex.RUnlock()
 
+		// If we didn't get anything from the database and there are no
+		// pending EDUs then there's nothing to do - stop here.
+		if pduCount == 0 && eduCount == 0 {
+			continue
+		}
+
 		// If we have pending PDUs or EDUs then construct a transaction.
 		// Try sending the next transaction and see what happens.
-		transaction, pc, ec, terr := oq.nextTransaction(toSendPDUs, toSendEDUs)
+		terr := oq.nextTransaction(toSendPDUs, toSendEDUs)
 		if terr != nil {
 			// We failed to send the transaction. Mark it as a failure.
-			oq.statistics.Failure()
-		} else if transaction {
-			// If we successfully sent the transaction then clear out
-			// the pending events and EDUs, and wipe our transaction ID.
-			oq.statistics.Success()
-			oq.pendingMutex.Lock()
-			for i := range oq.pendingPDUs[:pc] {
-				oq.pendingPDUs[i] = nil
-			}
-			for i := range oq.pendingEDUs[:ec] {
-				oq.pendingEDUs[i] = nil
+			_, blacklisted := oq.statistics.Failure()
+			if !blacklisted {
+				// Register the backoff state and exit the goroutine.
+				// It'll get restarted automatically when the backoff
+				// completes.
+				oq.backingOff.Store(true)
+				destinationQueueBackingOff.Inc()
+				return
+			} else {
+				// Immediately trigger the blacklist logic.
+				oq.blacklistDestination()
+				return
 			}
-			oq.pendingPDUs = oq.pendingPDUs[pc:]
-			oq.pendingEDUs = oq.pendingEDUs[ec:]
-			oq.pendingMutex.Unlock()
+		} else {
+			oq.handleTransactionSuccess(pduCount, eduCount)
 		}
 	}
 }
 
 // nextTransaction creates a new transaction from the pending event
-// queue and sends it. Returns true if a transaction was sent or
-// false otherwise.
+// queue and sends it.
+// Returns an error if the transaction wasn't sent.
 func (oq *destinationQueue) nextTransaction(
 	pdus []*queuedPDU,
 	edus []*queuedEDU,
-) (bool, int, int, error) {
-	// If there's no projected transaction ID then generate one. If
-	// the transaction succeeds then we'll set it back to "" so that
-	// we generate a new one next time. If it fails, we'll preserve
-	// it so that we retry with the same transaction ID.
-	oq.transactionIDMutex.Lock()
-	if oq.transactionID == "" {
-		now := gomatrixserverlib.AsTimestamp(time.Now())
-		oq.transactionID = gomatrixserverlib.TransactionID(fmt.Sprintf("%d-%d", now, oq.statistics.SuccessCount()))
-	}
-	oq.transactionIDMutex.Unlock()
-
+) error {
 	// Create the transaction.
-	t := gomatrixserverlib.Transaction{
-		PDUs: []json.RawMessage{},
-		EDUs: []gomatrixserverlib.EDU{},
-	}
-	t.Origin = oq.origin
-	t.Destination = oq.destination
-	t.OriginServerTS = gomatrixserverlib.AsTimestamp(time.Now())
-	t.TransactionID = oq.transactionID
-
-	// If we didn't get anything from the database and there are no
-	// pending EDUs then there's nothing to do - stop here.
-	if len(pdus) == 0 && len(edus) == 0 {
-		return false, 0, 0, nil
-	}
-
-	var pduReceipts []*shared.Receipt
-	var eduReceipts []*shared.Receipt
-
-	// Go through PDUs that we retrieved from the database, if any,
-	// and add them into the transaction.
-	for _, pdu := range pdus {
-		if pdu == nil || pdu.pdu == nil {
-			continue
-		}
-		// Append the JSON of the event, since this is a json.RawMessage type in the
-		// gomatrixserverlib.Transaction struct
-		t.PDUs = append(t.PDUs, pdu.pdu.JSON())
-		pduReceipts = append(pduReceipts, pdu.receipt)
-	}
-
-	// Do the same for pending EDUS in the queue.
-	for _, edu := range edus {
-		if edu == nil || edu.edu == nil {
-			continue
-		}
-		t.EDUs = append(t.EDUs, *edu.edu)
-		eduReceipts = append(eduReceipts, edu.receipt)
-	}
-
+	t, pduReceipts, eduReceipts := oq.createTransaction(pdus, edus)
 	logrus.WithField("server_name", oq.destination).Debugf("Sending transaction %q containing %d PDUs, %d EDUs", t.TransactionID, len(t.PDUs), len(t.EDUs))
 
 	// Try to send the transaction to the destination server.
-	// TODO: we should check for 500-ish fails vs 400-ish here,
-	// since we shouldn't queue things indefinitely in response
-	// to a 400-ish error
 	ctx, cancel := context.WithTimeout(oq.process.Context(), time.Minute*5)
 	defer cancel()
 	_, err := oq.client.SendTransaction(ctx, t)
-	switch err.(type) {
+	switch errResponse := err.(type) {
 	case nil:
 		// Clean up the transaction in the database.
 		if pduReceipts != nil {
@@ -439,16 +434,129 @@ func (oq *destinationQueue) nextTransaction(
 		oq.transactionIDMutex.Lock()
 		oq.transactionID = ""
 		oq.transactionIDMutex.Unlock()
-		return true, len(t.PDUs), len(t.EDUs), nil
+		return nil
 	case gomatrix.HTTPError:
 		// Report that we failed to send the transaction and we
 		// will retry again, subject to backoff.
-		return false, 0, 0, err
+		// TODO: we should check for 500-ish fails vs 400-ish here,
+		// since we shouldn't queue things indefinitely in response
+		// to a 400-ish error
+		code := errResponse.Code
+		logrus.Debug("Transaction failed with HTTP", code)
+		return err
 	default:
 		logrus.WithFields(logrus.Fields{
 			"destination":   oq.destination,
 			logrus.ErrorKey: err,
 		}).Debugf("Failed to send transaction %q", t.TransactionID)
-		return false, 0, 0, err
+		return err
 	}
 }
+
+// createTransaction generates a gomatrixserverlib.Transaction from the provided pdus and edus.
+// It also returns the associated event receipts so they can be cleaned from the database in
+// the case of a successful transaction.
+func (oq *destinationQueue) createTransaction(
+	pdus []*queuedPDU,
+	edus []*queuedEDU,
+) (gomatrixserverlib.Transaction, []*shared.Receipt, []*shared.Receipt) {
+	// If there's no projected transaction ID then generate one. If
+	// the transaction succeeds then we'll set it back to "" so that
+	// we generate a new one next time. If it fails, we'll preserve
+	// it so that we retry with the same transaction ID.
+	oq.transactionIDMutex.Lock()
+	if oq.transactionID == "" {
+		now := gomatrixserverlib.AsTimestamp(time.Now())
+		oq.transactionID = gomatrixserverlib.TransactionID(fmt.Sprintf("%d-%d", now, oq.statistics.SuccessCount()))
+	}
+	oq.transactionIDMutex.Unlock()
+
+	t := gomatrixserverlib.Transaction{
+		PDUs: []json.RawMessage{},
+		EDUs: []gomatrixserverlib.EDU{},
+	}
+	t.Origin = oq.origin
+	t.Destination = oq.destination
+	t.OriginServerTS = gomatrixserverlib.AsTimestamp(time.Now())
+	t.TransactionID = oq.transactionID
+
+	var pduReceipts []*shared.Receipt
+	var eduReceipts []*shared.Receipt
+
+	// Go through PDUs that we retrieved from the database, if any,
+	// and add them into the transaction.
+	for _, pdu := range pdus {
+		// These should never be nil.
+		if pdu == nil || pdu.pdu == nil {
+			continue
+		}
+		// Append the JSON of the event, since this is a json.RawMessage type in the
+		// gomatrixserverlib.Transaction struct
+		t.PDUs = append(t.PDUs, pdu.pdu.JSON())
+		pduReceipts = append(pduReceipts, pdu.receipt)
+	}
+
+	// Do the same for pending EDUS in the queue.
+	for _, edu := range edus {
+		// These should never be nil.
+		if edu == nil || edu.edu == nil {
+			continue
+		}
+		t.EDUs = append(t.EDUs, *edu.edu)
+		eduReceipts = append(eduReceipts, edu.receipt)
+	}
+
+	return t, pduReceipts, eduReceipts
+}
+
+// blacklistDestination removes all pending PDUs and EDUs that have been cached
+// and deletes this queue.
+func (oq *destinationQueue) blacklistDestination() {
+	// It's been suggested that we should give up because the backoff
+	// has exceeded a maximum allowable value. Clean up the in-memory
+	// buffers at this point. The PDU clean-up is already on a defer.
+	logrus.Warnf("Blacklisting %q due to exceeding backoff threshold", oq.destination)
+
+	oq.pendingMutex.Lock()
+	for i := range oq.pendingPDUs {
+		oq.pendingPDUs[i] = nil
+	}
+	for i := range oq.pendingEDUs {
+		oq.pendingEDUs[i] = nil
+	}
+	oq.pendingPDUs = nil
+	oq.pendingEDUs = nil
+	oq.pendingMutex.Unlock()
+
+	// Delete this queue as no more messages will be sent to this
+	// destination until it is no longer blacklisted.
+	oq.statistics.AssignBackoffNotifier(nil)
+	oq.queues.clearQueue(oq)
+}
+
+// handleTransactionSuccess updates the cached event queues as well as the success and
+// backoff information for this server.
+func (oq *destinationQueue) handleTransactionSuccess(pduCount int, eduCount int) {
+	// If we successfully sent the transaction then clear out
+	// the pending events and EDUs, and wipe our transaction ID.
+	oq.statistics.Success()
+	oq.pendingMutex.Lock()
+	defer oq.pendingMutex.Unlock()
+
+	for i := range oq.pendingPDUs[:pduCount] {
+		oq.pendingPDUs[i] = nil
+	}
+	for i := range oq.pendingEDUs[:eduCount] {
+		oq.pendingEDUs[i] = nil
+	}
+	oq.pendingPDUs = oq.pendingPDUs[pduCount:]
+	oq.pendingEDUs = oq.pendingEDUs[eduCount:]
+
+	if len(oq.pendingPDUs) > 0 || len(oq.pendingEDUs) > 0 {
+		select {
+		case oq.notify <- struct{}{}:
+		default:
+		}
+	}
+}
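handleTransactionSuccess nils the delivered entries before re-slicing the pending slices. Re-slicing alone would keep the shared backing array, and with it the already-sent events, reachable; clearing the pointers first lets the garbage collector reclaim them. A reduced example of the same pattern:

// Clearing the sent pointers before advancing the slice lets the backing
// array stop pinning the delivered payloads in memory.
package main

import "fmt"

type queuedPDU struct{ payload []byte }

func main() {
	pending := []*queuedPDU{{make([]byte, 1)}, {make([]byte, 1)}, {make([]byte, 1)}}
	sent := 2

	for i := range pending[:sent] {
		pending[i] = nil // drop references held by the shared backing array
	}
	pending = pending[sent:]

	fmt.Println("still pending:", len(pending)) // 1
}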
@@ -162,23 +162,25 @@ func (oqs *OutgoingQueues) getQueue(destination gomatrixserverlib.ServerName) *d
 	if !ok || oq == nil {
 		destinationQueueTotal.Inc()
 		oq = &destinationQueue{
 			queues:      oqs,
 			db:          oqs.db,
 			process:     oqs.process,
 			rsAPI:       oqs.rsAPI,
 			origin:      oqs.origin,
 			destination: destination,
 			client:      oqs.client,
 			statistics:  oqs.statistics.ForServer(destination),
 			notify:      make(chan struct{}, 1),
-			interruptBackoff: make(chan bool),
-			signing:          oqs.signing,
+			signing:     oqs.signing,
 		}
+		oq.statistics.AssignBackoffNotifier(oq.handleBackoffNotifier)
 		oqs.queues[destination] = oq
 	}
 	return oq
 }
 
+// clearQueue removes the queue for the provided destination from the
+// set of destination queues.
 func (oqs *OutgoingQueues) clearQueue(oq *destinationQueue) {
 	oqs.queuesMutex.Lock()
 	defer oqs.queuesMutex.Unlock()
@@ -332,7 +334,9 @@ func (oqs *OutgoingQueues) RetryServer(srv gomatrixserverlib.ServerName) {
 	if oqs.disabled {
 		return
 	}
+	oqs.statistics.ForServer(srv).RemoveBlacklist()
 	if queue := oqs.getQueue(srv); queue != nil {
+		queue.statistics.ClearBackoff()
 		queue.wakeQueueIfNeeded()
 	}
 }
federationapi/queue/queue_test.go: new file, 1050 lines added.
File diff suppressed because it is too large.
@@ -2,6 +2,7 @@ package statistics
 
 import (
 	"math"
+	"math/rand"
 	"sync"
 	"time"
@@ -20,12 +21,23 @@ type Statistics struct {
 	servers map[gomatrixserverlib.ServerName]*ServerStatistics
 	mutex   sync.RWMutex
 
+	backoffTimers map[gomatrixserverlib.ServerName]*time.Timer
+	backoffMutex  sync.RWMutex
+
 	// How many times should we tolerate consecutive failures before we
 	// just blacklist the host altogether? The backoff is exponential,
 	// so the max time here to attempt is 2**failures seconds.
 	FailuresUntilBlacklist uint32
 }
 
+func NewStatistics(db storage.Database, failuresUntilBlacklist uint32) Statistics {
+	return Statistics{
+		DB:                     db,
+		FailuresUntilBlacklist: failuresUntilBlacklist,
+		backoffTimers:          make(map[gomatrixserverlib.ServerName]*time.Timer),
+	}
+}
+
 // ForServer returns server statistics for the given server name. If it
 // does not exist, it will create empty statistics and return those.
 func (s *Statistics) ForServer(serverName gomatrixserverlib.ServerName) *ServerStatistics {
@@ -45,7 +57,6 @@ func (s *Statistics) ForServer(serverName gomatrixserverlib.ServerName) *ServerS
 		server = &ServerStatistics{
 			statistics: s,
 			serverName: serverName,
-			interrupt:  make(chan struct{}),
 		}
 		s.servers[serverName] = server
 		s.mutex.Unlock()
@ -64,29 +75,43 @@ func (s *Statistics) ForServer(serverName gomatrixserverlib.ServerName) *ServerS
|
||||||
// many times we failed etc. It also manages the backoff time and black-
|
// many times we failed etc. It also manages the backoff time and black-
|
||||||
// listing a remote host if it remains uncooperative.
|
// listing a remote host if it remains uncooperative.
|
||||||
type ServerStatistics struct {
|
type ServerStatistics struct {
|
||||||
statistics *Statistics //
|
statistics *Statistics //
|
||||||
serverName gomatrixserverlib.ServerName //
|
serverName gomatrixserverlib.ServerName //
|
||||||
blacklisted atomic.Bool // is the node blacklisted
|
blacklisted atomic.Bool // is the node blacklisted
|
||||||
backoffStarted atomic.Bool // is the backoff started
|
backoffStarted atomic.Bool // is the backoff started
|
||||||
backoffUntil atomic.Value // time.Time until this backoff interval ends
|
backoffUntil atomic.Value // time.Time until this backoff interval ends
|
||||||
backoffCount atomic.Uint32 // number of times BackoffDuration has been called
|
backoffCount atomic.Uint32 // number of times BackoffDuration has been called
|
||||||
interrupt chan struct{} // interrupts the backoff goroutine
|
successCounter atomic.Uint32 // how many times have we succeeded?
|
||||||
successCounter atomic.Uint32 // how many times have we succeeded?
|
backoffNotifier func() // notifies destination queue when backoff completes
|
||||||
|
notifierMutex sync.Mutex
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const maxJitterMultiplier = 1.4
|
||||||
|
const minJitterMultiplier = 0.8
|
||||||
|
|
||||||
// duration returns how long the next backoff interval should be.
|
// duration returns how long the next backoff interval should be.
|
||||||
func (s *ServerStatistics) duration(count uint32) time.Duration {
|
func (s *ServerStatistics) duration(count uint32) time.Duration {
|
||||||
return time.Second * time.Duration(math.Exp2(float64(count)))
|
// Add some jitter to minimise the chance of having multiple backoffs
|
||||||
|
// ending at the same time.
|
||||||
|
jitter := rand.Float64()*(maxJitterMultiplier-minJitterMultiplier) + minJitterMultiplier
|
||||||
|
duration := time.Millisecond * time.Duration(math.Exp2(float64(count))*jitter*1000)
|
||||||
|
return duration
|
||||||
}
|
}
|
||||||
|
|
||||||
// cancel will interrupt the currently active backoff.
|
// cancel will interrupt the currently active backoff.
|
||||||
func (s *ServerStatistics) cancel() {
|
func (s *ServerStatistics) cancel() {
|
||||||
s.blacklisted.Store(false)
|
s.blacklisted.Store(false)
|
||||||
s.backoffUntil.Store(time.Time{})
|
s.backoffUntil.Store(time.Time{})
|
||||||
select {
|
|
||||||
case s.interrupt <- struct{}{}:
|
s.ClearBackoff()
|
||||||
default:
|
}
|
||||||
}
|
|
||||||
|
// AssignBackoffNotifier configures the channel to send to when
|
||||||
|
// a backoff completes.
|
||||||
|
func (s *ServerStatistics) AssignBackoffNotifier(notifier func()) {
|
||||||
|
s.notifierMutex.Lock()
|
||||||
|
defer s.notifierMutex.Unlock()
|
||||||
|
s.backoffNotifier = notifier
|
||||||
}
|
}
|
||||||
|
|
||||||
// Success updates the server statistics with a new successful
|
// Success updates the server statistics with a new successful
|
||||||
|
|
@ -95,8 +120,8 @@ func (s *ServerStatistics) cancel() {
|
||||||
// we will unblacklist it.
|
// we will unblacklist it.
|
||||||
func (s *ServerStatistics) Success() {
|
func (s *ServerStatistics) Success() {
|
||||||
s.cancel()
|
s.cancel()
|
||||||
s.successCounter.Inc()
|
|
||||||
s.backoffCount.Store(0)
|
s.backoffCount.Store(0)
|
||||||
|
s.successCounter.Inc()
|
||||||
if s.statistics.DB != nil {
|
if s.statistics.DB != nil {
|
||||||
if err := s.statistics.DB.RemoveServerFromBlacklist(s.serverName); err != nil {
|
if err := s.statistics.DB.RemoveServerFromBlacklist(s.serverName); err != nil {
|
||||||
logrus.WithError(err).Errorf("Failed to remove %q from blacklist", s.serverName)
|
logrus.WithError(err).Errorf("Failed to remove %q from blacklist", s.serverName)
|
||||||
|
|
@ -105,13 +130,17 @@ func (s *ServerStatistics) Success() {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Failure marks a failure and starts backing off if needed.
|
// Failure marks a failure and starts backing off if needed.
|
||||||
// The next call to BackoffIfRequired will do the right thing
|
// It will return the time that the current failure
|
||||||
// after this. It will return the time that the current failure
|
|
||||||
// will result in backoff waiting until, and a bool signalling
|
// will result in backoff waiting until, and a bool signalling
|
||||||
// whether we have blacklisted and therefore to give up.
|
// whether we have blacklisted and therefore to give up.
|
||||||
func (s *ServerStatistics) Failure() (time.Time, bool) {
|
func (s *ServerStatistics) Failure() (time.Time, bool) {
|
||||||
|
// Return immediately if we have blacklisted this node.
|
||||||
|
if s.blacklisted.Load() {
|
||||||
|
return time.Time{}, true
|
||||||
|
}
|
||||||
|
|
||||||
// If we aren't already backing off, this call will start
|
// If we aren't already backing off, this call will start
|
||||||
// a new backoff period. Increase the failure counter and
|
// a new backoff period, increase the failure counter and
|
||||||
// start a goroutine which will wait out the backoff and
|
// start a goroutine which will wait out the backoff and
|
||||||
// unset the backoffStarted flag when done.
|
// unset the backoffStarted flag when done.
|
||||||
if s.backoffStarted.CompareAndSwap(false, true) {
|
if s.backoffStarted.CompareAndSwap(false, true) {
|
||||||
|
|
@ -122,40 +151,48 @@ func (s *ServerStatistics) Failure() (time.Time, bool) {
|
||||||
logrus.WithError(err).Errorf("Failed to add %q to blacklist", s.serverName)
|
logrus.WithError(err).Errorf("Failed to add %q to blacklist", s.serverName)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
s.ClearBackoff()
|
||||||
return time.Time{}, true
|
return time.Time{}, true
|
||||||
}
|
}
|
||||||
|
|
||||||
go func() {
|
// We're starting a new back off so work out what the next interval
|
||||||
until, ok := s.backoffUntil.Load().(time.Time)
|
// will be.
|
||||||
if ok && !until.IsZero() {
|
count := s.backoffCount.Load()
|
||||||
select {
|
until := time.Now().Add(s.duration(count))
|
||||||
case <-time.After(time.Until(until)):
|
s.backoffUntil.Store(until)
|
||||||
case <-s.interrupt:
|
|
||||||
}
|
s.statistics.backoffMutex.Lock()
|
||||||
s.backoffStarted.Store(false)
|
defer s.statistics.backoffMutex.Unlock()
|
||||||
}
|
s.statistics.backoffTimers[s.serverName] = time.AfterFunc(time.Until(until), s.backoffFinished)
|
||||||
}()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check if we have blacklisted this node.
|
return s.backoffUntil.Load().(time.Time), false
|
||||||
if s.blacklisted.Load() {
|
}
|
||||||
return time.Now(), true
|
|
||||||
}
|
|
||||||
|
|
||||||
// If we're already backing off and we haven't yet surpassed
|
// ClearBackoff stops the backoff timer for this destination if it is running
|
||||||
// the deadline then return that. Repeated calls to Failure
|
// and removes the timer from the backoffTimers map.
|
||||||
// within a single backoff interval will have no side effects.
|
func (s *ServerStatistics) ClearBackoff() {
|
||||||
if until, ok := s.backoffUntil.Load().(time.Time); ok && !time.Now().After(until) {
|
// If the timer is still running then stop it so it's memory is cleaned up sooner.
|
||||||
return until, false
|
s.statistics.backoffMutex.Lock()
|
||||||
|
defer s.statistics.backoffMutex.Unlock()
|
||||||
|
if timer, ok := s.statistics.backoffTimers[s.serverName]; ok {
|
||||||
|
timer.Stop()
|
||||||
}
|
}
|
||||||
|
delete(s.statistics.backoffTimers, s.serverName)
|
||||||
|
|
||||||
// We're either backing off and have passed the deadline, or
|
s.backoffStarted.Store(false)
|
||||||
// we aren't backing off, so work out what the next interval
|
}
|
||||||
// will be.
|
|
||||||
count := s.backoffCount.Load()
|
// backoffFinished will clear the previous backoff and notify the destination queue.
|
||||||
until := time.Now().Add(s.duration(count))
|
func (s *ServerStatistics) backoffFinished() {
|
||||||
s.backoffUntil.Store(until)
|
s.ClearBackoff()
|
||||||
return until, false
|
|
||||||
|
// Notify the destinationQueue if one is currently running.
|
||||||
|
s.notifierMutex.Lock()
|
||||||
|
defer s.notifierMutex.Unlock()
|
||||||
|
if s.backoffNotifier != nil {
|
||||||
|
s.backoffNotifier()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// BackoffInfo returns information about the current or previous backoff.
|
// BackoffInfo returns information about the current or previous backoff.
|
||||||
|
|
@ -174,6 +211,12 @@ func (s *ServerStatistics) Blacklisted() bool {
|
||||||
return s.blacklisted.Load()
|
return s.blacklisted.Load()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// RemoveBlacklist removes the blacklisted status from the server.
|
||||||
|
func (s *ServerStatistics) RemoveBlacklist() {
|
||||||
|
s.cancel()
|
||||||
|
s.backoffCount.Store(0)
|
||||||
|
}
|
||||||
|
|
||||||
// SuccessCount returns the number of successful requests. This is
|
// SuccessCount returns the number of successful requests. This is
|
||||||
// usually useful in constructing transaction IDs.
|
// usually useful in constructing transaction IDs.
|
||||||
func (s *ServerStatistics) SuccessCount() uint32 {
|
func (s *ServerStatistics) SuccessCount() uint32 {
|
||||||
|
|
|
||||||
|
|
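The rewritten duration function replaces the fixed 2^count seconds backoff with a jittered value: the exponential base is scaled by a random multiplier drawn from [minJitterMultiplier, maxJitterMultiplier) = [0.8, 1.4), so retries against many failing hosts no longer all fire at the same instant. Because the wait is now a time.AfterFunc timer stored in backoffTimers rather than a goroutine blocking on a channel, ClearBackoff can stop and drop it, and backoffFinished pushes the wake-up to the destination queue through the registered notifier. A standalone sketch of the same calculation follows; only the constant values and the arithmetic are taken from the diff, the wrapper program is illustrative:

package main

import (
	"fmt"
	"math"
	"math/rand"
	"time"
)

const (
	minJitterMultiplier = 0.8
	maxJitterMultiplier = 1.4
)

// jitteredBackoff mirrors the duration() logic above: 2^count seconds,
// scaled by a random factor in [0.8, 1.4).
func jitteredBackoff(count uint32) time.Duration {
	jitter := rand.Float64()*(maxJitterMultiplier-minJitterMultiplier) + minJitterMultiplier
	return time.Millisecond * time.Duration(math.Exp2(float64(count))*jitter*1000)
}

func main() {
	for count := uint32(1); count <= 5; count++ {
		lo := time.Millisecond * time.Duration(math.Exp2(float64(count))*minJitterMultiplier*1000)
		hi := time.Millisecond * time.Duration(math.Exp2(float64(count))*maxJitterMultiplier*1000)
		fmt.Printf("failure %d: backoff %s (expected range %s..%s)\n", count, jitteredBackoff(count), lo, hi)
	}
}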
@@ -7,9 +7,7 @@ import (
 )
 
 func TestBackoff(t *testing.T) {
-	stats := Statistics{
-		FailuresUntilBlacklist: 7,
-	}
+	stats := NewStatistics(nil, 7)
 	server := ServerStatistics{
 		statistics: &stats,
 		serverName: "test.com",

@@ -36,7 +34,7 @@ func TestBackoff(t *testing.T) {
 
 		// Get the duration.
 		_, blacklist := server.BackoffInfo()
-		duration := time.Until(until).Round(time.Second)
+		duration := time.Until(until)
 
 		// Unset the backoff, or otherwise our next call will think that
 		// there's a backoff in progress and return the same result.

@@ -57,8 +55,17 @@ func TestBackoff(t *testing.T) {
 
 		// Check if the duration is what we expect.
 		t.Logf("Backoff %d is for %s", i, duration)
-		if wanted := time.Second * time.Duration(math.Exp2(float64(i))); !blacklist && duration != wanted {
-			t.Fatalf("Backoff %d should have been %s but was %s", i, wanted, duration)
+		roundingAllowance := 0.01
+		minDuration := time.Millisecond * time.Duration(math.Exp2(float64(i))*minJitterMultiplier*1000-roundingAllowance)
+		maxDuration := time.Millisecond * time.Duration(math.Exp2(float64(i))*maxJitterMultiplier*1000+roundingAllowance)
+		var inJitterRange bool
+		if duration >= minDuration && duration <= maxDuration {
+			inJitterRange = true
+		} else {
+			inJitterRange = false
+		}
+		if !blacklist && !inJitterRange {
+			t.Fatalf("Backoff %d should have been between %s and %s but was %s", i, minDuration, maxDuration, duration)
 		}
 	}
 }
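As a concrete check of the updated assertion: on the third failure (i = 3) the exponential base is 2^3 = 8 s, so the test now accepts any duration between 0.8 × 8 s = 6.4 s and 1.4 × 8 s = 11.2 s (give or take the 0.01 ms rounding allowance), whereas the old assertion required the second-rounded duration to be exactly 8 s.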
@@ -52,6 +52,10 @@ type Receipt struct {
 	nid int64
 }
 
+func NewReceipt(nid int64) Receipt {
+	return Receipt{nid: nid}
+}
+
 func (r *Receipt) String() string {
 	return fmt.Sprintf("%d", r.nid)
 }
59	go.mod

@@ -8,26 +8,26 @@ require (
 	github.com/Masterminds/semver/v3 v3.1.1
 	github.com/blevesearch/bleve/v2 v2.3.4
 	github.com/codeclysm/extract v2.2.0+incompatible
-	github.com/dgraph-io/ristretto v0.1.1-0.20220403145359-8e850b710d6d
+	github.com/dgraph-io/ristretto v0.1.1
-	github.com/docker/docker v20.10.18+incompatible
+	github.com/docker/docker v20.10.19+incompatible
 	github.com/docker/go-connections v0.4.0
-	github.com/getsentry/sentry-go v0.13.0
+	github.com/getsentry/sentry-go v0.14.0
 	github.com/gologme/log v1.3.0
 	github.com/google/go-cmp v0.5.9
 	github.com/google/uuid v1.3.0
 	github.com/gorilla/mux v1.8.0
 	github.com/gorilla/websocket v1.5.0
-	github.com/kardianos/minwinsvc v1.0.0
+	github.com/kardianos/minwinsvc v1.0.2
 	github.com/lib/pq v1.10.7
 	github.com/matrix-org/dugong v0.0.0-20210921133753-66e6b1c67e2e
 	github.com/matrix-org/go-sqlite3-js v0.0.0-20220419092513-28aa791a1c91
-	github.com/matrix-org/gomatrix v0.0.0-20210324163249-be2af5ef2e16
+	github.com/matrix-org/gomatrix v0.0.0-20220926102614-ceba4d9f7530
-	github.com/matrix-org/gomatrixserverlib v0.0.0-20221011115330-49fa704b9a64
+	github.com/matrix-org/gomatrixserverlib v0.0.0-20221021091412-7c772f1b388a
-	github.com/matrix-org/pinecone v0.0.0-20220929155234-2ce51dd4a42c
+	github.com/matrix-org/pinecone v0.0.0-20221007145426-3adc85477dd3
 	github.com/matrix-org/util v0.0.0-20200807132607-55161520e1d4
 	github.com/mattn/go-sqlite3 v1.14.15
-	github.com/nats-io/nats-server/v2 v2.9.2
+	github.com/nats-io/nats-server/v2 v2.9.3
-	github.com/nats-io/nats.go v1.17.0
+	github.com/nats-io/nats.go v1.18.0
 	github.com/neilalexander/utp v0.1.1-0.20210727203401-54ae7b1cd5f9
 	github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646
 	github.com/ngrok/sqlmw v0.0.0-20220520173518-97c9c04efc79

@@ -43,25 +43,25 @@ require (
 	github.com/uber/jaeger-lib v2.4.1+incompatible
 	github.com/yggdrasil-network/yggdrasil-go v0.4.5-0.20220901155642-4f2abece817c
 	go.uber.org/atomic v1.10.0
-	golang.org/x/crypto v0.0.0-20220926161630-eccd6366d1be
+	golang.org/x/crypto v0.0.0-20221012134737-56aed061732a
 	golang.org/x/image v0.0.0-20220902085622-e7cb96979f69
-	golang.org/x/mobile v0.0.0-20220722155234-aaac322e2105
+	golang.org/x/mobile v0.0.0-20221012134814-c746ac228303
-	golang.org/x/net v0.0.0-20220919232410-f2f64ebce3c1
+	golang.org/x/net v0.0.0-20221014081412-f15817d10f9b
 	golang.org/x/term v0.0.0-20220919170432-7a66f970e087
 	gopkg.in/h2non/bimg.v1 v1.1.9
 	gopkg.in/yaml.v2 v2.4.0
+	gotest.tools/v3 v3.4.0
 	nhooyr.io/websocket v1.8.7
 )
 
 require (
-	github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
 	github.com/HdrHistogram/hdrhistogram-go v1.1.2 // indirect
-	github.com/Microsoft/go-winio v0.5.2 // indirect
+	github.com/Microsoft/go-winio v0.6.0 // indirect
 	github.com/RoaringBitmap/roaring v1.2.1 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/bits-and-blooms/bitset v1.3.3 // indirect
 	github.com/blevesearch/bleve_index_api v1.0.3 // indirect
-	github.com/blevesearch/geo v0.1.14 // indirect
+	github.com/blevesearch/geo v0.1.15 // indirect
 	github.com/blevesearch/go-porterstemmer v1.0.3 // indirect
 	github.com/blevesearch/gtreap v0.1.1 // indirect
 	github.com/blevesearch/mmap-go v1.0.4 // indirect

@@ -69,7 +69,7 @@ require (
 	github.com/blevesearch/segment v0.9.0 // indirect
 	github.com/blevesearch/snowballstem v0.9.0 // indirect
 	github.com/blevesearch/upsidedown_store_api v1.0.1 // indirect
-	github.com/blevesearch/vellum v1.0.8 // indirect
+	github.com/blevesearch/vellum v1.0.9 // indirect
 	github.com/blevesearch/zapx/v11 v11.3.5 // indirect
 	github.com/blevesearch/zapx/v12 v12.3.5 // indirect
 	github.com/blevesearch/zapx/v13 v13.3.5 // indirect

@@ -80,7 +80,7 @@ require (
 	github.com/docker/distribution v2.8.1+incompatible // indirect
 	github.com/docker/go-units v0.5.0 // indirect
 	github.com/dustin/go-humanize v1.0.0 // indirect
-	github.com/fsnotify/fsnotify v1.5.4 // indirect
+	github.com/fsnotify/fsnotify v1.6.0 // indirect
 	github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/geo v0.0.0-20210211234256-740aa86cb551 // indirect

@@ -92,13 +92,13 @@ require (
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/juju/errors v1.0.0 // indirect
 	github.com/klauspost/compress v1.15.11 // indirect
-	github.com/kr/pretty v0.3.0 // indirect
+	github.com/kr/pretty v0.3.1 // indirect
-	github.com/lucas-clemente/quic-go v0.29.0 // indirect
+	github.com/lucas-clemente/quic-go v0.29.2 // indirect
-	github.com/marten-seemann/qtls-go1-18 v0.1.2 // indirect
+	github.com/marten-seemann/qtls-go1-18 v0.1.3 // indirect
-	github.com/marten-seemann/qtls-go1-19 v0.1.0 // indirect
+	github.com/marten-seemann/qtls-go1-19 v0.1.1 // indirect
-	github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
+	github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect
 	github.com/minio/highwayhash v1.0.2 // indirect
-	github.com/moby/term v0.0.0-20210610120745-9d4ed1856297 // indirect
+	github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/morikuni/aec v1.0.0 // indirect

@@ -108,27 +108,26 @@ require (
 	github.com/nats-io/nuid v1.0.1 // indirect
 	github.com/nxadm/tail v1.4.8 // indirect
 	github.com/onsi/ginkgo v1.16.5 // indirect
-	github.com/onsi/gomega v1.17.0 // indirect
+	github.com/onsi/gomega v1.22.1 // indirect
 	github.com/opencontainers/go-digest v1.0.0 // indirect
-	github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect
+	github.com/opencontainers/image-spec v1.1.0-rc2 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
 	github.com/prometheus/client_model v0.2.0 // indirect
 	github.com/prometheus/common v0.37.0 // indirect
 	github.com/prometheus/procfs v0.8.0 // indirect
 	github.com/tidwall/match v1.1.1 // indirect
-	github.com/tidwall/pretty v1.2.0 // indirect
+	github.com/tidwall/pretty v1.2.1 // indirect
 	go.etcd.io/bbolt v1.3.6 // indirect
-	golang.org/x/exp v0.0.0-20220916125017-b168a2c6b86b // indirect
+	golang.org/x/exp v0.0.0-20221012211006-4de253d81b95 // indirect
 	golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
-	golang.org/x/sys v0.0.0-20220928140112-f11e5e49a4ec // indirect
+	golang.org/x/sys v0.0.0-20221013171732-95e765b1cc43 // indirect
-	golang.org/x/text v0.3.8-0.20211004125949-5bd84dd9b33b // indirect
+	golang.org/x/text v0.3.8 // indirect
 	golang.org/x/time v0.0.0-20220922220347-f3bd1da661af // indirect
 	golang.org/x/tools v0.1.12 // indirect
 	google.golang.org/protobuf v1.28.1 // indirect
 	gopkg.in/macaroon.v2 v2.1.0 // indirect
 	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
-	gotest.tools/v3 v3.0.3 // indirect
 )
 
 go 1.18
145	go.sum

@@ -42,7 +42,6 @@ github.com/Arceliar/ironwood v0.0.0-20220903132624-ee60c16bcfcf h1:kjPkmDHUTWUma
 github.com/Arceliar/ironwood v0.0.0-20220903132624-ee60c16bcfcf/go.mod h1:RP72rucOFm5udrnEzTmIWLRVGQiV/fSUAQXJ0RST/nk=
 github.com/Arceliar/phony v0.0.0-20210209235338-dde1a8dca979 h1:WndgpSW13S32VLQ3ugUxx2EnnWmgba1kCqPkd4Gk1yQ=
 github.com/Arceliar/phony v0.0.0-20210209235338-dde1a8dca979/go.mod h1:6Lkn+/zJilRMsKmbmG1RPoamiArC6HS73xbwRyp3UyI=
-github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
 github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
 github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=

@@ -55,8 +54,8 @@ github.com/MFAshby/stdemuxerhook v1.0.0 h1:1XFGzakrsHMv76AeanPDL26NOgwjPl/OUxbGh
 github.com/MFAshby/stdemuxerhook v1.0.0/go.mod h1:nLMI9FUf9Hz98n+yAXsTMUR4RZQy28uCTLG1Fzvj/uY=
 github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc=
 github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
-github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA=
+github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg=
-github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
+github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE=
 github.com/RoaringBitmap/roaring v0.4.7/go.mod h1:8khRDP4HmeXns4xIj9oGrKSz7XTQiJx2zgh7AcNke4w=
 github.com/RoaringBitmap/roaring v0.9.4/go.mod h1:icnadbWcNyfEHlYdr+tDlOTih1Bf/h+rzPpv4sbomAA=
 github.com/RoaringBitmap/roaring v1.2.1 h1:58/LJlg/81wfEHd5L9qsHduznOIhyv4qb1yWcSvVq9A=

@@ -95,8 +94,8 @@ github.com/blevesearch/bleve/v2 v2.3.4/go.mod h1:Ot0zYum8XQRfPcwhae8bZmNyYubynso
 github.com/blevesearch/bleve_index_api v1.0.3 h1:DDSWaPXOZZJ2BB73ZTWjKxydAugjwywcqU+91AAqcAg=
 github.com/blevesearch/bleve_index_api v1.0.3/go.mod h1:fiwKS0xLEm+gBRgv5mumf0dhgFr2mDgZah1pqv1c1M4=
 github.com/blevesearch/geo v0.1.13/go.mod h1:cRIvqCdk3cgMhGeHNNe6yPzb+w56otxbfo1FBJfR2Pc=
-github.com/blevesearch/geo v0.1.14 h1:TTDpJN6l9ck/cUYbXSn4aCElNls0Whe44rcQKsB7EfU=
+github.com/blevesearch/geo v0.1.15 h1:0NybEduqE5fduFRYiUKF0uqybAIFKXYjkBdXKYn7oA4=
-github.com/blevesearch/geo v0.1.14/go.mod h1:cRIvqCdk3cgMhGeHNNe6yPzb+w56otxbfo1FBJfR2Pc=
+github.com/blevesearch/geo v0.1.15/go.mod h1:cRIvqCdk3cgMhGeHNNe6yPzb+w56otxbfo1FBJfR2Pc=
 github.com/blevesearch/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:9eJDeqxJ3E7WnLebQUlPD7ZjSce7AnDb9vjGmMCbD0A=
 github.com/blevesearch/go-porterstemmer v1.0.3 h1:GtmsqID0aZdCSNiY8SkuPJ12pD4jI+DdXTAn4YRcHCo=
 github.com/blevesearch/go-porterstemmer v1.0.3/go.mod h1:angGc5Ht+k2xhJdZi511LtmxuEf0OVpvUUNrwmM1P7M=

@@ -115,8 +114,9 @@ github.com/blevesearch/snowballstem v0.9.0 h1:lMQ189YspGP6sXvZQ4WZ+MLawfV8wOmPoD
 github.com/blevesearch/snowballstem v0.9.0/go.mod h1:PivSj3JMc8WuaFkTSRDW2SlrulNWPl4ABg1tC/hlgLs=
 github.com/blevesearch/upsidedown_store_api v1.0.1 h1:1SYRwyoFLwG3sj0ed89RLtM15amfX2pXlYbFOnF8zNU=
 github.com/blevesearch/upsidedown_store_api v1.0.1/go.mod h1:MQDVGpHZrpe3Uy26zJBf/a8h0FZY6xJbthIMm8myH2Q=
-github.com/blevesearch/vellum v1.0.8 h1:iMGh4lfxza4BnWO/UJTMPlI3HsK9YawjPv+TteVa9ck=
 github.com/blevesearch/vellum v1.0.8/go.mod h1:+cpRi/tqq49xUYSQN2P7A5zNSNrS+MscLeeaZ3J46UA=
+github.com/blevesearch/vellum v1.0.9 h1:PL+NWVk3dDGPCV0hoDu9XLLJgqU4E5s/dOeEJByQ2uQ=
+github.com/blevesearch/vellum v1.0.9/go.mod h1:ul1oT0FhSMDIExNjIxHqJoGpVrBpKCdgDQNxfqgJt7k=
 github.com/blevesearch/zapx/v11 v11.3.5 h1:eBQWQ7huA+mzm0sAGnZDwgGGli7S45EO+N+ObFWssbI=
 github.com/blevesearch/zapx/v11 v11.3.5/go.mod h1:5UdIa/HRMdeRCiLQOyFESsnqBGiip7vQmYReA9toevU=
 github.com/blevesearch/zapx/v12 v12.3.5 h1:5pX2hU+R1aZihT7ac1dNWh1n4wqkIM9pZzWp0ANED9s=

@@ -157,14 +157,14 @@ github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dgraph-io/ristretto v0.1.1-0.20220403145359-8e850b710d6d h1:Wrc3UKTS+cffkOx0xRGFC+ZesNuTfn0ThvEC72N0krk=
+github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8=
-github.com/dgraph-io/ristretto v0.1.1-0.20220403145359-8e850b710d6d/go.mod h1:RAy2GVV4sTWVlNMavv3xhLsk18rxhfhDnombTe6EF5c=
+github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA=
 github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=
 github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
 github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
 github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v20.10.18+incompatible h1:SN84VYXTBNGn92T/QwIRPlum9zfemfitN7pbsp26WSc=
+github.com/docker/docker v20.10.19+incompatible h1:lzEmjivyNHFHMNAFLXORMBXyGIhw/UP4DvJwvyKYq64=
-github.com/docker/docker v20.10.18+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v20.10.19+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
 github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
 github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=

@@ -184,20 +184,21 @@ github.com/frankban/quicktest v1.0.0/go.mod h1:R98jIehRai+d1/3Hv2//jOVCTJhW1VBav
 github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
-github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI=
 github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
-github.com/getsentry/sentry-go v0.13.0 h1:20dgTiUSfxRB/EhMPtxcL9ZEbM1ZdR+W/7f7NWD+xWo=
+github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
-github.com/getsentry/sentry-go v0.13.0/go.mod h1:EOsfu5ZdvKPfeHYV6pTVQnsjfp30+XA7//UooKNumH0=
+github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
+github.com/getsentry/sentry-go v0.14.0 h1:rlOBkuFZRKKdUnKO+0U3JclRDQKlRu5vVQtkWSQvC70=
+github.com/getsentry/sentry-go v0.14.0/go.mod h1:RZPJKSw+adu8PBNygiri/A98FqVr2HtRckJk9XVxJ9I=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
 github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
 github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M=
-github.com/gin-gonic/gin v1.7.7 h1:3DoBmSbJbZAWqXJC3SLjAPfutPJJRN1U5pALB7EeTTs=
+github.com/gin-gonic/gin v1.8.1 h1:4+fr/el88TOO3ewCmQr8cx/CtZ/umlIRIs5M4NTNjf8=
 github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
 github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
 github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
-github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w=
 github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
+github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
 github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=

@@ -210,12 +211,12 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V
 github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
 github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
 github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
-github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q=
 github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
-github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no=
+github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU=
 github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
+github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/jYrnRPArHwAcmLoJZxyho=
 github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI=
-github.com/go-playground/validator/v10 v10.4.1 h1:pH2c5ADXtd66mxoE0Zm9SUhxE20r7aM3F26W0hOn+GE=
+github.com/go-playground/validator/v10 v10.11.1 h1:prmOlTVv+YjZjmRmNSF3VmspqJIxJWXmqUsHwfTRRkQ=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
 github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
 github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=

@@ -225,6 +226,7 @@ github.com/gobwas/pool v0.2.0 h1:QEmUOlnSjWtnpRGHF3SauEiOsy82Cup83Vf2LcMlnc8=
 github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
 github.com/gobwas/ws v1.0.2 h1:CoAavW/wd/kulfZmSIBt6p24n4j7tHgNVCjsfHVNUbo=
 github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM=
+github.com/goccy/go-json v0.9.11 h1:/pAaQDLHEoCq/5FFmSKBswWmK6H0e8g4159Kc/X/nqk=
 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=

@@ -341,8 +343,8 @@ github.com/juju/errors v1.0.0/go.mod h1:B5x9thDqx0wIMH3+aLIMP9HjItInYWObRovoCFM5
 github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
 github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
 github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
-github.com/kardianos/minwinsvc v1.0.0 h1:+JfAi8IBJna0jY2dJGZqi7o15z13JelFIklJCAENALA=
+github.com/kardianos/minwinsvc v1.0.2 h1:JmZKFJQrmTGa/WiW+vkJXKmfzdjabuEW4Tirj5lLdR0=
-github.com/kardianos/minwinsvc v1.0.0/go.mod h1:Bgd0oc+D0Qo3bBytmNtyRKVlp85dAloLKhfxanPFFRc=
+github.com/kardianos/minwinsvc v1.0.2/go.mod h1:LUZNYhNmxujx2tR7FbdxqYJ9XDDoCd3MQcl1o//FWl4=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=

@@ -353,59 +355,60 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxv
 github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y=
 github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
+github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w=
 github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw=
 github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
 github.com/lucas-clemente/quic-go v0.28.1/go.mod h1:oGz5DKK41cJt5+773+BSO9BXDsREY4HLf7+0odGAPO0=
-github.com/lucas-clemente/quic-go v0.29.0 h1:Vw0mGTfmWqGzh4jx/kMymsIkFK6rErFVmg+t9RLrnZE=
+github.com/lucas-clemente/quic-go v0.29.2 h1:O8Mt0O6LpvEW+wfC40vZdcw0DngwYzoxq5xULZNzSI8=
-github.com/lucas-clemente/quic-go v0.29.0/go.mod h1:CTcNfLYJS2UuRNB+zcNlgvkjBhxX6Hm3WUxxAQx2mgE=
+github.com/lucas-clemente/quic-go v0.29.2/go.mod h1:g6/h9YMmLuU54tL1gW25uIi3VlBp3uv+sBihplIuskE=
 github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
 github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
 github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc=
 github.com/marten-seemann/qtls-go1-16 v0.1.5/go.mod h1:gNpI2Ol+lRS3WwSOtIUUtRwZEQMXjYK+dQSBFbethAk=
 github.com/marten-seemann/qtls-go1-17 v0.1.2/go.mod h1:C2ekUKcDdz9SDWxec1N/MvcXBpaX9l3Nx67XaR84L5s=
-github.com/marten-seemann/qtls-go1-18 v0.1.2 h1:JH6jmzbduz0ITVQ7ShevK10Av5+jBEKAHMntXmIV7kM=
 github.com/marten-seemann/qtls-go1-18 v0.1.2/go.mod h1:mJttiymBAByA49mhlNZZGrH5u1uXYZJ+RW28Py7f4m4=
+github.com/marten-seemann/qtls-go1-18 v0.1.3 h1:R4H2Ks8P6pAtUagjFty2p7BVHn3XiwDAl7TTQf5h7TI=
+github.com/marten-seemann/qtls-go1-18 v0.1.3/go.mod h1:mJttiymBAByA49mhlNZZGrH5u1uXYZJ+RW28Py7f4m4=
 github.com/marten-seemann/qtls-go1-19 v0.1.0-beta.1/go.mod h1:5HTDWtVudo/WFsHKRNuOhWlbdjrfs5JHrYb0wIJqGpI=
-github.com/marten-seemann/qtls-go1-19 v0.1.0 h1:rLFKD/9mp/uq1SYGYuVZhm83wkmU95pK5df3GufyYYU=
+github.com/marten-seemann/qtls-go1-19 v0.1.1 h1:mnbxeq3oEyQxQXwI4ReCgW9DPoPR94sNlqWoDZnjRIE=
-github.com/marten-seemann/qtls-go1-19 v0.1.0/go.mod h1:5HTDWtVudo/WFsHKRNuOhWlbdjrfs5JHrYb0wIJqGpI=
+github.com/marten-seemann/qtls-go1-19 v0.1.1/go.mod h1:5HTDWtVudo/WFsHKRNuOhWlbdjrfs5JHrYb0wIJqGpI=
 github.com/matrix-org/dugong v0.0.0-20210921133753-66e6b1c67e2e h1:DP5RC0Z3XdyBEW5dKt8YPeN6vZbm6OzVaGVp7f1BQRM=
 github.com/matrix-org/dugong v0.0.0-20210921133753-66e6b1c67e2e/go.mod h1:NgPCr+UavRGH6n5jmdX8DuqFZ4JiCWIJoZiuhTRLSUg=
 github.com/matrix-org/go-sqlite3-js v0.0.0-20220419092513-28aa791a1c91 h1:s7fexw2QV3YD/fRrzEDPNGgTlJlvXY0EHHnT87wF3OA=
 github.com/matrix-org/go-sqlite3-js v0.0.0-20220419092513-28aa791a1c91/go.mod h1:e+cg2q7C7yE5QnAXgzo512tgFh1RbQLC0+jozuegKgo=
-github.com/matrix-org/gomatrix v0.0.0-20210324163249-be2af5ef2e16 h1:ZtO5uywdd5dLDCud4r0r55eP4j9FuUNpl60Gmntcop4=
+github.com/matrix-org/gomatrix v0.0.0-20220926102614-ceba4d9f7530 h1:kHKxCOLcHH8r4Fzarl4+Y3K5hjothkVW5z7T1dUM11U=
-github.com/matrix-org/gomatrix v0.0.0-20210324163249-be2af5ef2e16/go.mod h1:/gBX06Kw0exX1HrwmoBibFA98yBk/jxKpGVeyQbff+s=
+github.com/matrix-org/gomatrix v0.0.0-20220926102614-ceba4d9f7530/go.mod h1:/gBX06Kw0exX1HrwmoBibFA98yBk/jxKpGVeyQbff+s=
-github.com/matrix-org/gomatrixserverlib v0.0.0-20221011115330-49fa704b9a64 h1:QJmfAPC3P0ZHJzYD/QtbNc5EztKlK1ipRWP5SO/m4jw=
+github.com/matrix-org/gomatrixserverlib v0.0.0-20221021091412-7c772f1b388a h1:6rJFN5NBuzZ7h5meYkLtXKa6VFZfDc8oVXHd4SDXr5o=
-github.com/matrix-org/gomatrixserverlib v0.0.0-20221011115330-49fa704b9a64/go.mod h1:Mtifyr8q8htcBeugvlDnkBcNUy5LO8OzUoplAf1+mb4=
+github.com/matrix-org/gomatrixserverlib v0.0.0-20221021091412-7c772f1b388a/go.mod h1:Mtifyr8q8htcBeugvlDnkBcNUy5LO8OzUoplAf1+mb4=
-github.com/matrix-org/pinecone v0.0.0-20220929155234-2ce51dd4a42c h1:iCHLYwwlPsf4TYFrvhKdhQoAM2lXzcmDZYqwBNWcnVk=
+github.com/matrix-org/pinecone v0.0.0-20221007145426-3adc85477dd3 h1:lzkSQvBv8TuqKJCPoVwOVvEnARTlua5rrNy/Qw2Vxeo=
-github.com/matrix-org/pinecone v0.0.0-20220929155234-2ce51dd4a42c/go.mod h1:K0N1ixHQxXoCyqolDqVxPM3ArrDtcMs8yegOx2Lfv9k=
+github.com/matrix-org/pinecone v0.0.0-20221007145426-3adc85477dd3/go.mod h1:K0N1ixHQxXoCyqolDqVxPM3ArrDtcMs8yegOx2Lfv9k=
 github.com/matrix-org/util v0.0.0-20200807132607-55161520e1d4 h1:eCEHXWDv9Rm335MSuB49mFUK44bwZPFSDde3ORE3syk=
 github.com/matrix-org/util v0.0.0-20200807132607-55161520e1d4/go.mod h1:vVQlW/emklohkZnOPwD3LrZUBqdfsbiyO3p1lNV8F6U=
 github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
-github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
+github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
 github.com/mattn/go-sqlite3 v1.14.15 h1:vfoHhTN1af61xCRSWzFIWzx2YskyMTwHLrExkBOjvxI=
 github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
 github.com/mattomatic/dijkstra v0.0.0-20130617153013-6f6d134eb237/go.mod h1:UOnLAUmVG5paym8pD3C4B9BQylUDC2vXFJJpT7JrlEA=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
+github.com/matttproud/golang_protobuf_extensions v1.0.2 h1:hAHbPm5IJGijwng3PWk09JkG9WeqChjprR5s9bBZ+OM=
-github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/matttproud/golang_protobuf_extensions v1.0.2/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
 github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
 github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA=
 github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g=
 github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
 github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
 github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/moby/term v0.0.0-20210610120745-9d4ed1856297 h1:yH0SvLzcbZxcJXho2yh7CqdENGMQe73Cw3woZBpPli0=
+github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae h1:O4SWKdcHVCvYqyDV+9CJA1fcDN2L11Bule0iFy3YlAI=
-github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A=
+github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=

@@ -422,10 +425,10 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW
 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/nats-io/jwt/v2 v2.3.0 h1:z2mA1a7tIf5ShggOFlR1oBPgd6hGqcDYsISxZByUzdI=
 github.com/nats-io/jwt/v2 v2.3.0/go.mod h1:0tqz9Hlu6bCBFLWAASKhE5vUA4c24L9KPUUgvwumE/k=
-github.com/nats-io/nats-server/v2 v2.9.2 h1:XNDgJgOYYaYlquLdbSHI3xssLipfKUOq3EmYIMNCOsE=
+github.com/nats-io/nats-server/v2 v2.9.3 h1:HrfzA7G9LNetKkm1z+jU/e9kuAe+E6uaBuuq9EB5sQQ=
-github.com/nats-io/nats-server/v2 v2.9.2/go.mod h1:4sq8wvrpbvSzL1n3ZfEYnH4qeUuIl5W990j3kw13rRk=
+github.com/nats-io/nats-server/v2 v2.9.3/go.mod h1:4sq8wvrpbvSzL1n3ZfEYnH4qeUuIl5W990j3kw13rRk=
-github.com/nats-io/nats.go v1.17.0 h1:1jp5BThsdGlN91hW0k3YEfJbfACjiOYtUiLXG0RL4IE=
+github.com/nats-io/nats.go v1.18.0 h1:o480Ao6kuSSFyJO75rGTXCEPj7LGkY84C1Ye+Uhm4c0=
-github.com/nats-io/nats.go v1.17.0/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w=
+github.com/nats-io/nats.go v1.18.0/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w=
 github.com/nats-io/nkeys v0.3.0 h1:cgM5tL53EvYRU+2YLXIK0G2mJtK12Ft9oeooSZMA2G8=
 github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4=
 github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=

@@ -454,20 +457,23 @@ github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa
 github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
 github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
 github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY=
-github.com/onsi/gomega v1.17.0 h1:9Luw4uT5HTjHTN8+aNcSThgH1vdXnmdJ8xIfZ4wyTRE=
+github.com/onsi/gomega v1.22.1 h1:pY8O4lBfsHKZHM/6nrxkhVPUznOlIu3quZcKP/M20KI=
-github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
+github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ1tuM=
 github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
-github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 h1:rc3tiVYb5z54aKaDfakKn0dDjIyPpTtszkjuMzyt7ec=
+github.com/opencontainers/image-spec v1.1.0-rc2 h1:2zx/Stx4Wc5pIPDvIxHXvXtQFW/7XWJGmnM7r3wg034=
-github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.1.0-rc2/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ=
 github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
|
||||||
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
|
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
|
||||||
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
|
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
|
||||||
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
|
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
|
||||||
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
|
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
|
||||||
|
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
|
||||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||||
|
github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg=
|
||||||
github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
|
github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
|
||||||
github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
|
github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
|
||||||
|
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
|
||||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||||
|
|
@ -503,8 +509,8 @@ github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1
|
||||||
github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
|
github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
|
||||||
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
|
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
|
||||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||||
github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
|
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
|
||||||
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
|
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
||||||
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
||||||
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=
|
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=
|
||||||
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
||||||
|
|
@ -533,7 +539,6 @@ github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5k
|
||||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||||
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||||
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
|
||||||
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
|
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
|
||||||
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||||
|
|
@ -565,8 +570,9 @@ github.com/tidwall/gjson v1.14.3 h1:9jvXn7olKEHU1S9vwoMGliaT8jq1vJ7IH/n9zD9Dnlw=
|
||||||
github.com/tidwall/gjson v1.14.3/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
|
github.com/tidwall/gjson v1.14.3/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
|
||||||
github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
|
github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
|
||||||
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
|
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
|
||||||
github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs=
|
|
||||||
github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
|
github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
|
||||||
|
github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4=
|
||||||
|
github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
|
||||||
github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY=
|
github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY=
|
||||||
github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28=
|
github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28=
|
||||||
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
|
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
|
||||||
|
|
@ -577,8 +583,8 @@ github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6
|
||||||
github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo=
|
github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo=
|
||||||
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
|
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
|
||||||
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
|
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
|
||||||
github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs=
|
|
||||||
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
|
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
|
||||||
|
github.com/ugorji/go/codec v1.2.7 h1:YPXUKf7fYbp/y8xloBqZOw2qaVggbfwMlI8WM3wZUJ0=
|
||||||
github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
|
github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
|
||||||
github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
|
github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
|
||||||
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
|
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
|
||||||
|
|
@ -625,8 +631,8 @@ golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm
|
||||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||||
golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||||
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||||
golang.org/x/crypto v0.0.0-20220926161630-eccd6366d1be h1:fmw3UbQh+nxngCAHrDCCztao/kbYFnWjoqop8dHx05A=
|
golang.org/x/crypto v0.0.0-20221012134737-56aed061732a h1:NmSIgad6KjE6VvHciPZuNRTKxGhlPfD6OA87W/PLkqg=
|
||||||
golang.org/x/crypto v0.0.0-20220926161630-eccd6366d1be/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
golang.org/x/crypto v0.0.0-20221012134737-56aed061732a/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||||
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
|
|
@ -641,8 +647,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
|
||||||
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||||
golang.org/x/exp v0.0.0-20220916125017-b168a2c6b86b h1:SCE/18RnFsLrjydh/R/s5EVvHoZprqEQUuoxK8q2Pc4=
|
golang.org/x/exp v0.0.0-20221012211006-4de253d81b95 h1:sBdrWpxhGDdTAYNqbgBLAR+ULAPPhfgncLr1X0lyWtg=
|
||||||
golang.org/x/exp v0.0.0-20220916125017-b168a2c6b86b/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE=
|
golang.org/x/exp v0.0.0-20221012211006-4de253d81b95/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE=
|
||||||
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
|
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
|
||||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||||
|
|
@ -661,8 +667,9 @@ golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPI
|
||||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||||
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||||
golang.org/x/mobile v0.0.0-20220722155234-aaac322e2105 h1:3vUV5x5+3LfQbgk7paCM6INOaJG9xXQbn79xoNkwfIk=
|
|
||||||
golang.org/x/mobile v0.0.0-20220722155234-aaac322e2105/go.mod h1:pe2sM7Uk+2Su1y7u/6Z8KJ24D7lepUjFZbhFOrmDfuQ=
|
golang.org/x/mobile v0.0.0-20220722155234-aaac322e2105/go.mod h1:pe2sM7Uk+2Su1y7u/6Z8KJ24D7lepUjFZbhFOrmDfuQ=
|
||||||
|
golang.org/x/mobile v0.0.0-20221012134814-c746ac228303 h1:K4fp1rDuJBz0FCPAWzIJwnzwNEM7S6yobdZzMrZ/Zws=
|
||||||
|
golang.org/x/mobile v0.0.0-20221012134814-c746ac228303/go.mod h1:M32cGdzp91A8Ex9qQtyZinr19EYxzkFqDjW2oyHzTDQ=
|
||||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||||
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
||||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||||
|
|
@ -717,8 +724,8 @@ golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su
|
||||||
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||||
golang.org/x/net v0.0.0-20220728211354-c7608f3a8462/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
|
golang.org/x/net v0.0.0-20220728211354-c7608f3a8462/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
|
||||||
golang.org/x/net v0.0.0-20220919232410-f2f64ebce3c1 h1:TWZxd/th7FbRSMret2MVQdlI8uT49QEtwZdvJrxjEHU=
|
golang.org/x/net v0.0.0-20221014081412-f15817d10f9b h1:tvrvnPFcdzp294diPnrdZZZ8XUt2Tyj7svb7X52iDuU=
|
||||||
golang.org/x/net v0.0.0-20220919232410-f2f64ebce3c1/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
|
golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
|
||||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
|
|
@ -741,6 +748,7 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
|
||||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
|
@ -763,7 +771,6 @@ golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||||
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
|
@ -783,15 +790,14 @@ golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||||
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
|
@ -810,8 +816,10 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20220730100132-1609e554cd39/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220730100132-1609e554cd39/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20220928140112-f11e5e49a4ec h1:BkDtF2Ih9xZ7le9ndzTA7KJow28VbQW3odyk/8drmuI=
|
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20220928140112-f11e5e49a4ec/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20221013171732-95e765b1cc43 h1:OK7RB6t2WQX54srQQYSXMW8dF5C6/8+oA/s5QBmmto4=
|
||||||
|
golang.org/x/sys v0.0.0-20221013171732-95e765b1cc43/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||||
golang.org/x/term v0.0.0-20220919170432-7a66f970e087 h1:tPwmk4vmvVCMdr98VgL4JH+qZxPL8fqlUOHnyOM8N3w=
|
golang.org/x/term v0.0.0-20220919170432-7a66f970e087 h1:tPwmk4vmvVCMdr98VgL4JH+qZxPL8fqlUOHnyOM8N3w=
|
||||||
|
|
@ -823,8 +831,8 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||||
golang.org/x/text v0.3.8-0.20211004125949-5bd84dd9b33b h1:NXqSWXSRUSCaFuvitrWtU169I3876zRTalMRbfd6LL0=
|
golang.org/x/text v0.3.8 h1:nAL+RVCQ9uMn3vJZbV+MRnydTJFPf8qqY42YiA6MrqY=
|
||||||
golang.org/x/text v0.3.8-0.20211004125949-5bd84dd9b33b/go.mod h1:EFNZuWvGYxIRUEX+K8UmCFwYmZjqcrnq15ZuVldZkZ0=
|
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
|
||||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
|
|
@ -879,6 +887,7 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc
|
||||||
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||||
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||||
|
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
||||||
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||||
golang.org/x/tools v0.1.8-0.20211022200916-316ba0b74098/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
|
golang.org/x/tools v0.1.8-0.20211022200916-316ba0b74098/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
|
||||||
golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU=
|
golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU=
|
||||||
|
|
@ -1013,8 +1022,8 @@ gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C
|
||||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
|
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
|
||||||
gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
|
gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o=
|
||||||
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
|
gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g=
|
||||||
grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
|
grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
|
||||||
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
|
|
|
||||||
|
|
@ -13,6 +13,8 @@
|
||||||
package transactions
|
package transactions
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"net/url"
|
||||||
|
"path/filepath"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
|
@ -29,6 +31,7 @@ type txnsMap map[CacheKey]*util.JSONResponse
|
||||||
type CacheKey struct {
|
type CacheKey struct {
|
||||||
AccessToken string
|
AccessToken string
|
||||||
TxnID string
|
TxnID string
|
||||||
|
Endpoint string
|
||||||
}
|
}
|
||||||
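The new Endpoint field scopes the idempotency cache to the endpoint a transaction ID was used on, so reusing the same txnID on, say, /send and /sendToDevice no longer replays the wrong cached response. A minimal standalone sketch of how such a key can be built from a request URL, following the filepath.Dir convention used here; all names below are illustrative, not Dendrite's:

package main

import (
    "fmt"
    "net/url"
    "path/filepath"
)

// cacheKey mirrors the shape of the CacheKey above: responses are cached
// per access token, per transaction ID and per endpoint.
type cacheKey struct {
    AccessToken string
    TxnID       string
    Endpoint    string
}

// keyForRequest derives the endpoint component from the request URL.
// filepath.Dir("/send/1234") == "/send", so different txnIDs on the same
// endpoint share the Endpoint value, while the same txnID on /send and
// /sendToDevice produces two distinct keys.
func keyForRequest(accessToken, txnID string, u *url.URL) cacheKey {
    return cacheKey{
        AccessToken: accessToken,
        TxnID:       txnID,
        Endpoint:    filepath.Dir(u.Path),
    }
}

func main() {
    send, _ := url.Parse("/_matrix/client/v3/rooms/!room/send/m.room.message/1234")
    toDevice, _ := url.Parse("/_matrix/client/v3/sendToDevice/m.room_key/1234")
    fmt.Println(keyForRequest("token", "1234", send))
    fmt.Println(keyForRequest("token", "1234", toDevice))
}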
|
|
||||||
// Cache represents a temporary store for response entries.
|
// Cache represents a temporary store for response entries.
|
||||||
|
|
@ -57,14 +60,14 @@ func NewWithCleanupPeriod(cleanupPeriod time.Duration) *Cache {
|
||||||
return &t
|
return &t
|
||||||
}
|
}
|
||||||
|
|
||||||
// FetchTransaction looks up an entry for the (accessToken, txnID) tuple in Cache.
|
// FetchTransaction looks up an entry for the (accessToken, txnID, req.URL) tuple in Cache.
|
||||||
// Looks in both the txnMaps.
|
// Looks in both the txnMaps.
|
||||||
// Returns (JSON response, true) if txnID is found, else the returned bool is false.
|
// Returns (JSON response, true) if txnID is found, else the returned bool is false.
|
||||||
func (t *Cache) FetchTransaction(accessToken, txnID string) (*util.JSONResponse, bool) {
|
func (t *Cache) FetchTransaction(accessToken, txnID string, u *url.URL) (*util.JSONResponse, bool) {
|
||||||
t.RLock()
|
t.RLock()
|
||||||
defer t.RUnlock()
|
defer t.RUnlock()
|
||||||
for _, txns := range t.txnsMaps {
|
for _, txns := range t.txnsMaps {
|
||||||
res, ok := txns[CacheKey{accessToken, txnID}]
|
res, ok := txns[CacheKey{accessToken, txnID, filepath.Dir(u.Path)}]
|
||||||
if ok {
|
if ok {
|
||||||
return res, true
|
return res, true
|
||||||
}
|
}
|
||||||
|
|
@ -72,13 +75,12 @@ func (t *Cache) FetchTransaction(accessToken, txnID string) (*util.JSONResponse,
|
||||||
return nil, false
|
return nil, false
|
||||||
}
|
}
|
||||||
|
|
||||||
// AddTransaction adds an entry for the (accessToken, txnID) tuple in Cache.
|
// AddTransaction adds an entry for the (accessToken, txnID, req.URL) tuple in Cache.
|
||||||
// Adds to the front txnMap.
|
// Adds to the front txnMap.
|
||||||
func (t *Cache) AddTransaction(accessToken, txnID string, res *util.JSONResponse) {
|
func (t *Cache) AddTransaction(accessToken, txnID string, u *url.URL, res *util.JSONResponse) {
|
||||||
t.Lock()
|
t.Lock()
|
||||||
defer t.Unlock()
|
defer t.Unlock()
|
||||||
|
t.txnsMaps[0][CacheKey{accessToken, txnID, filepath.Dir(u.Path)}] = res
|
||||||
t.txnsMaps[0][CacheKey{accessToken, txnID}] = res
|
|
||||||
}
|
}
|
||||||
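With the request URL threaded through both calls, a handler keeps the usual check-then-store idempotency flow. A hedged sketch of that flow against the signatures shown above; the txnCache interface and the handleOnce helper are illustrative stand-ins rather than part of Dendrite:

package example

import (
    "net/http"
    "net/url"

    "github.com/matrix-org/util"
)

// txnCache abstracts the two cache calls used for idempotency; the
// *Cache shown above satisfies this interface.
type txnCache interface {
    FetchTransaction(accessToken, txnID string, u *url.URL) (*util.JSONResponse, bool)
    AddTransaction(accessToken, txnID string, u *url.URL, res *util.JSONResponse)
}

// handleOnce replays a cached response for a repeated (token, txnID, endpoint)
// combination and only invokes compute for transactions it has not seen yet.
func handleOnce(cache txnCache, accessToken, txnID string, req *http.Request,
    compute func() util.JSONResponse) util.JSONResponse {
    if cached, ok := cache.FetchTransaction(accessToken, txnID, req.URL); ok {
        return *cached // the client retried the same transaction: replay it
    }
    res := compute()
    cache.AddTransaction(accessToken, txnID, req.URL, &res)
    return res
}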
|
|
||||||
// cacheCleanService is responsible for cleaning up entries after cleanupPeriod.
|
// cacheCleanService is responsible for cleaning up entries after cleanupPeriod.
|
||||||
|
|
|
||||||
|
|
@ -14,6 +14,9 @@ package transactions
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"net/http"
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"path/filepath"
|
||||||
|
"reflect"
|
||||||
"strconv"
|
"strconv"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
|
@ -24,6 +27,16 @@ type fakeType struct {
|
||||||
ID string `json:"ID"`
|
ID string `json:"ID"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestCompare(t *testing.T) {
|
||||||
|
u1, _ := url.Parse("/send/1?accessToken=123")
|
||||||
|
u2, _ := url.Parse("/send/1")
|
||||||
|
c1 := CacheKey{"1", "2", filepath.Dir(u1.Path)}
|
||||||
|
c2 := CacheKey{"1", "2", filepath.Dir(u2.Path)}
|
||||||
|
if !reflect.DeepEqual(c1, c2) {
|
||||||
|
t.Fatalf("Cache keys differ: %+v <> %+v", c1, c2)
|
||||||
|
}
|
||||||
|
}
|
||||||
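TestCompare leans on two properties: url.Parse keeps the query string in RawQuery rather than Path, so the access token in the first URL never reaches the key, and filepath.Dir maps both "/send/1" paths to "/send". A small test documenting those assumptions (illustrative, not part of the change itself):

package example

import (
    "net/url"
    "path/filepath"
    "testing"
)

// TestPathIgnoresQuery documents the assumption behind TestCompare: the query
// string lives in URL.RawQuery, so only the path feeds into the cache key.
func TestPathIgnoresQuery(t *testing.T) {
    u, err := url.Parse("/send/1?accessToken=123")
    if err != nil {
        t.Fatal(err)
    }
    if u.Path != "/send/1" || u.RawQuery != "accessToken=123" {
        t.Fatalf("unexpected split: path=%q query=%q", u.Path, u.RawQuery)
    }
    if got := filepath.Dir(u.Path); got != "/send" {
        t.Fatalf("filepath.Dir(%q) = %q, want /send", u.Path, got)
    }
}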
|
|
||||||
var (
|
var (
|
||||||
fakeAccessToken = "aRandomAccessToken"
|
fakeAccessToken = "aRandomAccessToken"
|
||||||
fakeAccessToken2 = "anotherRandomAccessToken"
|
fakeAccessToken2 = "anotherRandomAccessToken"
|
||||||
|
|
@ -34,23 +47,28 @@ var (
|
||||||
fakeResponse2 = &util.JSONResponse{
|
fakeResponse2 = &util.JSONResponse{
|
||||||
Code: http.StatusOK, JSON: fakeType{ID: "1"},
|
Code: http.StatusOK, JSON: fakeType{ID: "1"},
|
||||||
}
|
}
|
||||||
|
fakeResponse3 = &util.JSONResponse{
|
||||||
|
Code: http.StatusOK, JSON: fakeType{ID: "2"},
|
||||||
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
// TestCache creates a New Cache and tests AddTransaction & FetchTransaction
|
// TestCache creates a New Cache and tests AddTransaction & FetchTransaction
|
||||||
func TestCache(t *testing.T) {
|
func TestCache(t *testing.T) {
|
||||||
fakeTxnCache := New()
|
fakeTxnCache := New()
|
||||||
fakeTxnCache.AddTransaction(fakeAccessToken, fakeTxnID, fakeResponse)
|
u, _ := url.Parse("")
|
||||||
|
fakeTxnCache.AddTransaction(fakeAccessToken, fakeTxnID, u, fakeResponse)
|
||||||
|
|
||||||
// Add entries for noise.
|
// Add entries for noise.
|
||||||
for i := 1; i <= 100; i++ {
|
for i := 1; i <= 100; i++ {
|
||||||
fakeTxnCache.AddTransaction(
|
fakeTxnCache.AddTransaction(
|
||||||
fakeAccessToken,
|
fakeAccessToken,
|
||||||
fakeTxnID+strconv.Itoa(i),
|
fakeTxnID+strconv.Itoa(i),
|
||||||
|
u,
|
||||||
&util.JSONResponse{Code: http.StatusOK, JSON: fakeType{ID: strconv.Itoa(i)}},
|
&util.JSONResponse{Code: http.StatusOK, JSON: fakeType{ID: strconv.Itoa(i)}},
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
testResponse, ok := fakeTxnCache.FetchTransaction(fakeAccessToken, fakeTxnID)
|
testResponse, ok := fakeTxnCache.FetchTransaction(fakeAccessToken, fakeTxnID, u)
|
||||||
if !ok {
|
if !ok {
|
||||||
t.Error("Failed to retrieve entry for txnID: ", fakeTxnID)
|
t.Error("Failed to retrieve entry for txnID: ", fakeTxnID)
|
||||||
} else if testResponse.JSON != fakeResponse.JSON {
|
} else if testResponse.JSON != fakeResponse.JSON {
|
||||||
|
|
@ -59,20 +77,30 @@ func TestCache(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestCacheScope ensures transactions with the same transaction ID are not shared
|
// TestCacheScope ensures transactions with the same transaction ID are not shared
|
||||||
// across multiple access tokens.
|
// across multiple access tokens and endpoints.
|
||||||
func TestCacheScope(t *testing.T) {
|
func TestCacheScope(t *testing.T) {
|
||||||
cache := New()
|
cache := New()
|
||||||
cache.AddTransaction(fakeAccessToken, fakeTxnID, fakeResponse)
|
sendEndpoint, _ := url.Parse("/send/1?accessToken=test")
|
||||||
cache.AddTransaction(fakeAccessToken2, fakeTxnID, fakeResponse2)
|
sendToDeviceEndpoint, _ := url.Parse("/sendToDevice/1")
|
||||||
|
cache.AddTransaction(fakeAccessToken, fakeTxnID, sendEndpoint, fakeResponse)
|
||||||
|
cache.AddTransaction(fakeAccessToken2, fakeTxnID, sendEndpoint, fakeResponse2)
|
||||||
|
cache.AddTransaction(fakeAccessToken2, fakeTxnID, sendToDeviceEndpoint, fakeResponse3)
|
||||||
|
|
||||||
if res, ok := cache.FetchTransaction(fakeAccessToken, fakeTxnID); !ok {
|
if res, ok := cache.FetchTransaction(fakeAccessToken, fakeTxnID, sendEndpoint); !ok {
|
||||||
t.Errorf("failed to retrieve entry for (%s, %s)", fakeAccessToken, fakeTxnID)
|
t.Errorf("failed to retrieve entry for (%s, %s)", fakeAccessToken, fakeTxnID)
|
||||||
} else if res.JSON != fakeResponse.JSON {
|
} else if res.JSON != fakeResponse.JSON {
|
||||||
t.Errorf("Wrong cache entry for (%s, %s). Expected: %v; got: %v", fakeAccessToken, fakeTxnID, fakeResponse.JSON, res.JSON)
|
t.Errorf("Wrong cache entry for (%s, %s). Expected: %v; got: %v", fakeAccessToken, fakeTxnID, fakeResponse.JSON, res.JSON)
|
||||||
}
|
}
|
||||||
if res, ok := cache.FetchTransaction(fakeAccessToken2, fakeTxnID); !ok {
|
if res, ok := cache.FetchTransaction(fakeAccessToken2, fakeTxnID, sendEndpoint); !ok {
|
||||||
t.Errorf("failed to retrieve entry for (%s, %s)", fakeAccessToken, fakeTxnID)
|
t.Errorf("failed to retrieve entry for (%s, %s)", fakeAccessToken, fakeTxnID)
|
||||||
} else if res.JSON != fakeResponse2.JSON {
|
} else if res.JSON != fakeResponse2.JSON {
|
||||||
t.Errorf("Wrong cache entry for (%s, %s). Expected: %v; got: %v", fakeAccessToken, fakeTxnID, fakeResponse2.JSON, res.JSON)
|
t.Errorf("Wrong cache entry for (%s, %s). Expected: %v; got: %v", fakeAccessToken, fakeTxnID, fakeResponse2.JSON, res.JSON)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Ensure the txnID is not shared across endpoints
|
||||||
|
if res, ok := cache.FetchTransaction(fakeAccessToken2, fakeTxnID, sendToDeviceEndpoint); !ok {
|
||||||
|
t.Errorf("failed to retrieve entry for (%s, %s)", fakeAccessToken, fakeTxnID)
|
||||||
|
} else if res.JSON != fakeResponse3.JSON {
|
||||||
|
t.Errorf("Wrong cache entry for (%s, %s). Expected: %v; got: %v", fakeAccessToken, fakeTxnID, fakeResponse2.JSON, res.JSON)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -17,7 +17,7 @@ var build string
|
||||||
const (
|
const (
|
||||||
VersionMajor = 0
|
VersionMajor = 0
|
||||||
VersionMinor = 10
|
VersionMinor = 10
|
||||||
VersionPatch = 2
|
VersionPatch = 3
|
||||||
VersionTag = "" // example: "rc1"
|
VersionTag = "" // example: "rc1"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -250,6 +250,7 @@ func (a *KeyInternalAPI) PerformMarkAsStaleIfNeeded(ctx context.Context, req *ap
|
||||||
|
|
||||||
// nolint:gocyclo
|
// nolint:gocyclo
|
||||||
func (a *KeyInternalAPI) QueryKeys(ctx context.Context, req *api.QueryKeysRequest, res *api.QueryKeysResponse) error {
|
func (a *KeyInternalAPI) QueryKeys(ctx context.Context, req *api.QueryKeysRequest, res *api.QueryKeysResponse) error {
|
||||||
|
var respMu sync.Mutex
|
||||||
res.DeviceKeys = make(map[string]map[string]json.RawMessage)
|
res.DeviceKeys = make(map[string]map[string]json.RawMessage)
|
||||||
res.MasterKeys = make(map[string]gomatrixserverlib.CrossSigningKey)
|
res.MasterKeys = make(map[string]gomatrixserverlib.CrossSigningKey)
|
||||||
res.SelfSigningKeys = make(map[string]gomatrixserverlib.CrossSigningKey)
|
res.SelfSigningKeys = make(map[string]gomatrixserverlib.CrossSigningKey)
|
||||||
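QueryKeys fans out to goroutines that all write into the shared res.DeviceKeys/MasterKeys/SelfSigningKeys maps, and Go maps are not safe for concurrent writes, hence the new respMu. A stripped-down illustration of the pattern, with placeholder types standing in for the real request and response:

package example

import (
    "encoding/json"
    "sync"
)

// response stands in for api.QueryKeysResponse: a map that several
// goroutines want to populate concurrently.
type response struct {
    DeviceKeys map[string]map[string]json.RawMessage
}

// collect queries each server in its own goroutine and serialises writes to
// the shared map with a mutex; without it, concurrent writes to the map make
// the runtime abort with "fatal error: concurrent map writes".
func collect(servers []string, query func(server string) map[string]json.RawMessage) *response {
    res := &response{DeviceKeys: map[string]map[string]json.RawMessage{}}
    var respMu sync.Mutex
    var wg sync.WaitGroup
    for _, server := range servers {
        wg.Add(1)
        go func(server string) {
            defer wg.Done()
            keys := query(server) // network call, done outside the lock
            respMu.Lock()
            res.DeviceKeys[server] = keys
            respMu.Unlock()
        }(server)
    }
    wg.Wait()
    return res
}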
|
|
@ -329,7 +330,7 @@ func (a *KeyInternalAPI) QueryKeys(ctx context.Context, req *api.QueryKeysReques
|
||||||
}
|
}
|
||||||
|
|
||||||
// attempt to satisfy key queries from the local database first as we should get device updates pushed to us
|
// attempt to satisfy key queries from the local database first as we should get device updates pushed to us
|
||||||
domainToDeviceKeys = a.remoteKeysFromDatabase(ctx, res, domainToDeviceKeys)
|
domainToDeviceKeys = a.remoteKeysFromDatabase(ctx, res, &respMu, domainToDeviceKeys)
|
||||||
if len(domainToDeviceKeys) > 0 || len(domainToCrossSigningKeys) > 0 {
|
if len(domainToDeviceKeys) > 0 || len(domainToCrossSigningKeys) > 0 {
|
||||||
// perform key queries for remote devices
|
// perform key queries for remote devices
|
||||||
a.queryRemoteKeys(ctx, req.Timeout, res, domainToDeviceKeys, domainToCrossSigningKeys)
|
a.queryRemoteKeys(ctx, req.Timeout, res, domainToDeviceKeys, domainToCrossSigningKeys)
|
||||||
|
|
@ -407,7 +408,7 @@ func (a *KeyInternalAPI) QueryKeys(ctx context.Context, req *api.QueryKeysReques
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *KeyInternalAPI) remoteKeysFromDatabase(
|
func (a *KeyInternalAPI) remoteKeysFromDatabase(
|
||||||
ctx context.Context, res *api.QueryKeysResponse, domainToDeviceKeys map[string]map[string][]string,
|
ctx context.Context, res *api.QueryKeysResponse, respMu *sync.Mutex, domainToDeviceKeys map[string]map[string][]string,
|
||||||
) map[string]map[string][]string {
|
) map[string]map[string][]string {
|
||||||
fetchRemote := make(map[string]map[string][]string)
|
fetchRemote := make(map[string]map[string][]string)
|
||||||
for domain, userToDeviceMap := range domainToDeviceKeys {
|
for domain, userToDeviceMap := range domainToDeviceKeys {
|
||||||
|
|
@ -415,7 +416,7 @@ func (a *KeyInternalAPI) remoteKeysFromDatabase(
|
||||||
// we can't safely return keys from the db when all devices are requested as we don't
|
// we can't safely return keys from the db when all devices are requested as we don't
|
||||||
// know if one has just been added.
|
// know if one has just been added.
|
||||||
if len(deviceIDs) > 0 {
|
if len(deviceIDs) > 0 {
|
||||||
err := a.populateResponseWithDeviceKeysFromDatabase(ctx, res, userID, deviceIDs)
|
err := a.populateResponseWithDeviceKeysFromDatabase(ctx, res, respMu, userID, deviceIDs)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
@ -471,7 +472,9 @@ func (a *KeyInternalAPI) queryRemoteKeys(
|
||||||
close(resultCh)
|
close(resultCh)
|
||||||
}()
|
}()
|
||||||
|
|
||||||
for result := range resultCh {
|
processResult := func(result *gomatrixserverlib.RespQueryKeys) {
|
||||||
|
respMu.Lock()
|
||||||
|
defer respMu.Unlock()
|
||||||
for userID, nest := range result.DeviceKeys {
|
for userID, nest := range result.DeviceKeys {
|
||||||
res.DeviceKeys[userID] = make(map[string]json.RawMessage)
|
res.DeviceKeys[userID] = make(map[string]json.RawMessage)
|
||||||
for deviceID, deviceKey := range nest {
|
for deviceID, deviceKey := range nest {
|
||||||
|
|
@ -494,6 +497,10 @@ func (a *KeyInternalAPI) queryRemoteKeys(
|
||||||
// TODO: do we want to persist these somewhere now
|
// TODO: do we want to persist these somewhere now
|
||||||
// that we have fetched them?
|
// that we have fetched them?
|
||||||
}
|
}
|
||||||
|
|
||||||
|
for result := range resultCh {
|
||||||
|
processResult(result)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
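Moving the loop body into a processResult closure means the mutex is taken once per result and released by defer on every path out of that call, instead of juggling explicit Unlock calls inside the loop. The same shape in isolation, with placeholder types:

package example

import "sync"

// drain shows the per-item lock scope used above: each item is processed
// under the mutex inside its own function call, so the deferred Unlock runs
// as soon as that item is done rather than at the end of the whole loop.
func drain(ch <-chan map[string]string, mu *sync.Mutex, dst map[string]string) {
    process := func(item map[string]string) {
        mu.Lock()
        defer mu.Unlock() // released when this item finishes, even on panic
        for k, v := range item {
            dst[k] = v
        }
    }
    for item := range ch {
        process(item)
    }
}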
|
|
||||||
func (a *KeyInternalAPI) queryRemoteKeysOnServer(
|
func (a *KeyInternalAPI) queryRemoteKeysOnServer(
|
||||||
|
|
@ -541,9 +548,7 @@ func (a *KeyInternalAPI) queryRemoteKeysOnServer(
|
||||||
}
|
}
|
||||||
// refresh entries from DB: unlike remoteKeysFromDatabase we know we previously had no device info for this
|
// refresh entries from DB: unlike remoteKeysFromDatabase we know we previously had no device info for this
|
||||||
// user so the fact that we're populating all devices here isn't a problem so long as we have devices.
|
// user so the fact that we're populating all devices here isn't a problem so long as we have devices.
|
||||||
respMu.Lock()
|
err = a.populateResponseWithDeviceKeysFromDatabase(ctx, res, respMu, userID, nil)
|
||||||
err = a.populateResponseWithDeviceKeysFromDatabase(ctx, res, userID, nil)
|
|
||||||
respMu.Unlock()
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logrus.WithFields(logrus.Fields{
|
logrus.WithFields(logrus.Fields{
|
||||||
logrus.ErrorKey: err,
|
logrus.ErrorKey: err,
|
||||||
|
|
@ -567,25 +572,26 @@ func (a *KeyInternalAPI) queryRemoteKeysOnServer(
|
||||||
res.Failures[serverName] = map[string]interface{}{
|
res.Failures[serverName] = map[string]interface{}{
|
||||||
"message": err.Error(),
|
"message": err.Error(),
|
||||||
}
|
}
|
||||||
|
respMu.Unlock()
|
||||||
|
|
||||||
// last ditch, use the cache only. This is good for when clients hit /keys/query and the remote server
|
// last ditch, use the cache only. This is good for when clients hit /keys/query and the remote server
|
||||||
// is down, better to return something than nothing at all. Clients can know about the failure by
|
// is down, better to return something than nothing at all. Clients can know about the failure by
|
||||||
// inspecting the failures map though so they can know it's a cached response.
|
// inspecting the failures map though so they can know it's a cached response.
|
||||||
for userID, dkeys := range devKeys {
|
for userID, dkeys := range devKeys {
|
||||||
// drop the error as it's already a failure at this point
|
// drop the error as it's already a failure at this point
|
||||||
_ = a.populateResponseWithDeviceKeysFromDatabase(ctx, res, userID, dkeys)
|
_ = a.populateResponseWithDeviceKeysFromDatabase(ctx, res, respMu, userID, dkeys)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Sytest expects no failures if we could still retrieve keys, e.g. from the local cache
|
// Sytest expects no failures if we could still retrieve keys, e.g. from the local cache
|
||||||
|
respMu.Lock()
|
||||||
if len(res.DeviceKeys) > 0 {
|
if len(res.DeviceKeys) > 0 {
|
||||||
delete(res.Failures, serverName)
|
delete(res.Failures, serverName)
|
||||||
}
|
}
|
||||||
respMu.Unlock()
|
respMu.Unlock()
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *KeyInternalAPI) populateResponseWithDeviceKeysFromDatabase(
|
func (a *KeyInternalAPI) populateResponseWithDeviceKeysFromDatabase(
|
||||||
ctx context.Context, res *api.QueryKeysResponse, userID string, deviceIDs []string,
|
ctx context.Context, res *api.QueryKeysResponse, respMu *sync.Mutex, userID string, deviceIDs []string,
|
||||||
) error {
|
) error {
|
||||||
keys, err := a.DB.DeviceKeysForUser(ctx, userID, deviceIDs, false)
|
keys, err := a.DB.DeviceKeysForUser(ctx, userID, deviceIDs, false)
|
||||||
// if we can't query the db or there are fewer keys than requested, fetch from remote.
|
// if we can't query the db or there are fewer keys than requested, fetch from remote.
|
||||||
|
|
@ -598,9 +604,11 @@ func (a *KeyInternalAPI) populateResponseWithDeviceKeysFromDatabase(
|
||||||
if len(deviceIDs) == 0 && len(keys) == 0 {
|
if len(deviceIDs) == 0 && len(keys) == 0 {
|
||||||
return fmt.Errorf("DeviceKeysForUser %s returned no keys but wanted all keys, falling back to remote", userID)
|
return fmt.Errorf("DeviceKeysForUser %s returned no keys but wanted all keys, falling back to remote", userID)
|
||||||
}
|
}
|
||||||
|
respMu.Lock()
|
||||||
if res.DeviceKeys[userID] == nil {
|
if res.DeviceKeys[userID] == nil {
|
||||||
res.DeviceKeys[userID] = make(map[string]json.RawMessage)
|
res.DeviceKeys[userID] = make(map[string]json.RawMessage)
|
||||||
}
|
}
|
||||||
|
respMu.Unlock()
|
||||||
|
|
||||||
for _, key := range keys {
|
for _, key := range keys {
|
||||||
if len(key.KeyJSON) == 0 {
|
if len(key.KeyJSON) == 0 {
|
||||||
|
|
@ -610,7 +618,9 @@ func (a *KeyInternalAPI) populateResponseWithDeviceKeysFromDatabase(
|
||||||
key.KeyJSON, _ = sjson.SetBytes(key.KeyJSON, "unsigned", struct {
|
key.KeyJSON, _ = sjson.SetBytes(key.KeyJSON, "unsigned", struct {
|
||||||
DisplayName string `json:"device_display_name,omitempty"`
|
DisplayName string `json:"device_display_name,omitempty"`
|
||||||
}{key.DisplayName})
|
}{key.DisplayName})
|
||||||
|
respMu.Lock()
|
||||||
res.DeviceKeys[userID][key.DeviceID] = key.KeyJSON
|
res.DeviceKeys[userID][key.DeviceID] = key.KeyJSON
|
||||||
|
respMu.Unlock()
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
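Note that the lock is only held around the writes to res.DeviceKeys; the database read and the sjson rewrite stay outside the critical section, so the per-server goroutines do not serialise on each other. A condensed sketch of that shape, with hypothetical helper names:

package example

import "sync"

// mergeKeys copies freshly loaded keys into a shared map. The expensive part
// (loadKeys, standing in for the database read and JSON rewriting) happens
// before the lock is taken; only the map writes sit inside the critical section.
func mergeKeys(mu *sync.Mutex, shared map[string]map[string]string, userID string,
    loadKeys func(userID string) map[string]string) {
    keys := loadKeys(userID) // slow work, done without holding the lock

    mu.Lock()
    if shared[userID] == nil {
        shared[userID] = make(map[string]string)
    }
    for deviceID, key := range keys {
        shared[userID][deviceID] = key
    }
    mu.Unlock()
}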
|
|
|
||||||
|
|
@ -19,10 +19,11 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"sort"
|
"sort"
|
||||||
|
|
||||||
|
"github.com/matrix-org/gomatrixserverlib"
|
||||||
|
|
||||||
"github.com/matrix-org/dendrite/roomserver/state"
|
"github.com/matrix-org/dendrite/roomserver/state"
|
||||||
"github.com/matrix-org/dendrite/roomserver/storage"
|
"github.com/matrix-org/dendrite/roomserver/storage"
|
||||||
"github.com/matrix-org/dendrite/roomserver/types"
|
"github.com/matrix-org/dendrite/roomserver/types"
|
||||||
"github.com/matrix-org/gomatrixserverlib"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// CheckForSoftFail returns true if the event should be soft-failed
|
// CheckForSoftFail returns true if the event should be soft-failed
|
||||||
|
|
@ -129,6 +130,12 @@ type authEvents struct {
|
||||||
stateKeyNIDMap map[string]types.EventStateKeyNID
|
stateKeyNIDMap map[string]types.EventStateKeyNID
|
||||||
state stateEntryMap
|
state stateEntryMap
|
||||||
events EventMap
|
events EventMap
|
||||||
|
valid bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// Valid verifies that all auth events are from the same room.
|
||||||
|
func (ae *authEvents) Valid() bool {
|
||||||
|
return ae.valid
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create implements gomatrixserverlib.AuthEventProvider
|
// Create implements gomatrixserverlib.AuthEventProvider
|
||||||
|
|
@ -197,6 +204,7 @@ func loadAuthEvents(
|
||||||
needed gomatrixserverlib.StateNeeded,
|
needed gomatrixserverlib.StateNeeded,
|
||||||
state []types.StateEntry,
|
state []types.StateEntry,
|
||||||
) (result authEvents, err error) {
|
) (result authEvents, err error) {
|
||||||
|
result.valid = true
|
||||||
// Look up the numeric IDs for the state keys needed for auth.
|
// Look up the numeric IDs for the state keys needed for auth.
|
||||||
var neededStateKeys []string
|
var neededStateKeys []string
|
||||||
neededStateKeys = append(neededStateKeys, needed.Member...)
|
neededStateKeys = append(neededStateKeys, needed.Member...)
|
||||||
|
|
@ -218,6 +226,16 @@ func loadAuthEvents(
|
||||||
if result.events, err = db.Events(ctx, eventNIDs); err != nil {
|
if result.events, err = db.Events(ctx, eventNIDs); err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
roomID := ""
|
||||||
|
for _, ev := range result.events {
|
||||||
|
if roomID == "" {
|
||||||
|
roomID = ev.RoomID()
|
||||||
|
}
|
||||||
|
if ev.RoomID() != roomID {
|
||||||
|
result.valid = false
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
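The loop records whether every auth event loaded from the database belongs to a single room, and Valid() exposes the answer so callers can reject events whose auth_events mix rooms before running any further auth checks. The check in isolation, over a minimal event interface (illustrative, not gomatrixserverlib's):

package example

// roomIDer is the one piece of behaviour the check needs from an event.
type roomIDer interface {
    RoomID() string
}

// allSameRoom reports whether every event belongs to a single room. An empty
// slice is treated as valid, matching the loop above where valid starts true
// and is only cleared when a mismatch is found.
func allSameRoom(events []roomIDer) bool {
    roomID := ""
    for _, ev := range events {
        if roomID == "" {
            roomID = ev.RoomID()
        }
        if ev.RoomID() != roomID {
            return false
        }
    }
    return true
}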
|
|
||||||
|
|
|
||||||
|
|
@ -89,6 +89,13 @@ type Inputer struct {
|
||||||
Queryer *query.Queryer
|
Queryer *query.Queryer
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// If a room consumer is inactive for a while then we will allow NATS
|
||||||
|
// to clean it up. This stops us from holding onto durable consumers
|
||||||
|
// indefinitely for rooms that might no longer be active, since each one
|
||||||
|
// carries an interest overhead in the NATS Server. If the room becomes
|
||||||
|
// active again then we'll recreate the consumer anyway.
|
||||||
|
const inactiveThreshold = time.Hour * 24
|
||||||
|
|
||||||
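InactiveThreshold is a nats.go consumer setting: a durable consumer with no activity for longer than the threshold is deleted by the server rather than lingering forever. A sketch of a per-room consumer created with the same settings as above; the stream, durable and subject names, and where js comes from, are illustrative:

package example

import (
    "time"

    "github.com/nats-io/nats.go"
)

// addRoomConsumer creates a durable consumer for one room's events and lets
// the NATS Server clean it up after 24h of inactivity, mirroring the config
// used above. js would come from nats.Connect(...).JetStream().
func addRoomConsumer(js nats.JetStreamContext, stream, durable, subject string) error {
    _, err := js.AddConsumer(stream, &nats.ConsumerConfig{
        Durable:           durable,
        AckPolicy:         nats.AckAllPolicy,
        DeliverPolicy:     nats.DeliverAllPolicy,
        FilterSubject:     subject,
        InactiveThreshold: 24 * time.Hour, // dropped by the server if unused this long
    })
    return err
}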
type worker struct {
|
type worker struct {
|
||||||
phony.Inbox
|
phony.Inbox
|
||||||
sync.Mutex
|
sync.Mutex
|
||||||
|
|
@ -125,11 +132,12 @@ func (r *Inputer) startWorkerForRoom(roomID string) {
|
||||||
if _, err := w.r.JetStream.AddConsumer(
|
if _, err := w.r.JetStream.AddConsumer(
|
||||||
r.Cfg.Matrix.JetStream.Prefixed(jetstream.InputRoomEvent),
|
r.Cfg.Matrix.JetStream.Prefixed(jetstream.InputRoomEvent),
|
||||||
&nats.ConsumerConfig{
|
&nats.ConsumerConfig{
|
||||||
Durable: consumer,
|
Durable: consumer,
|
||||||
AckPolicy: nats.AckAllPolicy,
|
AckPolicy: nats.AckAllPolicy,
|
||||||
DeliverPolicy: nats.DeliverAllPolicy,
|
DeliverPolicy: nats.DeliverAllPolicy,
|
||||||
FilterSubject: subject,
|
FilterSubject: subject,
|
||||||
AckWait: MaximumMissingProcessingTime + (time.Second * 10),
|
AckWait: MaximumMissingProcessingTime + (time.Second * 10),
|
||||||
|
InactiveThreshold: inactiveThreshold,
|
||||||
},
|
},
|
||||||
); err != nil {
|
); err != nil {
|
||||||
logrus.WithError(err).Errorf("Failed to create consumer for room %q", w.roomID)
|
logrus.WithError(err).Errorf("Failed to create consumer for room %q", w.roomID)
|
||||||
|
|
@ -145,6 +153,7 @@ func (r *Inputer) startWorkerForRoom(roomID string) {
|
||||||
nats.DeliverAll(),
|
nats.DeliverAll(),
|
||||||
nats.AckWait(MaximumMissingProcessingTime+(time.Second*10)),
|
nats.AckWait(MaximumMissingProcessingTime+(time.Second*10)),
|
||||||
nats.Bind(r.InputRoomEventTopic, consumer),
|
nats.Bind(r.InputRoomEventTopic, consumer),
|
||||||
|
nats.InactiveThreshold(inactiveThreshold),
|
||||||
)
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logrus.WithError(err).Errorf("Failed to subscribe to stream for room %q", w.roomID)
|
logrus.WithError(err).Errorf("Failed to subscribe to stream for room %q", w.roomID)
|
||||||
|
|
@ -180,6 +189,21 @@ func (r *Inputer) Start() error {
|
||||||
nats.ReplayInstant(),
|
nats.ReplayInstant(),
|
||||||
nats.BindStream(r.InputRoomEventTopic),
|
nats.BindStream(r.InputRoomEventTopic),
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// Make sure that the room consumers have the right config.
|
||||||
|
stream := r.Cfg.Matrix.JetStream.Prefixed(jetstream.InputRoomEvent)
|
||||||
|
for consumer := range r.JetStream.Consumers(stream) {
|
||||||
|
switch {
|
||||||
|
case consumer.Config.Durable == "":
|
||||||
|
continue // Ignore ephemeral consumers
|
||||||
|
case consumer.Config.InactiveThreshold != inactiveThreshold:
|
||||||
|
consumer.Config.InactiveThreshold = inactiveThreshold
|
||||||
|
if _, cerr := r.JetStream.UpdateConsumer(stream, &consumer.Config); cerr != nil {
|
||||||
|
logrus.WithError(cerr).Warnf("Failed to update inactive threshold on consumer %q", consumer.Name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
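Review note: the hunks above introduce an inactivity threshold for the per-room durable consumers so NATS can reap consumers for rooms that have gone quiet, and Start() retrofits the new threshold onto consumers that already exist. A minimal, self-contained sketch of the same pattern against the nats.go JetStream API, assuming a stream called ROOM_INPUT and a durable called RoomInputExample (both hypothetical names):

package main

import (
	"log"
	"time"

	"github.com/nats-io/nats.go"
)

func main() {
	nc, err := nats.Connect(nats.DefaultURL)
	if err != nil {
		log.Fatal(err)
	}
	js, err := nc.JetStream()
	if err != nil {
		log.Fatal(err)
	}

	// Durable consumers normally live until deleted explicitly;
	// InactiveThreshold lets the server remove them after a period with
	// no interest, which is what keeps idle rooms from piling up.
	if _, err := js.AddConsumer("ROOM_INPUT", &nats.ConsumerConfig{
		Durable:           "RoomInputExample",
		AckPolicy:         nats.AckAllPolicy,
		DeliverPolicy:     nats.DeliverAllPolicy,
		InactiveThreshold: 24 * time.Hour,
	}); err != nil {
		log.Fatal(err)
	}

	// Existing durable consumers can be brought in line by updating their
	// config in place, mirroring what the Start() hunk does above.
	for info := range js.Consumers("ROOM_INPUT") {
		if info.Config.Durable == "" || info.Config.InactiveThreshold == 24*time.Hour {
			continue
		}
		info.Config.InactiveThreshold = 24 * time.Hour
		if _, err := js.UpdateConsumer("ROOM_INPUT", &info.Config); err != nil {
			log.Printf("failed to update consumer %q: %v", info.Name, err)
		}
	}
}
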
@@ -19,9 +19,16 @@ package input
 import (
 	"context"
 	"database/sql"
+	"errors"
 	"fmt"
 	"time"

+	"github.com/matrix-org/gomatrixserverlib"
+	"github.com/matrix-org/util"
+	"github.com/opentracing/opentracing-go"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/sirupsen/logrus"
+
 	fedapi "github.com/matrix-org/dendrite/federationapi/api"
 	"github.com/matrix-org/dendrite/internal"
 	"github.com/matrix-org/dendrite/internal/eventutil"
@@ -31,11 +38,6 @@ import (
 	"github.com/matrix-org/dendrite/roomserver/internal/helpers"
 	"github.com/matrix-org/dendrite/roomserver/state"
 	"github.com/matrix-org/dendrite/roomserver/types"
-	"github.com/matrix-org/gomatrixserverlib"
-	"github.com/matrix-org/util"
-	"github.com/opentracing/opentracing-go"
-	"github.com/prometheus/client_golang/prometheus"
-	"github.com/sirupsen/logrus"
 )

 // TODO: Does this value make sense?
@@ -196,7 +198,7 @@ func (r *Inputer) processRoomEvent(
 	isRejected := false
 	authEvents := gomatrixserverlib.NewAuthEvents(nil)
 	knownEvents := map[string]*types.Event{}
-	if err = r.fetchAuthEvents(ctx, logger, headered, &authEvents, knownEvents, serverRes.ServerNames); err != nil {
+	if err = r.fetchAuthEvents(ctx, logger, roomInfo, headered, &authEvents, knownEvents, serverRes.ServerNames); err != nil {
 		return fmt.Errorf("r.fetchAuthEvents: %w", err)
 	}
@@ -336,7 +338,7 @@ func (r *Inputer) processRoomEvent(
 	// doesn't have any associated state to store and we don't need to
 	// notify anyone about it.
 	if input.Kind == api.KindOutlier {
-		logger.Debug("Stored outlier")
+		logger.WithField("rejected", isRejected).Debug("Stored outlier")
 		hooks.Run(hooks.KindNewEventPersisted, headered)
 		return nil
 	}
@@ -536,6 +538,7 @@ func (r *Inputer) processStateBefore(
 func (r *Inputer) fetchAuthEvents(
 	ctx context.Context,
 	logger *logrus.Entry,
+	roomInfo *types.RoomInfo,
 	event *gomatrixserverlib.HeaderedEvent,
 	auth *gomatrixserverlib.AuthEvents,
 	known map[string]*types.Event,
@@ -557,9 +560,19 @@ func (r *Inputer) fetchAuthEvents(
 			continue
 		}
 		ev := authEvents[0]

+		isRejected := false
+		if roomInfo != nil {
+			isRejected, err = r.DB.IsEventRejected(ctx, roomInfo.RoomNID, ev.EventID())
+			if err != nil && !errors.Is(err, sql.ErrNoRows) {
+				return fmt.Errorf("r.DB.IsEventRejected failed: %w", err)
+			}
+		}
 		known[authEventID] = &ev // don't take the pointer of the iterated event
-		if err = auth.AddEvent(ev.Event); err != nil {
-			return fmt.Errorf("auth.AddEvent: %w", err)
+		if !isRejected {
+			if err = auth.AddEvent(ev.Event); err != nil {
+				return fmt.Errorf("auth.AddEvent: %w", err)
+			}
 		}
 	}

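Review note: fetchAuthEvents now refuses to feed previously rejected events into the auth set, even though they stay in the known map for bookkeeping. The effect is easiest to see in isolation; the sketch below assumes a hypothetical rejected map in place of the real r.DB.IsEventRejected lookup:

package input

import (
	"fmt"

	"github.com/matrix-org/gomatrixserverlib"
)

// buildAuthSet adds only non-rejected events to the auth provider, so a later
// gomatrixserverlib.Allowed() check cannot be satisfied by events that this
// server has already refused to accept.
func buildAuthSet(events []*gomatrixserverlib.Event, rejected map[string]bool) (*gomatrixserverlib.AuthEvents, error) {
	auth := gomatrixserverlib.NewAuthEvents(nil)
	for _, ev := range events {
		if rejected[ev.EventID()] {
			continue // known, but not usable for auth
		}
		if err := auth.AddEvent(ev); err != nil {
			return nil, fmt.Errorf("auth.AddEvent: %w", err)
		}
	}
	return &auth, nil
}
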
roomserver/internal/input/input_events_test.go (new file, 63 lines)
@@ -0,0 +1,63 @@
+package input
+
+import (
+	"testing"
+
+	"github.com/matrix-org/gomatrixserverlib"
+
+	"github.com/matrix-org/dendrite/test"
+)
+
+func Test_EventAuth(t *testing.T) {
+	alice := test.NewUser(t)
+	bob := test.NewUser(t)
+
+	// create two rooms, so we can craft "illegal" auth events
+	room1 := test.NewRoom(t, alice)
+	room2 := test.NewRoom(t, alice, test.RoomPreset(test.PresetPublicChat))
+
+	authEventIDs := make([]string, 0, 4)
+	authEvents := []*gomatrixserverlib.Event{}
+
+	// Add the legal auth events from room2
+	for _, x := range room2.Events() {
+		if x.Type() == gomatrixserverlib.MRoomCreate {
+			authEventIDs = append(authEventIDs, x.EventID())
+			authEvents = append(authEvents, x.Event)
+		}
+		if x.Type() == gomatrixserverlib.MRoomPowerLevels {
+			authEventIDs = append(authEventIDs, x.EventID())
+			authEvents = append(authEvents, x.Event)
+		}
+		if x.Type() == gomatrixserverlib.MRoomJoinRules {
+			authEventIDs = append(authEventIDs, x.EventID())
+			authEvents = append(authEvents, x.Event)
+		}
+	}
+
+	// Add the illegal auth event from room1 (rooms are different)
+	for _, x := range room1.Events() {
+		if x.Type() == gomatrixserverlib.MRoomMember {
+			authEventIDs = append(authEventIDs, x.EventID())
+			authEvents = append(authEvents, x.Event)
+		}
+	}
+
+	// Craft the illegal join event, with auth events from different rooms
+	ev := room2.CreateEvent(t, bob, "m.room.member", map[string]interface{}{
+		"membership": "join",
+	}, test.WithStateKey(bob.ID), test.WithAuthIDs(authEventIDs))
+
+	// Add the auth events to the allower
+	allower := gomatrixserverlib.NewAuthEvents(nil)
+	for _, a := range authEvents {
+		if err := allower.AddEvent(a); err != nil {
+			t.Fatalf("allower.AddEvent failed: %v", err)
+		}
+	}
+
+	// Finally check that the event is NOT allowed
+	if err := gomatrixserverlib.Allowed(ev.Event, &allower); err == nil {
+		t.Fatalf("event should not be allowed, but it was")
+	}
+}

@@ -431,6 +431,13 @@ func (s *OutputRoomEventConsumer) onRetireInviteEvent(
 		return
 	}

+	// Only notify clients about retired invite events, if the user didn't accept the invite.
+	// The PDU stream will also receive an event about accepting the invitation, so there should
+	// be a "smooth" transition from invite -> join, and not invite -> leave -> join
+	if msg.Membership == gomatrixserverlib.Join {
+		return
+	}
+
 	// Notify any active sync requests that the invite has been retired.
 	s.inviteStream.Advance(pduPos)
 	s.notifier.OnNewInvite(types.StreamingToken{InvitePosition: pduPos}, msg.TargetUserID)

@@ -28,8 +29,9 @@ import (
 	"github.com/matrix-org/dendrite/syncapi/types"

 	"github.com/lib/pq"
-	"github.com/matrix-org/dendrite/internal/sqlutil"
 	"github.com/matrix-org/gomatrixserverlib"
+
+	"github.com/matrix-org/dendrite/internal/sqlutil"
 )

 const outputRoomEventsSchema = `
@@ -133,7 +134,7 @@ const updateEventJSONSQL = "" +
 	"UPDATE syncapi_output_room_events SET headered_event_json=$1 WHERE event_id=$2"

 // In order for us to apply the state updates correctly, rows need to be ordered in the order they were received (id).
-const selectStateInRangeSQL = "" +
+const selectStateInRangeFilteredSQL = "" +
 	"SELECT event_id, id, headered_event_json, exclude_from_sync, add_state_ids, remove_state_ids, history_visibility" +
 	" FROM syncapi_output_room_events" +
 	" WHERE (id > $1 AND id <= $2) AND (add_state_ids IS NOT NULL OR remove_state_ids IS NOT NULL)" +
@@ -146,6 +147,15 @@ const selectStateInRangeSQL = "" +
 	" ORDER BY id ASC" +
 	" LIMIT $9"

+// In order for us to apply the state updates correctly, rows need to be ordered in the order they were received (id).
+const selectStateInRangeSQL = "" +
+	"SELECT event_id, id, headered_event_json, exclude_from_sync, add_state_ids, remove_state_ids, history_visibility" +
+	" FROM syncapi_output_room_events" +
+	" WHERE (id > $1 AND id <= $2) AND (add_state_ids IS NOT NULL OR remove_state_ids IS NOT NULL)" +
+	" AND room_id = ANY($3)" +
+	" ORDER BY id ASC" +
+	" LIMIT $4"
+
 const deleteEventsForRoomSQL = "" +
 	"DELETE FROM syncapi_output_room_events WHERE room_id = $1"
@@ -174,21 +184,22 @@ const purgeEventsSQL = "" +
 const selectSearchSQL = "SELECT id, event_id, headered_event_json FROM syncapi_output_room_events WHERE id > $1 AND type = ANY($2) ORDER BY id ASC LIMIT $3"

 type outputRoomEventsStatements struct {
 	insertEventStmt                *sql.Stmt
 	selectEventsStmt               *sql.Stmt
 	selectEventsWitFilterStmt      *sql.Stmt
 	selectMaxEventIDStmt           *sql.Stmt
 	selectRecentEventsStmt         *sql.Stmt
 	selectRecentEventsForSyncStmt  *sql.Stmt
 	selectEarlyEventsStmt          *sql.Stmt
+	selectStateInRangeFilteredStmt *sql.Stmt
 	selectStateInRangeStmt         *sql.Stmt
 	updateEventJSONStmt            *sql.Stmt
 	deleteEventsForRoomStmt        *sql.Stmt
 	selectContextEventStmt         *sql.Stmt
 	selectContextBeforeEventStmt   *sql.Stmt
 	selectContextAfterEventStmt    *sql.Stmt
 	purgeEventsStmt                *sql.Stmt
 	selectSearchStmt               *sql.Stmt
 }

 func NewPostgresEventsTable(db *sql.DB) (tables.Events, error) {
@@ -218,6 +229,7 @@ func NewPostgresEventsTable(db *sql.DB) (tables.Events, error) {
 		{&s.selectRecentEventsStmt, selectRecentEventsSQL},
 		{&s.selectRecentEventsForSyncStmt, selectRecentEventsForSyncSQL},
 		{&s.selectEarlyEventsStmt, selectEarlyEventsSQL},
+		{&s.selectStateInRangeFilteredStmt, selectStateInRangeFilteredSQL},
 		{&s.selectStateInRangeStmt, selectStateInRangeSQL},
 		{&s.updateEventJSONStmt, updateEventJSONSQL},
 		{&s.deleteEventsForRoomStmt, deleteEventsForRoomSQL},
@@ -245,17 +257,28 @@ func (s *outputRoomEventsStatements) SelectStateInRange(
 	ctx context.Context, txn *sql.Tx, r types.Range,
 	stateFilter *gomatrixserverlib.StateFilter, roomIDs []string,
 ) (map[string]map[string]bool, map[string]types.StreamEvent, error) {
-	stmt := sqlutil.TxStmt(txn, s.selectStateInRangeStmt)
-	senders, notSenders := getSendersStateFilterFilter(stateFilter)
-	rows, err := stmt.QueryContext(
-		ctx, r.Low(), r.High(), pq.StringArray(roomIDs),
-		pq.StringArray(senders),
-		pq.StringArray(notSenders),
-		pq.StringArray(filterConvertTypeWildcardToSQL(stateFilter.Types)),
-		pq.StringArray(filterConvertTypeWildcardToSQL(stateFilter.NotTypes)),
-		stateFilter.ContainsURL,
-		stateFilter.Limit,
-	)
+	var rows *sql.Rows
+	var err error
+	if stateFilter != nil {
+		stmt := sqlutil.TxStmt(txn, s.selectStateInRangeFilteredStmt)
+		senders, notSenders := getSendersStateFilterFilter(stateFilter)
+		rows, err = stmt.QueryContext(
+			ctx, r.Low(), r.High(), pq.StringArray(roomIDs),
+			pq.StringArray(senders),
+			pq.StringArray(notSenders),
+			pq.StringArray(filterConvertTypeWildcardToSQL(stateFilter.Types)),
+			pq.StringArray(filterConvertTypeWildcardToSQL(stateFilter.NotTypes)),
+			stateFilter.ContainsURL,
+			stateFilter.Limit,
+		)
+	} else {
+		stmt := sqlutil.TxStmt(txn, s.selectStateInRangeStmt)
+		rows, err = stmt.QueryContext(
+			ctx, r.Low(), r.High(), pq.StringArray(roomIDs),
+			r.High()-r.Low(),
+		)
+	}
 	if err != nil {
 		return nil, nil, err
 	}

@@ -5,10 +5,11 @@ import (
 	"database/sql"
 	"fmt"

+	"github.com/matrix-org/gomatrixserverlib"
+
 	"github.com/matrix-org/dendrite/internal/eventutil"
 	"github.com/matrix-org/dendrite/syncapi/types"
 	userapi "github.com/matrix-org/dendrite/userapi/api"
-	"github.com/matrix-org/gomatrixserverlib"
 )

 type DatabaseTransaction struct {
@@ -277,6 +278,7 @@ func (d *DatabaseTransaction) GetBackwardTopologyPos(
 // exclusive of oldPos, inclusive of newPos, for the rooms in which
 // the user has new membership events.
 // A list of joined room IDs is also returned in case the caller needs it.
+// nolint:gocyclo
 func (d *DatabaseTransaction) GetStateDeltas(
 	ctx context.Context, device *userapi.Device,
 	r types.Range, userID string,
@@ -311,7 +313,7 @@ func (d *DatabaseTransaction) GetStateDeltas(
 	}

 	// get all the state events ever (i.e. for all available rooms) between these two positions
-	stateNeeded, eventMap, err := d.OutputEvents.SelectStateInRange(ctx, d.txn, r, stateFilter, allRoomIDs)
+	stateNeeded, eventMap, err := d.OutputEvents.SelectStateInRange(ctx, d.txn, r, nil, allRoomIDs)
 	if err != nil {
 		if err == sql.ErrNoRows {
 			return nil, nil, nil
@@ -326,6 +328,22 @@ func (d *DatabaseTransaction) GetStateDeltas(
 		return nil, nil, err
 	}

+	// get all the state events ever (i.e. for all available rooms) between these two positions
+	stateNeededFiltered, eventMapFiltered, err := d.OutputEvents.SelectStateInRange(ctx, d.txn, r, stateFilter, allRoomIDs)
+	if err != nil {
+		if err == sql.ErrNoRows {
+			return nil, nil, nil
+		}
+		return nil, nil, err
+	}
+	stateFiltered, err := d.fetchStateEvents(ctx, d.txn, stateNeededFiltered, eventMapFiltered)
+	if err != nil {
+		if err == sql.ErrNoRows {
+			return nil, nil, nil
+		}
+		return nil, nil, err
+	}
+
 	// find out which rooms this user is peeking, if any.
 	// We do this before joins so any peeks get overwritten
 	peeks, err := d.Peeks.SelectPeeksInRange(ctx, d.txn, userID, device.ID, r)
@@ -371,6 +389,7 @@ func (d *DatabaseTransaction) GetStateDeltas(
 			// If our membership is now join but the previous membership wasn't
 			// then this is a "join transition", so we'll insert this room.
 			if prevMembership != membership {
+				newlyJoinedRooms[roomID] = true
 				// Get the full room state, as we'll send that down for a newly
 				// joined room instead of a delta.
 				var s []types.StreamEvent
@@ -383,8 +402,7 @@ func (d *DatabaseTransaction) GetStateDeltas(

 				// Add the information for this room into the state so that
 				// it will get added with all of the rest of the joined rooms.
-				state[roomID] = s
-				newlyJoinedRooms[roomID] = true
+				stateFiltered[roomID] = s
 			}

 			// We won't add joined rooms into the delta at this point as they
@@ -395,7 +413,7 @@ func (d *DatabaseTransaction) GetStateDeltas(
 			deltas = append(deltas, types.StateDelta{
 				Membership:    membership,
 				MembershipPos: ev.StreamPosition,
-				StateEvents:   d.StreamEventsToEvents(device, stateStreamEvents),
+				StateEvents:   d.StreamEventsToEvents(device, stateFiltered[roomID]),
 				RoomID:        roomID,
 			})
 			break
@@ -407,7 +425,7 @@ func (d *DatabaseTransaction) GetStateDeltas(
 	for _, joinedRoomID := range joinedRoomIDs {
 		deltas = append(deltas, types.StateDelta{
 			Membership:  gomatrixserverlib.Join,
-			StateEvents: d.StreamEventsToEvents(device, state[joinedRoomID]),
+			StateEvents: d.StreamEventsToEvents(device, stateFiltered[joinedRoomID]),
 			RoomID:      joinedRoomID,
 			NewlyJoined: newlyJoinedRooms[joinedRoomID],
 		})

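Review note: GetStateDeltas now calls SelectStateInRange twice, once with a nil filter so that every membership transition in the range is visible, and once with the client's state filter to build the state that is actually returned. A rough sketch of that two-pass shape, with selectState standing in for the real query helper (hypothetical signature):

package sketch

import (
	"context"

	"github.com/matrix-org/gomatrixserverlib"
)

// twoPassDeltas works out join transitions from the unfiltered state, but
// populates the per-room response from the filtered state, so a narrow
// client filter can no longer hide a join from the delta calculation.
func twoPassDeltas(
	ctx context.Context,
	selectState func(ctx context.Context, filter *gomatrixserverlib.StateFilter) (map[string][]string, error),
	filter *gomatrixserverlib.StateFilter,
) (map[string][]string, error) {
	all, err := selectState(ctx, nil) // every state change, used for join detection
	if err != nil {
		return nil, err
	}
	filtered, err := selectState(ctx, filter) // only what the client asked for
	if err != nil {
		return nil, err
	}
	out := make(map[string][]string, len(all))
	for roomID := range all {
		// Membership decisions come from `all`; the events that end up in
		// the /sync response for the room come from `filtered`.
		out[roomID] = filtered[roomID]
	}
	return out, nil
}
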
@@ -29,8 +29,9 @@ import (
 	"github.com/matrix-org/dendrite/syncapi/storage/tables"
 	"github.com/matrix-org/dendrite/syncapi/types"

-	"github.com/matrix-org/dendrite/internal/sqlutil"
 	"github.com/matrix-org/gomatrixserverlib"
+
+	"github.com/matrix-org/dendrite/internal/sqlutil"
 )

 const outputRoomEventsSchema = `
@@ -189,21 +190,36 @@ func (s *outputRoomEventsStatements) SelectStateInRange(
 	for _, roomID := range roomIDs {
 		inputParams = append(inputParams, roomID)
 	}
-	stmt, params, err := prepareWithFilters(
-		s.db, txn, stmtSQL, inputParams,
-		stateFilter.Senders, stateFilter.NotSenders,
-		stateFilter.Types, stateFilter.NotTypes,
-		nil, stateFilter.ContainsURL, stateFilter.Limit, FilterOrderAsc,
-	)
+	var (
+		stmt   *sql.Stmt
+		params []any
+		err    error
+	)
+	if stateFilter != nil {
+		stmt, params, err = prepareWithFilters(
+			s.db, txn, stmtSQL, inputParams,
+			stateFilter.Senders, stateFilter.NotSenders,
+			stateFilter.Types, stateFilter.NotTypes,
+			nil, stateFilter.ContainsURL, stateFilter.Limit, FilterOrderAsc,
+		)
+	} else {
+		stmt, params, err = prepareWithFilters(
+			s.db, txn, stmtSQL, inputParams,
+			nil, nil,
+			nil, nil,
+			nil, nil, int(r.High()-r.Low()), FilterOrderAsc,
+		)
+	}
 	if err != nil {
 		return nil, nil, fmt.Errorf("s.prepareWithFilters: %w", err)
 	}
+	defer internal.CloseAndLogIfError(ctx, stmt, "selectStateInRange: stmt.close() failed")

 	rows, err := stmt.QueryContext(ctx, params...)
 	if err != nil {
 		return nil, nil, err
 	}
-	defer rows.Close() // nolint: errcheck
+	defer internal.CloseAndLogIfError(ctx, rows, "selectStateInRange: rows.close() failed")
 	// Fetch all the state change events for all rooms between the two positions then loop each event and:
 	// - Keep a cache of the event by ID (99% of state change events are for the event itself)
 	// - For each room ID, build up an array of event IDs which represents cumulative adds/removes
@@ -269,6 +285,7 @@ func (s *outputRoomEventsStatements) SelectMaxEventID(
 ) (id int64, err error) {
 	var nullableID sql.NullInt64
 	stmt := sqlutil.TxStmt(txn, s.selectMaxEventIDStmt)
+	defer internal.CloseAndLogIfError(ctx, stmt, "SelectMaxEventID: stmt.close() failed")
 	err = stmt.QueryRowContext(ctx).Scan(&nullableID)
 	if nullableID.Valid {
 		id = nullableID.Int64
@@ -323,6 +340,7 @@ func (s *outputRoomEventsStatements) InsertEvent(
 		return 0, err
 	}
 	insertStmt := sqlutil.TxStmt(txn, s.insertEventStmt)
+	defer internal.CloseAndLogIfError(ctx, insertStmt, "InsertEvent: stmt.close() failed")
 	_, err = insertStmt.ExecContext(
 		ctx,
 		streamPos,
@@ -367,6 +385,7 @@ func (s *outputRoomEventsStatements) SelectRecentEvents(
 	if err != nil {
 		return nil, false, fmt.Errorf("s.prepareWithFilters: %w", err)
 	}
+	defer internal.CloseAndLogIfError(ctx, stmt, "selectRecentEvents: stmt.close() failed")

 	rows, err := stmt.QueryContext(ctx, params...)
 	if err != nil {
@@ -415,6 +434,8 @@ func (s *outputRoomEventsStatements) SelectEarlyEvents(
 	if err != nil {
 		return nil, fmt.Errorf("s.prepareWithFilters: %w", err)
 	}
+	defer internal.CloseAndLogIfError(ctx, stmt, "SelectEarlyEvents: stmt.close() failed")
+
 	rows, err := stmt.QueryContext(ctx, params...)
 	if err != nil {
 		return nil, err
@@ -456,6 +477,8 @@ func (s *outputRoomEventsStatements) SelectEvents(
 	if err != nil {
 		return nil, err
 	}
+	defer internal.CloseAndLogIfError(ctx, stmt, "SelectEvents: stmt.close() failed")
+
 	rows, err := stmt.QueryContext(ctx, params...)
 	if err != nil {
 		return nil, err
@@ -558,6 +581,10 @@ func (s *outputRoomEventsStatements) SelectContextBeforeEvent(
 		filter.Types, filter.NotTypes,
 		nil, filter.ContainsURL, filter.Limit, FilterOrderDesc,
 	)
+	if err != nil {
+		return
+	}
+	defer internal.CloseAndLogIfError(ctx, stmt, "SelectContextBeforeEvent: stmt.close() failed")

 	rows, err := stmt.QueryContext(ctx, params...)
 	if err != nil {
@@ -596,6 +623,10 @@ func (s *outputRoomEventsStatements) SelectContextAfterEvent(
 		filter.Types, filter.NotTypes,
 		nil, filter.ContainsURL, filter.Limit, FilterOrderAsc,
 	)
+	if err != nil {
+		return
+	}
+	defer internal.CloseAndLogIfError(ctx, stmt, "SelectContextAfterEvent: stmt.close() failed")

 	rows, err := stmt.QueryContext(ctx, params...)
 	if err != nil {

@@ -74,21 +74,26 @@ func (p *InviteStreamProvider) IncrementalSync(
 		return to
 	}
 	for roomID := range retiredInvites {
-		if _, ok := req.Response.Rooms.Join[roomID]; !ok {
-			lr := types.NewLeaveResponse()
-			h := sha256.Sum256(append([]byte(roomID), []byte(strconv.FormatInt(int64(to), 10))...))
-			lr.Timeline.Events = append(lr.Timeline.Events, gomatrixserverlib.ClientEvent{
-				// fake event ID which muxes in the to position
-				EventID:        "$" + base64.RawURLEncoding.EncodeToString(h[:]),
-				OriginServerTS: gomatrixserverlib.AsTimestamp(time.Now()),
-				RoomID:         roomID,
-				Sender:         req.Device.UserID,
-				StateKey:       &req.Device.UserID,
-				Type:           "m.room.member",
-				Content:        gomatrixserverlib.RawJSON(`{"membership":"leave"}`),
-			})
-			req.Response.Rooms.Leave[roomID] = lr
+		if _, ok := req.Response.Rooms.Invite[roomID]; ok {
+			continue
 		}
+		if _, ok := req.Response.Rooms.Join[roomID]; ok {
+			continue
+		}
+		lr := types.NewLeaveResponse()
+		h := sha256.Sum256(append([]byte(roomID), []byte(strconv.FormatInt(int64(to), 10))...))
+		lr.Timeline.Events = append(lr.Timeline.Events, gomatrixserverlib.ClientEvent{
+			// fake event ID which muxes in the to position
+			EventID:        "$" + base64.RawURLEncoding.EncodeToString(h[:]),
+			OriginServerTS: gomatrixserverlib.AsTimestamp(time.Now()),
+			RoomID:         roomID,
+			Sender:         req.Device.UserID,
+			StateKey:       &req.Device.UserID,
+			Type:           "m.room.member",
+			Content:        gomatrixserverlib.RawJSON(`{"membership":"leave"}`),
+		})
+		req.Response.Rooms.Leave[roomID] = lr
 	}

 	return maxID

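Review note: the restructured loop above keeps the synthetic leave event that the invite stream sends when an invite is retired, but now skips it whenever the room already appears in the invite or join sections of the same response. The fake event ID mixes the room ID with the `to` position so that the same sync token always produces the same ID; a standalone sketch of that derivation:

package main

import (
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"strconv"
)

// syntheticLeaveEventID mirrors how the invite stream builds a fake event ID
// that muxes in the stream position, so a given (room, position) pair always
// yields the same identifier.
func syntheticLeaveEventID(roomID string, to int64) string {
	h := sha256.Sum256(append([]byte(roomID), []byte(strconv.FormatInt(to, 10))...))
	return "$" + base64.RawURLEncoding.EncodeToString(h[:])
}

func main() {
	fmt.Println(syntheticLeaveEventID("!example:server", 42))
}
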
@@ -3,6 +3,7 @@ package streams
 import (
 	"context"

+	"github.com/matrix-org/dendrite/internal/eventutil"
 	"github.com/matrix-org/dendrite/syncapi/storage"
 	"github.com/matrix-org/dendrite/syncapi/types"
 )
@@ -53,7 +54,7 @@ func (p *NotificationDataStreamProvider) IncrementalSync(
 	for roomID, jr := range req.Response.Rooms.Join {
 		counts := countsByRoom[roomID]
 		if counts == nil {
-			continue
+			counts = &eventutil.NotificationData{}
 		}
 		jr.UnreadNotifications = &types.UnreadNotifications{
 			HighlightCount: counts.UnreadHighlightCount,

@@ -194,7 +194,7 @@ func (p *PDUStreamProvider) IncrementalSync(
 			}
 		}
 		var pos types.StreamPosition
-		if pos, err = p.addRoomDeltaToResponse(ctx, snapshot, req.Device, newRange, delta, &eventFilter, &stateFilter, req.Response); err != nil {
+		if pos, err = p.addRoomDeltaToResponse(ctx, snapshot, req.Device, newRange, delta, &eventFilter, &stateFilter, req); err != nil {
 			req.Log.WithError(err).Error("d.addRoomDeltaToResponse failed")
 			if err == context.DeadlineExceeded || err == context.Canceled || err == sql.ErrTxDone {
 				return newPos
@@ -225,7 +225,7 @@ func (p *PDUStreamProvider) addRoomDeltaToResponse(
 	delta types.StateDelta,
 	eventFilter *gomatrixserverlib.RoomEventFilter,
 	stateFilter *gomatrixserverlib.StateFilter,
-	res *types.Response,
+	req *types.SyncRequest,
 ) (types.StreamPosition, error) {
 	if delta.MembershipPos > 0 && delta.Membership == gomatrixserverlib.Leave {
 		// make sure we don't leak recent events after the leave event.
@@ -290,8 +290,10 @@ func (p *PDUStreamProvider) addRoomDeltaToResponse(
 	hasMembershipChange := false
 	for _, recentEvent := range recentStreamEvents {
 		if recentEvent.Type() == gomatrixserverlib.MRoomMember && recentEvent.StateKey() != nil {
+			if membership, _ := recentEvent.Membership(); membership == gomatrixserverlib.Join {
+				req.MembershipChanges[*recentEvent.StateKey()] = struct{}{}
+			}
 			hasMembershipChange = true
-			break
 		}
 	}
@@ -318,9 +320,9 @@ func (p *PDUStreamProvider) addRoomDeltaToResponse(
 		jr.Timeline.Events = gomatrixserverlib.HeaderedToClientEvents(events, gomatrixserverlib.FormatSync)
 		// If we are limited by the filter AND the history visibility filter
 		// didn't "remove" events, return that the response is limited.
-		jr.Timeline.Limited = limited && len(events) == len(recentEvents)
+		jr.Timeline.Limited = (limited && len(events) == len(recentEvents)) || delta.NewlyJoined
 		jr.State.Events = gomatrixserverlib.HeaderedToClientEvents(delta.StateEvents, gomatrixserverlib.FormatSync)
-		res.Rooms.Join[delta.RoomID] = jr
+		req.Response.Rooms.Join[delta.RoomID] = jr

 	case gomatrixserverlib.Peek:
 		jr := types.NewJoinResponse()
@@ -329,7 +331,7 @@ func (p *PDUStreamProvider) addRoomDeltaToResponse(
 		jr.Timeline.Events = gomatrixserverlib.HeaderedToClientEvents(recentEvents, gomatrixserverlib.FormatSync)
 		jr.Timeline.Limited = limited
 		jr.State.Events = gomatrixserverlib.HeaderedToClientEvents(delta.StateEvents, gomatrixserverlib.FormatSync)
-		res.Rooms.Peek[delta.RoomID] = jr
+		req.Response.Rooms.Peek[delta.RoomID] = jr

 	case gomatrixserverlib.Leave:
 		fallthrough // transitions to leave are the same as ban
@@ -342,7 +344,7 @@ func (p *PDUStreamProvider) addRoomDeltaToResponse(
 		// didn't "remove" events, return that the response is limited.
 		lr.Timeline.Limited = limited && len(events) == len(recentEvents)
 		lr.State.Events = gomatrixserverlib.HeaderedToClientEvents(delta.StateEvents, gomatrixserverlib.FormatSync)
-		res.Rooms.Leave[delta.RoomID] = lr
+		req.Response.Rooms.Leave[delta.RoomID] = lr
 	}

 	return latestPosition, nil

@@ -121,7 +121,8 @@ func (p *PresenceStreamProvider) IncrementalSync(
 		prevPresence := pres.(*types.PresenceInternal)
 		currentlyActive := prevPresence.CurrentlyActive()
 		skip := prevPresence.Equals(presence) && currentlyActive && req.Device.UserID != presence.UserID
-		if skip {
+		_, membershipChange := req.MembershipChanges[presence.UserID]
+		if skip && !membershipChange {
 			req.Log.Tracef("Skipping presence, no change (%s)", presence.UserID)
 			continue
 		}

@@ -91,15 +91,16 @@ func newSyncRequest(req *http.Request, device userapi.Device, syncDB storage.Dat
 	})

 	return &types.SyncRequest{
 		Context:           req.Context(),              //
 		Log:               logger,                     //
 		Device:            &device,                    //
 		Response:          types.NewResponse(),        // Populated by all streams
 		Filter:            filter,                     //
 		Since:             since,                      //
 		Timeout:           timeout,                    //
 		Rooms:             make(map[string]string),    // Populated by the PDU stream
 		WantFullState:     wantFullState,              //
+		MembershipChanges: make(map[string]struct{}), // Populated by the PDU stream
 	}, nil
 }

@@ -4,9 +4,10 @@ import (
 	"context"
 	"time"

-	userapi "github.com/matrix-org/dendrite/userapi/api"
 	"github.com/matrix-org/gomatrixserverlib"
 	"github.com/sirupsen/logrus"
+
+	userapi "github.com/matrix-org/dendrite/userapi/api"
 )

 type SyncRequest struct {
@@ -22,6 +23,8 @@ type SyncRequest struct {
 	// Updated by the PDU stream.
 	Rooms map[string]string
 	// Updated by the PDU stream.
+	MembershipChanges map[string]struct{}
+	// Updated by the PDU stream.
 	IgnoredUsers IgnoredUsers
 }

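Review note: SyncRequest gains a MembershipChanges map which the PDU stream fills with users who joined during the window and the presence stream reads back, so a newly joined member's presence is not skipped as "unchanged". A tiny sketch of that handoff, with hypothetical stand-ins for the two stream providers:

package sketch

// syncRequest carries per-request scratch state shared between streams,
// just like the MembershipChanges field added above.
type syncRequest struct {
	MembershipChanges map[string]struct{}
}

// pduStream records join transitions it saw while building the timeline.
func pduStream(req *syncRequest, joinedUsers []string) {
	for _, userID := range joinedUsers {
		req.MembershipChanges[userID] = struct{}{}
	}
}

// presenceStream only skips a user's presence when it is unchanged AND the
// user did not just join a room visible in this response.
func presenceStream(req *syncRequest, userID string, unchanged bool) (skip bool) {
	_, membershipChange := req.MembershipChanges[userID]
	return unchanged && !membershipChange
}
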
@@ -492,9 +492,11 @@ func (jr JoinResponse) MarshalJSON() ([]byte, error) {
 		}

 	}
-	if jr.UnreadNotifications != nil &&
-		jr.UnreadNotifications.NotificationCount == 0 && jr.UnreadNotifications.HighlightCount == 0 {
-		a.UnreadNotifications = nil
+	if jr.UnreadNotifications != nil {
+		// if everything else is nil, also remove UnreadNotifications
+		if a.State == nil && a.Ephemeral == nil && a.AccountData == nil && a.Timeline == nil && a.Summary == nil {
+			a.UnreadNotifications = nil
+		}
 	}
 	return json.Marshal(a)
 }

@@ -22,10 +22,6 @@ Forgotten room messages cannot be paginated

 Local device key changes get to remote servers with correct prev_id

-# Flakey
-
-Local device key changes appear in /keys/changes
-
 # we don't support groups

 Remove group category
@@ -39,12 +35,6 @@ Events in rooms with AS-hosted room aliases are sent to AS server
 Inviting an AS-hosted user asks the AS server
 Accesing an AS-hosted room alias asks the AS server

-# Flakey, need additional investigation
-
-Messages that notify from another user increment notification_count
-Messages that highlight from another user increment unread highlight count
-Notifications can be viewed with GET /notifications
-
 # More flakey

 Guest users can join guest_access rooms

@@ -743,4 +743,13 @@ User joining then leaving public room appears and dissappears from directory
 User in remote room doesn't appear in user directory after server left room
 User in shared private room does appear in user directory until leave
 Existing members see new member's presence
 Inbound federation can return missing events for joined visibility
+outliers whose auth_events are in a different room are correctly rejected
+Messages that notify from another user increment notification_count
+Messages that highlight from another user increment unread highlight count
+Newly joined room has correct timeline in incremental sync
+When user joins a room the state is included in the next sync
+When user joins a room the state is included in a gapped sync
+Messages that notify from another user increment notification_count
+Messages that highlight from another user increment unread highlight count
+Notifications can be viewed with GET /notifications

@@ -30,6 +30,7 @@ type eventMods struct {
 	unsigned       interface{}
 	keyID          gomatrixserverlib.KeyID
 	privKey        ed25519.PrivateKey
+	authEvents     []string
 }

 type eventModifier func(e *eventMods)
@@ -52,6 +53,12 @@ func WithUnsigned(unsigned interface{}) eventModifier {
 	}
 }

+func WithAuthIDs(evs []string) eventModifier {
+	return func(e *eventMods) {
+		e.authEvents = evs
+	}
+}
+
 func WithKeyID(keyID gomatrixserverlib.KeyID) eventModifier {
 	return func(e *eventMods) {
 		e.keyID = keyID

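Review note: the new WithAuthIDs modifier, together with the CreateEvent change in the next hunk, is what lets roomserver/internal/input/input_events_test.go hand-craft an event whose auth_events point into a different room. A minimal usage sketch built only from helpers in Dendrite's test package; the event type and content are illustrative:

package input

import (
	"testing"

	"github.com/matrix-org/dendrite/test"
)

// TestWithAuthIDsUsage shows the modifier plugging into CreateEvent: the
// supplied IDs replace the auth events the builder would otherwise compute.
func TestWithAuthIDsUsage(t *testing.T) {
	alice := test.NewUser(t)
	room := test.NewRoom(t, alice)

	var authEventIDs []string
	for _, ev := range room.Events() {
		authEventIDs = append(authEventIDs, ev.EventID())
	}

	room.CreateEvent(t, alice, "m.room.topic", map[string]interface{}{
		"topic": "example",
	}, test.WithAuthIDs(authEventIDs))
}
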
@@ -21,8 +21,9 @@ import (
 	"testing"
 	"time"

-	"github.com/matrix-org/dendrite/internal/eventutil"
 	"github.com/matrix-org/gomatrixserverlib"
+
+	"github.com/matrix-org/dendrite/internal/eventutil"
 )

 type Preset int
@@ -174,11 +175,17 @@ func (r *Room) CreateEvent(t *testing.T, creator *User, eventType string, conten
 	if err != nil {
 		t.Fatalf("CreateEvent[%s]: failed to StateNeededForEventBuilder: %s", eventType, err)
 	}

 	refs, err := eventsNeeded.AuthEventReferences(&r.authEvents)
 	if err != nil {
 		t.Fatalf("CreateEvent[%s]: failed to AuthEventReferences: %s", eventType, err)
 	}
 	builder.AuthEvents = refs

+	if len(mod.authEvents) > 0 {
+		builder.AuthEvents = mod.authEvents
+	}
+
 	ev, err := builder.Build(
 		mod.originServerTS, mod.origin, mod.keyID,
 		mod.privKey, r.Version,

@@ -96,7 +96,7 @@ type ClientUserAPI interface {
 	PerformAccountDeactivation(ctx context.Context, req *PerformAccountDeactivationRequest, res *PerformAccountDeactivationResponse) error
 	PerformOpenIDTokenCreation(ctx context.Context, req *PerformOpenIDTokenCreationRequest, res *PerformOpenIDTokenCreationResponse) error
 	SetAvatarURL(ctx context.Context, req *PerformSetAvatarURLRequest, res *PerformSetAvatarURLResponse) error
-	SetDisplayName(ctx context.Context, req *PerformUpdateDisplayNameRequest, res *struct{}) error
+	SetDisplayName(ctx context.Context, req *PerformUpdateDisplayNameRequest, res *PerformUpdateDisplayNameResponse) error
 	QueryNotifications(ctx context.Context, req *QueryNotificationsRequest, res *QueryNotificationsResponse) error
 	InputAccountData(ctx context.Context, req *InputAccountDataRequest, res *InputAccountDataResponse) error
 	PerformKeyBackup(ctx context.Context, req *PerformKeyBackupRequest, res *PerformKeyBackupResponse) error
@@ -579,7 +579,10 @@ type Notification struct {
 type PerformSetAvatarURLRequest struct {
 	Localpart, AvatarURL string
 }
-type PerformSetAvatarURLResponse struct{}
+type PerformSetAvatarURLResponse struct {
+	Profile *authtypes.Profile `json:"profile"`
+	Changed bool               `json:"changed"`
+}

 type QueryNumericLocalpartResponse struct {
 	ID int64
@@ -606,6 +609,11 @@ type PerformUpdateDisplayNameRequest struct {
 	Localpart, DisplayName string
 }

+type PerformUpdateDisplayNameResponse struct {
+	Profile *authtypes.Profile `json:"profile"`
+	Changed bool               `json:"changed"`
+}
+
 type QueryLocalpartForThreePIDRequest struct {
 	ThreePID, Medium string
 }

@@ -168,7 +168,7 @@ func (t *UserInternalAPITrace) QueryAccountAvailability(ctx context.Context, req
 	return err
 }

-func (t *UserInternalAPITrace) SetDisplayName(ctx context.Context, req *PerformUpdateDisplayNameRequest, res *struct{}) error {
+func (t *UserInternalAPITrace) SetDisplayName(ctx context.Context, req *PerformUpdateDisplayNameRequest, res *PerformUpdateDisplayNameResponse) error {
 	err := t.Impl.SetDisplayName(ctx, req, res)
 	util.GetLogger(ctx).Infof("SetDisplayName req=%+v res=%+v", js(req), js(res))
 	return err

@@ -81,7 +81,7 @@ func (s *OutputReceiptEventConsumer) onMessage(ctx context.Context, msgs []*nats
 	readPos := msg.Header.Get(jetstream.EventID)
 	evType := msg.Header.Get("type")

-	if readPos == "" || evType != "m.read" {
+	if readPos == "" || (evType != "m.read" && evType != "m.read.private") {
 		return true
 	}

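Review note: the receipt consumer now lets m.read.private receipts through to the local read-marker handling alongside m.read. A small helper expressing the same predicate, should other consumers need it (the function name is illustrative):

package consumers

// isReadReceiptType reports whether a receipt type should update local read
// markers. Private read receipts behave like public ones locally; they are
// simply not shown to other users.
func isReadReceiptType(evType string) bool {
	switch evType {
	case "m.read", "m.read.private":
		return true
	default:
		return false
	}
}
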
@@ -10,19 +10,24 @@ import (
 	"github.com/matrix-org/dendrite/internal/pushrules"
 	"github.com/matrix-org/dendrite/setup/config"
 	"github.com/matrix-org/dendrite/test"
+	"github.com/matrix-org/dendrite/test/testrig"
 	"github.com/matrix-org/dendrite/userapi/storage"
 )

 func mustCreateDatabase(t *testing.T, dbType test.DBType) (storage.Database, func()) {
+	base, baseclose := testrig.CreateBaseDendrite(t, dbType)
 	t.Helper()
 	connStr, close := test.PrepareDBConnectionString(t, dbType)
-	db, err := storage.NewUserAPIDatabase(nil, &config.DatabaseOptions{
+	db, err := storage.NewUserAPIDatabase(base, &config.DatabaseOptions{
 		ConnectionString: config.DataSource(connStr),
 	}, "", 4, 0, 0, "")
 	if err != nil {
 		t.Fatalf("failed to create new user db: %v", err)
 	}
-	return db, close
+	return db, func() {
+		close()
+		baseclose()
+	}
 }

 func mustCreateEvent(t *testing.T, content string) *gomatrixserverlib.HeaderedEvent {

@@ -170,7 +170,7 @@ func (a *UserInternalAPI) PerformAccountCreation(ctx context.Context, req *api.P
 		return nil
 	}

-	if err = a.DB.SetDisplayName(ctx, req.Localpart, req.Localpart); err != nil {
+	if _, _, err = a.DB.SetDisplayName(ctx, req.Localpart, req.Localpart); err != nil {
 		return err
 	}
@@ -813,7 +813,10 @@ func (a *UserInternalAPI) QueryPushRules(ctx context.Context, req *api.QueryPush
 }

 func (a *UserInternalAPI) SetAvatarURL(ctx context.Context, req *api.PerformSetAvatarURLRequest, res *api.PerformSetAvatarURLResponse) error {
-	return a.DB.SetAvatarURL(ctx, req.Localpart, req.AvatarURL)
+	profile, changed, err := a.DB.SetAvatarURL(ctx, req.Localpart, req.AvatarURL)
+	res.Profile = profile
+	res.Changed = changed
+	return err
 }

 func (a *UserInternalAPI) QueryNumericLocalpart(ctx context.Context, res *api.QueryNumericLocalpartResponse) error {
@@ -847,8 +850,11 @@ func (a *UserInternalAPI) QueryAccountByPassword(ctx context.Context, req *api.Q
 		}
 	}

-func (a *UserInternalAPI) SetDisplayName(ctx context.Context, req *api.PerformUpdateDisplayNameRequest, _ *struct{}) error {
-	return a.DB.SetDisplayName(ctx, req.Localpart, req.DisplayName)
+func (a *UserInternalAPI) SetDisplayName(ctx context.Context, req *api.PerformUpdateDisplayNameRequest, res *api.PerformUpdateDisplayNameResponse) error {
+	profile, changed, err := a.DB.SetDisplayName(ctx, req.Localpart, req.DisplayName)
+	res.Profile = profile
+	res.Changed = changed
+	return err
 }

 func (a *UserInternalAPI) QueryLocalpartForThreePID(ctx context.Context, req *api.QueryLocalpartForThreePIDRequest, res *api.QueryLocalpartForThreePIDResponse) error {

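Review note: SetDisplayName and SetAvatarURL now report the updated profile and whether anything actually changed, so callers can skip the fan-out of new m.room.member events when the value was already set. A sketch of a caller using the new response; propagateProfileUpdate is a hypothetical stand-in for that fan-out:

package clientapi

import (
	"context"

	"github.com/matrix-org/dendrite/clientapi/auth/authtypes"
	userapi "github.com/matrix-org/dendrite/userapi/api"
)

// propagateProfileUpdate is a hypothetical placeholder for the code that
// emits updated membership events into the user's joined rooms.
func propagateProfileUpdate(ctx context.Context, profile *authtypes.Profile) error {
	_ = ctx
	_ = profile
	return nil
}

func updateDisplayName(ctx context.Context, profileAPI userapi.ClientUserAPI, localpart, displayName string) error {
	res := &userapi.PerformUpdateDisplayNameResponse{}
	if err := profileAPI.SetDisplayName(ctx, &userapi.PerformUpdateDisplayNameRequest{
		Localpart:   localpart,
		DisplayName: displayName,
	}, res); err != nil {
		return err
	}
	if !res.Changed {
		// The display name was already set to this value; nothing to fan out.
		return nil
	}
	return propagateProfileUpdate(ctx, res.Profile)
}
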
@@ -388,7 +388,7 @@ func (h *httpUserInternalAPI) QueryAccountByPassword(
 func (h *httpUserInternalAPI) SetDisplayName(
 	ctx context.Context,
 	request *api.PerformUpdateDisplayNameRequest,
-	response *struct{},
+	response *api.PerformUpdateDisplayNameResponse,
 ) error {
 	return httputil.CallInternalRPCAPI(
 		"SetDisplayName", h.apiURL+PerformSetDisplayNamePath,

@@ -29,8 +29,8 @@ import (
 type Profile interface {
 	GetProfileByLocalpart(ctx context.Context, localpart string) (*authtypes.Profile, error)
 	SearchProfiles(ctx context.Context, searchString string, limit int) ([]authtypes.Profile, error)
-	SetAvatarURL(ctx context.Context, localpart string, avatarURL string) error
-	SetDisplayName(ctx context.Context, localpart string, displayName string) error
+	SetAvatarURL(ctx context.Context, localpart string, avatarURL string) (*authtypes.Profile, bool, error)
+	SetDisplayName(ctx context.Context, localpart string, displayName string) (*authtypes.Profile, bool, error)
 }

 type Account interface {

@@ -26,7 +26,7 @@ import (

 const accountDataSchema = `
 -- Stores data about accounts data.
-CREATE TABLE IF NOT EXISTS account_data (
+CREATE TABLE IF NOT EXISTS userapi_account_datas (
     -- The Matrix user ID localpart for this account
     localpart TEXT NOT NULL,
     -- The room ID for this data (empty string if not specific to a room)
@@ -41,15 +41,15 @@ CREATE TABLE IF NOT EXISTS account_data (
 `

 const insertAccountDataSQL = `
-	INSERT INTO account_data(localpart, room_id, type, content) VALUES($1, $2, $3, $4)
+	INSERT INTO userapi_account_datas(localpart, room_id, type, content) VALUES($1, $2, $3, $4)
 	ON CONFLICT (localpart, room_id, type) DO UPDATE SET content = EXCLUDED.content
 `

 const selectAccountDataSQL = "" +
-	"SELECT room_id, type, content FROM account_data WHERE localpart = $1"
+	"SELECT room_id, type, content FROM userapi_account_datas WHERE localpart = $1"

 const selectAccountDataByTypeSQL = "" +
-	"SELECT content FROM account_data WHERE localpart = $1 AND room_id = $2 AND type = $3"
+	"SELECT content FROM userapi_account_datas WHERE localpart = $1 AND room_id = $2 AND type = $3"

 type accountDataStatements struct {
 	insertAccountDataStmt *sql.Stmt

@ -32,7 +32,7 @@ import (
|
||||||
|
|
||||||
const accountsSchema = `
|
const accountsSchema = `
|
||||||
-- Stores data about accounts.
|
-- Stores data about accounts.
|
||||||
CREATE TABLE IF NOT EXISTS account_accounts (
|
CREATE TABLE IF NOT EXISTS userapi_accounts (
|
||||||
-- The Matrix user ID localpart for this account
|
-- The Matrix user ID localpart for this account
|
||||||
localpart TEXT NOT NULL PRIMARY KEY,
|
localpart TEXT NOT NULL PRIMARY KEY,
|
||||||
-- When this account was first created, as a unix timestamp (ms resolution).
|
-- When this account was first created, as a unix timestamp (ms resolution).
|
||||||
|
|
@ -51,22 +51,22 @@ CREATE TABLE IF NOT EXISTS account_accounts (
|
||||||
`
|
`
|
||||||
|
|
||||||
const insertAccountSQL = "" +
|
const insertAccountSQL = "" +
|
||||||
"INSERT INTO account_accounts(localpart, created_ts, password_hash, appservice_id, account_type) VALUES ($1, $2, $3, $4, $5)"
|
"INSERT INTO userapi_accounts(localpart, created_ts, password_hash, appservice_id, account_type) VALUES ($1, $2, $3, $4, $5)"
|
||||||
|
|
||||||
const updatePasswordSQL = "" +
|
const updatePasswordSQL = "" +
|
||||||
"UPDATE account_accounts SET password_hash = $1 WHERE localpart = $2"
|
"UPDATE userapi_accounts SET password_hash = $1 WHERE localpart = $2"
|
||||||
|
|
||||||
const deactivateAccountSQL = "" +
|
const deactivateAccountSQL = "" +
|
||||||
"UPDATE account_accounts SET is_deactivated = TRUE WHERE localpart = $1"
|
"UPDATE userapi_accounts SET is_deactivated = TRUE WHERE localpart = $1"
|
||||||
|
|
||||||
const selectAccountByLocalpartSQL = "" +
|
const selectAccountByLocalpartSQL = "" +
|
||||||
"SELECT localpart, appservice_id, account_type FROM account_accounts WHERE localpart = $1"
|
"SELECT localpart, appservice_id, account_type FROM userapi_accounts WHERE localpart = $1"
|
||||||
|
|
||||||
const selectPasswordHashSQL = "" +
|
const selectPasswordHashSQL = "" +
|
||||||
"SELECT password_hash FROM account_accounts WHERE localpart = $1 AND is_deactivated = FALSE"
|
"SELECT password_hash FROM userapi_accounts WHERE localpart = $1 AND is_deactivated = FALSE"
|
||||||
|
|
||||||
const selectNewNumericLocalpartSQL = "" +
|
const selectNewNumericLocalpartSQL = "" +
|
||||||
"SELECT COALESCE(MAX(localpart::bigint), 0) FROM account_accounts WHERE localpart ~ '^[0-9]{1,}$'"
|
"SELECT COALESCE(MAX(localpart::bigint), 0) FROM userapi_accounts WHERE localpart ~ '^[0-9]{1,}$'"
|
||||||
|
|
||||||
type accountsStatements struct {
|
type accountsStatements struct {
|
||||||
insertAccountStmt *sql.Stmt
|
insertAccountStmt *sql.Stmt
|
||||||
|
|
|
||||||
|
|
@ -7,7 +7,7 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
func UpIsActive(ctx context.Context, tx *sql.Tx) error {
|
func UpIsActive(ctx context.Context, tx *sql.Tx) error {
|
||||||
_, err := tx.ExecContext(ctx, "ALTER TABLE account_accounts ADD COLUMN IF NOT EXISTS is_deactivated BOOLEAN DEFAULT FALSE;")
|
_, err := tx.ExecContext(ctx, "ALTER TABLE userapi_accounts ADD COLUMN IF NOT EXISTS is_deactivated BOOLEAN DEFAULT FALSE;")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to execute upgrade: %w", err)
|
return fmt.Errorf("failed to execute upgrade: %w", err)
|
||||||
}
|
}
|
||||||
|
|
@ -15,7 +15,7 @@ func UpIsActive(ctx context.Context, tx *sql.Tx) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
func DownIsActive(ctx context.Context, tx *sql.Tx) error {
|
func DownIsActive(ctx context.Context, tx *sql.Tx) error {
|
||||||
_, err := tx.ExecContext(ctx, "ALTER TABLE account_accounts DROP COLUMN is_deactivated;")
|
_, err := tx.ExecContext(ctx, "ALTER TABLE userapi_accounts DROP COLUMN is_deactivated;")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to execute downgrade: %w", err)
|
return fmt.Errorf("failed to execute downgrade: %w", err)
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -8,9 +8,9 @@ import (
|
||||||
|
|
||||||
func UpLastSeenTSIP(ctx context.Context, tx *sql.Tx) error {
|
func UpLastSeenTSIP(ctx context.Context, tx *sql.Tx) error {
|
||||||
_, err := tx.ExecContext(ctx, `
|
_, err := tx.ExecContext(ctx, `
|
||||||
ALTER TABLE device_devices ADD COLUMN IF NOT EXISTS last_seen_ts BIGINT NOT NULL DEFAULT EXTRACT(EPOCH FROM CURRENT_TIMESTAMP)*1000;
|
ALTER TABLE userapi_devices ADD COLUMN IF NOT EXISTS last_seen_ts BIGINT NOT NULL DEFAULT EXTRACT(EPOCH FROM CURRENT_TIMESTAMP)*1000;
|
||||||
ALTER TABLE device_devices ADD COLUMN IF NOT EXISTS ip TEXT;
|
ALTER TABLE userapi_devices ADD COLUMN IF NOT EXISTS ip TEXT;
|
||||||
ALTER TABLE device_devices ADD COLUMN IF NOT EXISTS user_agent TEXT;`)
|
ALTER TABLE userapi_devices ADD COLUMN IF NOT EXISTS user_agent TEXT;`)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to execute upgrade: %w", err)
|
return fmt.Errorf("failed to execute upgrade: %w", err)
|
||||||
}
|
}
|
||||||
|
|
@ -19,9 +19,9 @@ ALTER TABLE device_devices ADD COLUMN IF NOT EXISTS user_agent TEXT;`)
|
||||||
|
|
||||||
func DownLastSeenTSIP(ctx context.Context, tx *sql.Tx) error {
|
func DownLastSeenTSIP(ctx context.Context, tx *sql.Tx) error {
|
||||||
_, err := tx.ExecContext(ctx, `
|
_, err := tx.ExecContext(ctx, `
|
||||||
ALTER TABLE device_devices DROP COLUMN last_seen_ts;
|
ALTER TABLE userapi_devices DROP COLUMN last_seen_ts;
|
||||||
ALTER TABLE device_devices DROP COLUMN ip;
|
ALTER TABLE userapi_devices DROP COLUMN ip;
|
||||||
ALTER TABLE device_devices DROP COLUMN user_agent;`)
|
ALTER TABLE userapi_devices DROP COLUMN user_agent;`)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to execute downgrade: %w", err)
|
return fmt.Errorf("failed to execute downgrade: %w", err)
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -9,10 +9,10 @@ import (
|
||||||
func UpAddAccountType(ctx context.Context, tx *sql.Tx) error {
|
func UpAddAccountType(ctx context.Context, tx *sql.Tx) error {
|
||||||
// initially set every account to a user account, change appservice and guest accounts afterwards
|
// initially set every account to a user account, change appservice and guest accounts afterwards
|
||||||
// (user = 1, guest = 2, admin = 3, appservice = 4)
|
// (user = 1, guest = 2, admin = 3, appservice = 4)
|
||||||
_, err := tx.ExecContext(ctx, `ALTER TABLE account_accounts ADD COLUMN IF NOT EXISTS account_type SMALLINT NOT NULL DEFAULT 1;
|
_, err := tx.ExecContext(ctx, `ALTER TABLE userapi_accounts ADD COLUMN IF NOT EXISTS account_type SMALLINT NOT NULL DEFAULT 1;
|
||||||
UPDATE account_accounts SET account_type = 4 WHERE appservice_id <> '';
|
UPDATE userapi_accounts SET account_type = 4 WHERE appservice_id <> '';
|
||||||
UPDATE account_accounts SET account_type = 2 WHERE localpart ~ '^[0-9]+$';
|
UPDATE userapi_accounts SET account_type = 2 WHERE localpart ~ '^[0-9]+$';
|
||||||
ALTER TABLE account_accounts ALTER COLUMN account_type DROP DEFAULT;`,
|
ALTER TABLE userapi_accounts ALTER COLUMN account_type DROP DEFAULT;`,
|
||||||
)
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to execute upgrade: %w", err)
|
return fmt.Errorf("failed to execute upgrade: %w", err)
|
||||||
|
|
@ -21,7 +21,7 @@ ALTER TABLE account_accounts ALTER COLUMN account_type DROP DEFAULT;`,
|
||||||
}
|
}
|
||||||
|
|
||||||
func DownAddAccountType(ctx context.Context, tx *sql.Tx) error {
|
func DownAddAccountType(ctx context.Context, tx *sql.Tx) error {
|
||||||
_, err := tx.ExecContext(ctx, "ALTER TABLE account_accounts DROP COLUMN account_type;")
|
_, err := tx.ExecContext(ctx, "ALTER TABLE userapi_accounts DROP COLUMN account_type;")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to execute downgrade: %w", err)
|
return fmt.Errorf("failed to execute downgrade: %w", err)
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -0,0 +1,102 @@
|
||||||
|
package deltas
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"database/sql"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/lib/pq"
|
||||||
|
)
|
||||||
|
|
||||||
|
var renameTableMappings = map[string]string{
|
||||||
|
"account_accounts": "userapi_accounts",
|
||||||
|
"account_data": "userapi_account_datas",
|
||||||
|
"device_devices": "userapi_devices",
|
||||||
|
"account_e2e_room_keys": "userapi_key_backups",
|
||||||
|
"account_e2e_room_keys_versions": "userapi_key_backup_versions",
|
||||||
|
"login_tokens": "userapi_login_tokens",
|
||||||
|
"open_id_tokens": "userapi_openid_tokens",
|
||||||
|
"account_profiles": "userapi_profiles",
|
||||||
|
"account_threepid": "userapi_threepids",
|
||||||
|
}
|
||||||
|
|
||||||
|
var renameSequenceMappings = map[string]string{
|
||||||
|
"device_session_id_seq": "userapi_device_session_id_seq",
|
||||||
|
"account_e2e_room_keys_versions_seq": "userapi_key_backup_versions_seq",
|
||||||
|
}
|
||||||
|
|
||||||
|
var renameIndicesMappings = map[string]string{
|
||||||
|
"device_localpart_id_idx": "userapi_device_localpart_id_idx",
|
||||||
|
"e2e_room_keys_idx": "userapi_key_backups_idx",
|
||||||
|
"e2e_room_keys_versions_idx": "userapi_key_backups_versions_idx",
|
||||||
|
"account_e2e_room_keys_versions_idx": "userapi_key_backup_versions_idx",
|
||||||
|
"login_tokens_expiration_idx": "userapi_login_tokens_expiration_idx",
|
||||||
|
"account_threepid_localpart": "userapi_threepid_idx",
|
||||||
|
}
|
||||||
|
|
||||||
|
// I know what you're thinking: you're wondering "why doesn't this use $1
|
||||||
|
// and pass variadic parameters to ExecContext?" — the answer is because
|
||||||
|
// PostgreSQL doesn't expect the table name to be specified as a substituted
|
||||||
|
// argument in that way so it results in a syntax error in the query.
|
||||||
|
|
||||||
|
func UpRenameTables(ctx context.Context, tx *sql.Tx) error {
|
||||||
|
for old, new := range renameTableMappings {
|
||||||
|
q := fmt.Sprintf(
|
||||||
|
"ALTER TABLE IF EXISTS %s RENAME TO %s;",
|
||||||
|
pq.QuoteIdentifier(old), pq.QuoteIdentifier(new),
|
||||||
|
)
|
||||||
|
if _, err := tx.ExecContext(ctx, q); err != nil {
|
||||||
|
return fmt.Errorf("rename table %q to %q error: %w", old, new, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for old, new := range renameSequenceMappings {
|
||||||
|
q := fmt.Sprintf(
|
||||||
|
"ALTER SEQUENCE IF EXISTS %s RENAME TO %s;",
|
||||||
|
pq.QuoteIdentifier(old), pq.QuoteIdentifier(new),
|
||||||
|
)
|
||||||
|
if _, err := tx.ExecContext(ctx, q); err != nil {
|
||||||
|
return fmt.Errorf("rename table %q to %q error: %w", old, new, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for old, new := range renameIndicesMappings {
|
||||||
|
q := fmt.Sprintf(
|
||||||
|
"ALTER INDEX IF EXISTS %s RENAME TO %s;",
|
||||||
|
pq.QuoteIdentifier(old), pq.QuoteIdentifier(new),
|
||||||
|
)
|
||||||
|
if _, err := tx.ExecContext(ctx, q); err != nil {
|
||||||
|
return fmt.Errorf("rename table %q to %q error: %w", old, new, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func DownRenameTables(ctx context.Context, tx *sql.Tx) error {
|
||||||
|
for old, new := range renameTableMappings {
|
||||||
|
q := fmt.Sprintf(
|
||||||
|
"ALTER TABLE IF EXISTS %s RENAME TO %s;",
|
||||||
|
pq.QuoteIdentifier(new), pq.QuoteIdentifier(old),
|
||||||
|
)
|
||||||
|
if _, err := tx.ExecContext(ctx, q); err != nil {
|
||||||
|
return fmt.Errorf("rename table %q to %q error: %w", new, old, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for old, new := range renameSequenceMappings {
|
||||||
|
q := fmt.Sprintf(
|
||||||
|
"ALTER SEQUENCE IF EXISTS %s RENAME TO %s;",
|
||||||
|
pq.QuoteIdentifier(new), pq.QuoteIdentifier(old),
|
||||||
|
)
|
||||||
|
if _, err := tx.ExecContext(ctx, q); err != nil {
|
||||||
|
return fmt.Errorf("rename table %q to %q error: %w", new, old, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for old, new := range renameIndicesMappings {
|
||||||
|
q := fmt.Sprintf(
|
||||||
|
"ALTER INDEX IF EXISTS %s RENAME TO %s;",
|
||||||
|
pq.QuoteIdentifier(new), pq.QuoteIdentifier(old),
|
||||||
|
)
|
||||||
|
if _, err := tx.ExecContext(ctx, q); err != nil {
|
||||||
|
return fmt.Errorf("rename table %q to %q error: %w", new, old, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
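The comment at the top of UpRenameTables above explains why the queries are built with fmt.Sprintf rather than bind parameters: PostgreSQL placeholders such as $1 stand in for values, not identifiers, so an ALTER TABLE with a parameterised table name is rejected as a syntax error. A minimal standalone sketch of the pattern (the helper name is made up for illustration, not part of the migration):

package deltas

import (
	"context"
	"database/sql"
	"fmt"

	"github.com/lib/pq"
)

// renameOne isolates the pattern used by the migration above. Binding the
// names would fail:
//
//	tx.ExecContext(ctx, "ALTER TABLE IF EXISTS $1 RENAME TO $2;", from, to)
//
// because $1/$2 are value placeholders, so each identifier is instead quoted
// with pq.QuoteIdentifier and formatted into the statement text.
func renameOne(ctx context.Context, tx *sql.Tx, from, to string) error {
	q := fmt.Sprintf(
		"ALTER TABLE IF EXISTS %s RENAME TO %s;",
		pq.QuoteIdentifier(from), pq.QuoteIdentifier(to),
	)
	_, err := tx.ExecContext(ctx, q)
	return err
}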
@ -31,10 +31,10 @@ import (
|
||||||
|
|
||||||
const devicesSchema = `
|
const devicesSchema = `
|
||||||
-- This sequence is used for automatic allocation of session_id.
|
-- This sequence is used for automatic allocation of session_id.
|
||||||
CREATE SEQUENCE IF NOT EXISTS device_session_id_seq START 1;
|
CREATE SEQUENCE IF NOT EXISTS userapi_device_session_id_seq START 1;
|
||||||
|
|
||||||
-- Stores data about devices.
|
-- Stores data about devices.
|
||||||
CREATE TABLE IF NOT EXISTS device_devices (
|
CREATE TABLE IF NOT EXISTS userapi_devices (
|
||||||
-- The access token granted to this device. This has to be the primary key
|
-- The access token granted to this device. This has to be the primary key
|
||||||
-- so we can distinguish which device is making a given request.
|
-- so we can distinguish which device is making a given request.
|
||||||
access_token TEXT NOT NULL PRIMARY KEY,
|
access_token TEXT NOT NULL PRIMARY KEY,
|
||||||
|
|
@ -42,7 +42,7 @@ CREATE TABLE IF NOT EXISTS device_devices (
|
||||||
-- This can be used as a secure substitution of the access token in situations
|
-- This can be used as a secure substitution of the access token in situations
|
||||||
-- where data is associated with access tokens (e.g. transaction storage),
|
-- where data is associated with access tokens (e.g. transaction storage),
|
||||||
-- so we don't have to store users' access tokens everywhere.
|
-- so we don't have to store users' access tokens everywhere.
|
||||||
session_id BIGINT NOT NULL DEFAULT nextval('device_session_id_seq'),
|
session_id BIGINT NOT NULL DEFAULT nextval('userapi_device_session_id_seq'),
|
||||||
-- The device identifier. This only needs to uniquely identify a device for a given user, not globally.
|
-- The device identifier. This only needs to uniquely identify a device for a given user, not globally.
|
||||||
-- access_tokens will be clobbered based on the device ID for a user.
|
-- access_tokens will be clobbered based on the device ID for a user.
|
||||||
device_id TEXT NOT NULL,
|
device_id TEXT NOT NULL,
|
||||||
|
|
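The session_id comment above gives the reason this column exists at all: anything that would otherwise be keyed on the raw access token (the comment's example is transaction storage) can be keyed on the numeric session ID instead. A purely illustrative sketch of that kind of lookup, with all names invented for the example:

package example

// txnKey keys client transaction idempotency data by the device's session_id
// (userapi_devices.session_id), so the cache never holds the raw access token.
type txnKey struct {
	SessionID int64  // BIGINT session_id from the devices table
	TxnID     string // the client-supplied transaction ID
}

type txnCache map[txnKey][]byte // cached response body per transaction

func (c txnCache) lookup(sessionID int64, txnID string) ([]byte, bool) {
	body, ok := c[txnKey{SessionID: sessionID, TxnID: txnID}]
	return body, ok
}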
@ -65,39 +65,39 @@ CREATE TABLE IF NOT EXISTS device_devices (
|
||||||
);
|
);
|
||||||
|
|
||||||
-- Device IDs must be unique for a given user.
|
-- Device IDs must be unique for a given user.
|
||||||
CREATE UNIQUE INDEX IF NOT EXISTS device_localpart_id_idx ON device_devices(localpart, device_id);
|
CREATE UNIQUE INDEX IF NOT EXISTS userapi_device_localpart_id_idx ON userapi_devices(localpart, device_id);
|
||||||
`
|
`
|
||||||
|
|
||||||
const insertDeviceSQL = "" +
|
const insertDeviceSQL = "" +
|
||||||
"INSERT INTO device_devices(device_id, localpart, access_token, created_ts, display_name, last_seen_ts, ip, user_agent) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)" +
|
"INSERT INTO userapi_devices(device_id, localpart, access_token, created_ts, display_name, last_seen_ts, ip, user_agent) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)" +
|
||||||
" RETURNING session_id"
|
" RETURNING session_id"
|
||||||
|
|
||||||
const selectDeviceByTokenSQL = "" +
|
const selectDeviceByTokenSQL = "" +
|
||||||
"SELECT session_id, device_id, localpart FROM device_devices WHERE access_token = $1"
|
"SELECT session_id, device_id, localpart FROM userapi_devices WHERE access_token = $1"
|
||||||
|
|
||||||
const selectDeviceByIDSQL = "" +
|
const selectDeviceByIDSQL = "" +
|
||||||
"SELECT display_name, last_seen_ts, ip FROM device_devices WHERE localpart = $1 and device_id = $2"
|
"SELECT display_name, last_seen_ts, ip FROM userapi_devices WHERE localpart = $1 and device_id = $2"
|
||||||
|
|
||||||
const selectDevicesByLocalpartSQL = "" +
|
const selectDevicesByLocalpartSQL = "" +
|
||||||
"SELECT device_id, display_name, last_seen_ts, ip, user_agent FROM device_devices WHERE localpart = $1 AND device_id != $2 ORDER BY last_seen_ts DESC"
|
"SELECT device_id, display_name, last_seen_ts, ip, user_agent FROM userapi_devices WHERE localpart = $1 AND device_id != $2 ORDER BY last_seen_ts DESC"
|
||||||
|
|
||||||
const updateDeviceNameSQL = "" +
|
const updateDeviceNameSQL = "" +
|
||||||
"UPDATE device_devices SET display_name = $1 WHERE localpart = $2 AND device_id = $3"
|
"UPDATE userapi_devices SET display_name = $1 WHERE localpart = $2 AND device_id = $3"
|
||||||
|
|
||||||
const deleteDeviceSQL = "" +
|
const deleteDeviceSQL = "" +
|
||||||
"DELETE FROM device_devices WHERE device_id = $1 AND localpart = $2"
|
"DELETE FROM userapi_devices WHERE device_id = $1 AND localpart = $2"
|
||||||
|
|
||||||
const deleteDevicesByLocalpartSQL = "" +
|
const deleteDevicesByLocalpartSQL = "" +
|
||||||
"DELETE FROM device_devices WHERE localpart = $1 AND device_id != $2"
|
"DELETE FROM userapi_devices WHERE localpart = $1 AND device_id != $2"
|
||||||
|
|
||||||
const deleteDevicesSQL = "" +
|
const deleteDevicesSQL = "" +
|
||||||
"DELETE FROM device_devices WHERE localpart = $1 AND device_id = ANY($2)"
|
"DELETE FROM userapi_devices WHERE localpart = $1 AND device_id = ANY($2)"
|
||||||
|
|
||||||
const selectDevicesByIDSQL = "" +
|
const selectDevicesByIDSQL = "" +
|
||||||
"SELECT device_id, localpart, display_name, last_seen_ts FROM device_devices WHERE device_id = ANY($1) ORDER BY last_seen_ts DESC"
|
"SELECT device_id, localpart, display_name, last_seen_ts FROM userapi_devices WHERE device_id = ANY($1) ORDER BY last_seen_ts DESC"
|
||||||
|
|
||||||
const updateDeviceLastSeen = "" +
|
const updateDeviceLastSeen = "" +
|
||||||
"UPDATE device_devices SET last_seen_ts = $1, ip = $2, user_agent = $3 WHERE localpart = $4 AND device_id = $5"
|
"UPDATE userapi_devices SET last_seen_ts = $1, ip = $2, user_agent = $3 WHERE localpart = $4 AND device_id = $5"
|
||||||
|
|
||||||
type devicesStatements struct {
|
type devicesStatements struct {
|
||||||
insertDeviceStmt *sql.Stmt
|
insertDeviceStmt *sql.Stmt
|
||||||
|
|
|
||||||
|
|
@ -26,7 +26,7 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
const keyBackupTableSchema = `
|
const keyBackupTableSchema = `
|
||||||
CREATE TABLE IF NOT EXISTS account_e2e_room_keys (
|
CREATE TABLE IF NOT EXISTS userapi_key_backups (
|
||||||
user_id TEXT NOT NULL,
|
user_id TEXT NOT NULL,
|
||||||
room_id TEXT NOT NULL,
|
room_id TEXT NOT NULL,
|
||||||
session_id TEXT NOT NULL,
|
session_id TEXT NOT NULL,
|
||||||
|
|
@ -37,31 +37,31 @@ CREATE TABLE IF NOT EXISTS account_e2e_room_keys (
|
||||||
is_verified BOOLEAN NOT NULL,
|
is_verified BOOLEAN NOT NULL,
|
||||||
session_data TEXT NOT NULL
|
session_data TEXT NOT NULL
|
||||||
);
|
);
|
||||||
CREATE UNIQUE INDEX IF NOT EXISTS e2e_room_keys_idx ON account_e2e_room_keys(user_id, room_id, session_id, version);
|
CREATE UNIQUE INDEX IF NOT EXISTS userapi_key_backups_idx ON userapi_key_backups(user_id, room_id, session_id, version);
|
||||||
CREATE INDEX IF NOT EXISTS e2e_room_keys_versions_idx ON account_e2e_room_keys(user_id, version);
|
CREATE INDEX IF NOT EXISTS userapi_key_backups_versions_idx ON userapi_key_backups(user_id, version);
|
||||||
`
|
`
|
||||||
|
|
||||||
const insertBackupKeySQL = "" +
|
const insertBackupKeySQL = "" +
|
||||||
"INSERT INTO account_e2e_room_keys(user_id, room_id, session_id, version, first_message_index, forwarded_count, is_verified, session_data) " +
|
"INSERT INTO userapi_key_backups(user_id, room_id, session_id, version, first_message_index, forwarded_count, is_verified, session_data) " +
|
||||||
"VALUES ($1, $2, $3, $4, $5, $6, $7, $8)"
|
"VALUES ($1, $2, $3, $4, $5, $6, $7, $8)"
|
||||||
|
|
||||||
const updateBackupKeySQL = "" +
|
const updateBackupKeySQL = "" +
|
||||||
"UPDATE account_e2e_room_keys SET first_message_index=$1, forwarded_count=$2, is_verified=$3, session_data=$4 " +
|
"UPDATE userapi_key_backups SET first_message_index=$1, forwarded_count=$2, is_verified=$3, session_data=$4 " +
|
||||||
"WHERE user_id=$5 AND room_id=$6 AND session_id=$7 AND version=$8"
|
"WHERE user_id=$5 AND room_id=$6 AND session_id=$7 AND version=$8"
|
||||||
|
|
||||||
const countKeysSQL = "" +
|
const countKeysSQL = "" +
|
||||||
"SELECT COUNT(*) FROM account_e2e_room_keys WHERE user_id = $1 AND version = $2"
|
"SELECT COUNT(*) FROM userapi_key_backups WHERE user_id = $1 AND version = $2"
|
||||||
|
|
||||||
const selectKeysSQL = "" +
|
const selectKeysSQL = "" +
|
||||||
"SELECT room_id, session_id, first_message_index, forwarded_count, is_verified, session_data FROM account_e2e_room_keys " +
|
"SELECT room_id, session_id, first_message_index, forwarded_count, is_verified, session_data FROM userapi_key_backups " +
|
||||||
"WHERE user_id = $1 AND version = $2"
|
"WHERE user_id = $1 AND version = $2"
|
||||||
|
|
||||||
const selectKeysByRoomIDSQL = "" +
|
const selectKeysByRoomIDSQL = "" +
|
||||||
"SELECT room_id, session_id, first_message_index, forwarded_count, is_verified, session_data FROM account_e2e_room_keys " +
|
"SELECT room_id, session_id, first_message_index, forwarded_count, is_verified, session_data FROM userapi_key_backups " +
|
||||||
"WHERE user_id = $1 AND version = $2 AND room_id = $3"
|
"WHERE user_id = $1 AND version = $2 AND room_id = $3"
|
||||||
|
|
||||||
const selectKeysByRoomIDAndSessionIDSQL = "" +
|
const selectKeysByRoomIDAndSessionIDSQL = "" +
|
||||||
"SELECT room_id, session_id, first_message_index, forwarded_count, is_verified, session_data FROM account_e2e_room_keys " +
|
"SELECT room_id, session_id, first_message_index, forwarded_count, is_verified, session_data FROM userapi_key_backups " +
|
||||||
"WHERE user_id = $1 AND version = $2 AND room_id = $3 AND session_id = $4"
|
"WHERE user_id = $1 AND version = $2 AND room_id = $3 AND session_id = $4"
|
||||||
|
|
||||||
type keyBackupStatements struct {
|
type keyBackupStatements struct {
|
||||||
|
|
|
||||||
|
|
@ -26,40 +26,40 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
const keyBackupVersionTableSchema = `
|
const keyBackupVersionTableSchema = `
|
||||||
CREATE SEQUENCE IF NOT EXISTS account_e2e_room_keys_versions_seq;
|
CREATE SEQUENCE IF NOT EXISTS userapi_key_backup_versions_seq;
|
||||||
|
|
||||||
-- the metadata for each generation of encrypted e2e session backups
|
-- the metadata for each generation of encrypted e2e session backups
|
||||||
CREATE TABLE IF NOT EXISTS account_e2e_room_keys_versions (
|
CREATE TABLE IF NOT EXISTS userapi_key_backup_versions (
|
||||||
user_id TEXT NOT NULL,
|
user_id TEXT NOT NULL,
|
||||||
-- this means no two users will ever have the same version of e2e session backups, which strictly
|
-- this means no two users will ever have the same version of e2e session backups, which strictly
|
||||||
-- isn't necessary, but this is easy to do rather than SELECT MAX(version)+1.
|
-- isn't necessary, but this is easy to do rather than SELECT MAX(version)+1.
|
||||||
version BIGINT DEFAULT nextval('account_e2e_room_keys_versions_seq'),
|
version BIGINT DEFAULT nextval('userapi_key_backup_versions_seq'),
|
||||||
algorithm TEXT NOT NULL,
|
algorithm TEXT NOT NULL,
|
||||||
auth_data TEXT NOT NULL,
|
auth_data TEXT NOT NULL,
|
||||||
etag TEXT NOT NULL,
|
etag TEXT NOT NULL,
|
||||||
deleted SMALLINT DEFAULT 0 NOT NULL
|
deleted SMALLINT DEFAULT 0 NOT NULL
|
||||||
);
|
);
|
||||||
|
|
||||||
CREATE UNIQUE INDEX IF NOT EXISTS account_e2e_room_keys_versions_idx ON account_e2e_room_keys_versions(user_id, version);
|
CREATE UNIQUE INDEX IF NOT EXISTS userapi_key_backup_versions_idx ON userapi_key_backup_versions(user_id, version);
|
||||||
`
|
`
|
||||||
|
|
||||||
const insertKeyBackupSQL = "" +
|
const insertKeyBackupSQL = "" +
|
||||||
"INSERT INTO account_e2e_room_keys_versions(user_id, algorithm, auth_data, etag) VALUES ($1, $2, $3, $4) RETURNING version"
|
"INSERT INTO userapi_key_backup_versions(user_id, algorithm, auth_data, etag) VALUES ($1, $2, $3, $4) RETURNING version"
|
||||||
|
|
||||||
const updateKeyBackupAuthDataSQL = "" +
|
const updateKeyBackupAuthDataSQL = "" +
|
||||||
"UPDATE account_e2e_room_keys_versions SET auth_data = $1 WHERE user_id = $2 AND version = $3"
|
"UPDATE userapi_key_backup_versions SET auth_data = $1 WHERE user_id = $2 AND version = $3"
|
||||||
|
|
||||||
const updateKeyBackupETagSQL = "" +
|
const updateKeyBackupETagSQL = "" +
|
||||||
"UPDATE account_e2e_room_keys_versions SET etag = $1 WHERE user_id = $2 AND version = $3"
|
"UPDATE userapi_key_backup_versions SET etag = $1 WHERE user_id = $2 AND version = $3"
|
||||||
|
|
||||||
const deleteKeyBackupSQL = "" +
|
const deleteKeyBackupSQL = "" +
|
||||||
"UPDATE account_e2e_room_keys_versions SET deleted=1 WHERE user_id = $1 AND version = $2"
|
"UPDATE userapi_key_backup_versions SET deleted=1 WHERE user_id = $1 AND version = $2"
|
||||||
|
|
||||||
const selectKeyBackupSQL = "" +
|
const selectKeyBackupSQL = "" +
|
||||||
"SELECT algorithm, auth_data, etag, deleted FROM account_e2e_room_keys_versions WHERE user_id = $1 AND version = $2"
|
"SELECT algorithm, auth_data, etag, deleted FROM userapi_key_backup_versions WHERE user_id = $1 AND version = $2"
|
||||||
|
|
||||||
const selectLatestVersionSQL = "" +
|
const selectLatestVersionSQL = "" +
|
||||||
"SELECT MAX(version) FROM account_e2e_room_keys_versions WHERE user_id = $1"
|
"SELECT MAX(version) FROM userapi_key_backup_versions WHERE user_id = $1"
|
||||||
|
|
||||||
type keyBackupVersionStatements struct {
|
type keyBackupVersionStatements struct {
|
||||||
insertKeyBackupStmt *sql.Stmt
|
insertKeyBackupStmt *sql.Stmt
|
||||||
|
|
|
||||||
|
|
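The schema comment above notes that backup versions come from a global sequence rather than SELECT MAX(version)+1. For context, here is a sketch (not part of the change) of what that per-user alternative would look like: it costs an extra query, and two concurrent writers for the same user can compute the same number, which only the unique (user_id, version) index would then catch. The sequence default avoids that bookkeeping, with the harmless side effect the comment mentions of version numbers being unique across all users rather than per user.

package postgres

import (
	"context"
	"database/sql"
)

// allocateVersionWithMax is illustrative only: the read-then-insert approach
// that the sequence default in userapi_key_backup_versions replaces.
func allocateVersionWithMax(ctx context.Context, txn *sql.Tx, userID string) (int64, error) {
	var version int64
	err := txn.QueryRowContext(
		ctx,
		"SELECT COALESCE(MAX(version), 0) + 1 FROM userapi_key_backup_versions WHERE user_id = $1",
		userID,
	).Scan(&version)
	return version, err
}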
@ -26,7 +26,7 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
const loginTokenSchema = `
|
const loginTokenSchema = `
|
||||||
CREATE TABLE IF NOT EXISTS login_tokens (
|
CREATE TABLE IF NOT EXISTS userapi_login_tokens (
|
||||||
-- The random value of the token issued to a user
|
-- The random value of the token issued to a user
|
||||||
token TEXT NOT NULL PRIMARY KEY,
|
token TEXT NOT NULL PRIMARY KEY,
|
||||||
-- When the token expires
|
-- When the token expires
|
||||||
|
|
@ -37,17 +37,17 @@ CREATE TABLE IF NOT EXISTS login_tokens (
|
||||||
);
|
);
|
||||||
|
|
||||||
-- This index allows efficient garbage collection of expired tokens.
|
-- This index allows efficient garbage collection of expired tokens.
|
||||||
CREATE INDEX IF NOT EXISTS login_tokens_expiration_idx ON login_tokens(token_expires_at);
|
CREATE INDEX IF NOT EXISTS userapi_login_tokens_expiration_idx ON userapi_login_tokens(token_expires_at);
|
||||||
`
|
`
|
||||||
|
|
||||||
const insertLoginTokenSQL = "" +
|
const insertLoginTokenSQL = "" +
|
||||||
"INSERT INTO login_tokens(token, token_expires_at, user_id) VALUES ($1, $2, $3)"
|
"INSERT INTO userapi_login_tokens(token, token_expires_at, user_id) VALUES ($1, $2, $3)"
|
||||||
|
|
||||||
const deleteLoginTokenSQL = "" +
|
const deleteLoginTokenSQL = "" +
|
||||||
"DELETE FROM login_tokens WHERE token = $1 OR token_expires_at <= $2"
|
"DELETE FROM userapi_login_tokens WHERE token = $1 OR token_expires_at <= $2"
|
||||||
|
|
||||||
const selectLoginTokenSQL = "" +
|
const selectLoginTokenSQL = "" +
|
||||||
"SELECT user_id FROM login_tokens WHERE token = $1 AND token_expires_at > $2"
|
"SELECT user_id FROM userapi_login_tokens WHERE token = $1 AND token_expires_at > $2"
|
||||||
|
|
||||||
type loginTokenStatements struct {
|
type loginTokenStatements struct {
|
||||||
insertStmt *sql.Stmt
|
insertStmt *sql.Stmt
|
||||||
|
|
@ -78,7 +78,7 @@ func (s *loginTokenStatements) InsertLoginToken(ctx context.Context, txn *sql.Tx
|
||||||
// deleteByToken removes the named token.
|
// deleteByToken removes the named token.
|
||||||
//
|
//
|
||||||
// As a simple way to garbage-collect stale tokens, we also remove all expired tokens.
|
// As a simple way to garbage-collect stale tokens, we also remove all expired tokens.
|
||||||
// The login_tokens_expiration_idx index should make that efficient.
|
// The userapi_login_tokens_expiration_idx index should make that efficient.
|
||||||
func (s *loginTokenStatements) DeleteLoginToken(ctx context.Context, txn *sql.Tx, token string) error {
|
func (s *loginTokenStatements) DeleteLoginToken(ctx context.Context, txn *sql.Tx, token string) error {
|
||||||
stmt := sqlutil.TxStmt(txn, s.deleteStmt)
|
stmt := sqlutil.TxStmt(txn, s.deleteStmt)
|
||||||
res, err := stmt.ExecContext(ctx, token, time.Now().UTC())
|
res, err := stmt.ExecContext(ctx, token, time.Now().UTC())
|
||||||
|
|
|
||||||
|
|
@ -13,7 +13,7 @@ import (
|
||||||
|
|
||||||
const openIDTokenSchema = `
|
const openIDTokenSchema = `
|
||||||
-- Stores data about openid tokens issued for accounts.
|
-- Stores data about openid tokens issued for accounts.
|
||||||
CREATE TABLE IF NOT EXISTS open_id_tokens (
|
CREATE TABLE IF NOT EXISTS userapi_openid_tokens (
|
||||||
-- The value of the token issued to a user
|
-- The value of the token issued to a user
|
||||||
token TEXT NOT NULL PRIMARY KEY,
|
token TEXT NOT NULL PRIMARY KEY,
|
||||||
-- The Matrix user ID for this account
|
-- The Matrix user ID for this account
|
||||||
|
|
@ -24,10 +24,10 @@ CREATE TABLE IF NOT EXISTS open_id_tokens (
|
||||||
`
|
`
|
||||||
|
|
||||||
const insertOpenIDTokenSQL = "" +
|
const insertOpenIDTokenSQL = "" +
|
||||||
"INSERT INTO open_id_tokens(token, localpart, token_expires_at_ms) VALUES ($1, $2, $3)"
|
"INSERT INTO userapi_openid_tokens(token, localpart, token_expires_at_ms) VALUES ($1, $2, $3)"
|
||||||
|
|
||||||
const selectOpenIDTokenSQL = "" +
|
const selectOpenIDTokenSQL = "" +
|
||||||
"SELECT localpart, token_expires_at_ms FROM open_id_tokens WHERE token = $1"
|
"SELECT localpart, token_expires_at_ms FROM userapi_openid_tokens WHERE token = $1"
|
||||||
|
|
||||||
type openIDTokenStatements struct {
|
type openIDTokenStatements struct {
|
||||||
insertTokenStmt *sql.Stmt
|
insertTokenStmt *sql.Stmt
|
||||||
|
|
|
||||||
|
|
@ -27,7 +27,7 @@ import (
|
||||||
|
|
||||||
const profilesSchema = `
|
const profilesSchema = `
|
||||||
-- Stores profile data for accounts.
|
-- Stores profile data for accounts.
|
||||||
CREATE TABLE IF NOT EXISTS account_profiles (
|
CREATE TABLE IF NOT EXISTS userapi_profiles (
|
||||||
-- The Matrix user ID localpart for this account
|
-- The Matrix user ID localpart for this account
|
||||||
localpart TEXT NOT NULL PRIMARY KEY,
|
localpart TEXT NOT NULL PRIMARY KEY,
|
||||||
-- The display name for this account
|
-- The display name for this account
|
||||||
|
|
@ -38,19 +38,27 @@ CREATE TABLE IF NOT EXISTS account_profiles (
|
||||||
`
|
`
|
||||||
|
|
||||||
const insertProfileSQL = "" +
|
const insertProfileSQL = "" +
|
||||||
"INSERT INTO account_profiles(localpart, display_name, avatar_url) VALUES ($1, $2, $3)"
|
"INSERT INTO userapi_profiles(localpart, display_name, avatar_url) VALUES ($1, $2, $3)"
|
||||||
|
|
||||||
const selectProfileByLocalpartSQL = "" +
|
const selectProfileByLocalpartSQL = "" +
|
||||||
"SELECT localpart, display_name, avatar_url FROM account_profiles WHERE localpart = $1"
|
"SELECT localpart, display_name, avatar_url FROM userapi_profiles WHERE localpart = $1"
|
||||||
|
|
||||||
const setAvatarURLSQL = "" +
|
const setAvatarURLSQL = "" +
|
||||||
"UPDATE account_profiles SET avatar_url = $1 WHERE localpart = $2"
|
"UPDATE userapi_profiles AS new" +
|
||||||
|
" SET avatar_url = $1" +
|
||||||
|
" FROM userapi_profiles AS old" +
|
||||||
|
" WHERE new.localpart = $2" +
|
||||||
|
" RETURNING new.display_name, old.avatar_url <> new.avatar_url"
|
||||||
|
|
||||||
const setDisplayNameSQL = "" +
|
const setDisplayNameSQL = "" +
|
||||||
"UPDATE account_profiles SET display_name = $1 WHERE localpart = $2"
|
"UPDATE userapi_profiles AS new" +
|
||||||
|
" SET display_name = $1" +
|
||||||
|
" FROM userapi_profiles AS old" +
|
||||||
|
" WHERE new.localpart = $2" +
|
||||||
|
" RETURNING new.avatar_url, old.display_name <> new.display_name"
|
||||||
|
|
||||||
const selectProfilesBySearchSQL = "" +
|
const selectProfilesBySearchSQL = "" +
|
||||||
"SELECT localpart, display_name, avatar_url FROM account_profiles WHERE localpart LIKE $1 OR display_name LIKE $1 LIMIT $2"
|
"SELECT localpart, display_name, avatar_url FROM userapi_profiles WHERE localpart LIKE $1 OR display_name LIKE $1 LIMIT $2"
|
||||||
|
|
||||||
type profilesStatements struct {
|
type profilesStatements struct {
|
||||||
serverNoticesLocalpart string
|
serverNoticesLocalpart string
|
||||||
|
|
@ -100,16 +108,28 @@ func (s *profilesStatements) SelectProfileByLocalpart(
|
||||||
|
|
||||||
func (s *profilesStatements) SetAvatarURL(
|
func (s *profilesStatements) SetAvatarURL(
|
||||||
ctx context.Context, txn *sql.Tx, localpart string, avatarURL string,
|
ctx context.Context, txn *sql.Tx, localpart string, avatarURL string,
|
||||||
) (err error) {
|
) (*authtypes.Profile, bool, error) {
|
||||||
_, err = s.setAvatarURLStmt.ExecContext(ctx, avatarURL, localpart)
|
profile := &authtypes.Profile{
|
||||||
return
|
Localpart: localpart,
|
||||||
|
AvatarURL: avatarURL,
|
||||||
|
}
|
||||||
|
var changed bool
|
||||||
|
stmt := sqlutil.TxStmt(txn, s.setAvatarURLStmt)
|
||||||
|
err := stmt.QueryRowContext(ctx, avatarURL, localpart).Scan(&profile.DisplayName, &changed)
|
||||||
|
return profile, changed, err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *profilesStatements) SetDisplayName(
|
func (s *profilesStatements) SetDisplayName(
|
||||||
ctx context.Context, txn *sql.Tx, localpart string, displayName string,
|
ctx context.Context, txn *sql.Tx, localpart string, displayName string,
|
||||||
) (err error) {
|
) (*authtypes.Profile, bool, error) {
|
||||||
_, err = s.setDisplayNameStmt.ExecContext(ctx, displayName, localpart)
|
profile := &authtypes.Profile{
|
||||||
return
|
Localpart: localpart,
|
||||||
|
DisplayName: displayName,
|
||||||
|
}
|
||||||
|
var changed bool
|
||||||
|
stmt := sqlutil.TxStmt(txn, s.setDisplayNameStmt)
|
||||||
|
err := stmt.QueryRowContext(ctx, displayName, localpart).Scan(&profile.AvatarURL, &changed)
|
||||||
|
return profile, changed, err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *profilesStatements) SelectProfilesBySearch(
|
func (s *profilesStatements) SelectProfilesBySearch(
|
||||||
|
|
|
||||||
|
|
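The rewritten SetAvatarURL and SetDisplayName statements above lean on a PostgreSQL trick: UPDATE ... AS new ... FROM userapi_profiles AS old WHERE new.localpart = $2 joins the table to itself on the same row, so the RETURNING clause can see both the pre-update values (old) and the post-update values (new). One statement therefore hands back the untouched profile field and a boolean saying whether the value really changed, which the Go code scans into the profile and the changed flag. On the caller side, one plausible (purely illustrative, not taken from this diff) use of the new flag is to skip follow-up work when nothing moved:

package example

import (
	"context"

	"github.com/matrix-org/dendrite/clientapi/auth/authtypes"
)

// profileDB is a narrowed, assumed view of the userapi storage interface as
// changed by this diff.
type profileDB interface {
	SetDisplayName(ctx context.Context, localpart, displayName string) (*authtypes.Profile, bool, error)
}

// updateDisplayName sketches one use of the changed return value: callers can
// avoid re-announcing a profile that did not actually change.
func updateDisplayName(ctx context.Context, db profileDB, localpart, name string) (*authtypes.Profile, error) {
	profile, changed, err := db.SetDisplayName(ctx, localpart, name)
	if err != nil {
		return nil, err
	}
	if !changed {
		return profile, nil // nothing new to propagate
	}
	// ...otherwise continue with whatever follow-up (profile events, etc.)
	// the caller is responsible for.
	return profile, nil
}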
@ -45,7 +45,7 @@ CREATE INDEX IF NOT EXISTS userapi_daily_visits_localpart_timestamp_idx ON usera
|
||||||
|
|
||||||
const countUsersLastSeenAfterSQL = "" +
|
const countUsersLastSeenAfterSQL = "" +
|
||||||
"SELECT COUNT(*) FROM (" +
|
"SELECT COUNT(*) FROM (" +
|
||||||
" SELECT localpart FROM device_devices WHERE last_seen_ts > $1 " +
|
" SELECT localpart FROM userapi_devices WHERE last_seen_ts > $1 " +
|
||||||
" GROUP BY localpart" +
|
" GROUP BY localpart" +
|
||||||
" ) u"
|
" ) u"
|
||||||
|
|
||||||
|
|
@ -62,7 +62,7 @@ R30Users counts the number of 30 day retained users, defined as:
|
||||||
const countR30UsersSQL = `
|
const countR30UsersSQL = `
|
||||||
SELECT platform, COUNT(*) FROM (
|
SELECT platform, COUNT(*) FROM (
|
||||||
SELECT users.localpart, platform, users.created_ts, MAX(uip.last_seen_ts)
|
SELECT users.localpart, platform, users.created_ts, MAX(uip.last_seen_ts)
|
||||||
FROM account_accounts users
|
FROM userapi_accounts users
|
||||||
INNER JOIN
|
INNER JOIN
|
||||||
(SELECT
|
(SELECT
|
||||||
localpart, last_seen_ts,
|
localpart, last_seen_ts,
|
||||||
|
|
@ -75,7 +75,7 @@ SELECT platform, COUNT(*) FROM (
|
||||||
ELSE 'unknown'
|
ELSE 'unknown'
|
||||||
END
|
END
|
||||||
AS platform
|
AS platform
|
||||||
FROM device_devices
|
FROM userapi_devices
|
||||||
) uip
|
) uip
|
||||||
ON users.localpart = uip.localpart
|
ON users.localpart = uip.localpart
|
||||||
AND users.account_type <> 4
|
AND users.account_type <> 4
|
||||||
|
|
@ -121,7 +121,7 @@ GROUP BY client_type
|
||||||
`
|
`
|
||||||
|
|
||||||
const countUserByAccountTypeSQL = `
|
const countUserByAccountTypeSQL = `
|
||||||
SELECT COUNT(*) FROM account_accounts WHERE account_type = ANY($1)
|
SELECT COUNT(*) FROM userapi_accounts WHERE account_type = ANY($1)
|
||||||
`
|
`
|
||||||
|
|
||||||
// $1 = All non guest AccountType IDs
|
// $1 = All non guest AccountType IDs
|
||||||
|
|
@ -134,7 +134,7 @@ SELECT user_type, COUNT(*) AS count FROM (
|
||||||
WHEN account_type = $2 AND appservice_id IS NULL THEN 'guest'
|
WHEN account_type = $2 AND appservice_id IS NULL THEN 'guest'
|
||||||
WHEN account_type = ANY($1) AND appservice_id IS NOT NULL THEN 'bridged'
|
WHEN account_type = ANY($1) AND appservice_id IS NOT NULL THEN 'bridged'
|
||||||
END AS user_type
|
END AS user_type
|
||||||
FROM account_accounts
|
FROM userapi_accounts
|
||||||
WHERE created_ts > $3
|
WHERE created_ts > $3
|
||||||
) AS t GROUP BY user_type
|
) AS t GROUP BY user_type
|
||||||
`
|
`
|
||||||
|
|
@ -143,14 +143,14 @@ SELECT user_type, COUNT(*) AS count FROM (
|
||||||
const updateUserDailyVisitsSQL = `
|
const updateUserDailyVisitsSQL = `
|
||||||
INSERT INTO userapi_daily_visits(localpart, device_id, timestamp, user_agent)
|
INSERT INTO userapi_daily_visits(localpart, device_id, timestamp, user_agent)
|
||||||
SELECT u.localpart, u.device_id, $1, MAX(u.user_agent)
|
SELECT u.localpart, u.device_id, $1, MAX(u.user_agent)
|
||||||
FROM device_devices AS u
|
FROM userapi_devices AS u
|
||||||
LEFT JOIN (
|
LEFT JOIN (
|
||||||
SELECT localpart, device_id, timestamp FROM userapi_daily_visits
|
SELECT localpart, device_id, timestamp FROM userapi_daily_visits
|
||||||
WHERE timestamp = $1
|
WHERE timestamp = $1
|
||||||
) udv
|
) udv
|
||||||
ON u.localpart = udv.localpart AND u.device_id = udv.device_id
|
ON u.localpart = udv.localpart AND u.device_id = udv.device_id
|
||||||
INNER JOIN device_devices d ON d.localpart = u.localpart
|
INNER JOIN userapi_devices d ON d.localpart = u.localpart
|
||||||
INNER JOIN account_accounts a ON a.localpart = u.localpart
|
INNER JOIN userapi_accounts a ON a.localpart = u.localpart
|
||||||
WHERE $2 <= d.last_seen_ts AND d.last_seen_ts < $3
|
WHERE $2 <= d.last_seen_ts AND d.last_seen_ts < $3
|
||||||
AND a.account_type in (1, 3)
|
AND a.account_type in (1, 3)
|
||||||
GROUP BY u.localpart, u.device_id
|
GROUP BY u.localpart, u.device_id
|
||||||
|
|
|
||||||
|
|
@ -23,6 +23,7 @@ import (
|
||||||
"github.com/matrix-org/dendrite/internal/sqlutil"
|
"github.com/matrix-org/dendrite/internal/sqlutil"
|
||||||
"github.com/matrix-org/dendrite/setup/base"
|
"github.com/matrix-org/dendrite/setup/base"
|
||||||
"github.com/matrix-org/dendrite/setup/config"
|
"github.com/matrix-org/dendrite/setup/config"
|
||||||
|
"github.com/matrix-org/dendrite/userapi/storage/postgres/deltas"
|
||||||
"github.com/matrix-org/dendrite/userapi/storage/shared"
|
"github.com/matrix-org/dendrite/userapi/storage/shared"
|
||||||
|
|
||||||
// Import the postgres database driver.
|
// Import the postgres database driver.
|
||||||
|
|
@ -36,6 +37,16 @@ func NewDatabase(base *base.BaseDendrite, dbProperties *config.DatabaseOptions,
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
m := sqlutil.NewMigrator(db)
|
||||||
|
m.AddMigrations(sqlutil.Migration{
|
||||||
|
Version: "userapi: rename tables",
|
||||||
|
Up: deltas.UpRenameTables,
|
||||||
|
Down: deltas.DownRenameTables,
|
||||||
|
})
|
||||||
|
if err = m.Up(base.Context()); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
accountDataTable, err := NewPostgresAccountDataTable(db)
|
accountDataTable, err := NewPostgresAccountDataTable(db)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("NewPostgresAccountDataTable: %w", err)
|
return nil, fmt.Errorf("NewPostgresAccountDataTable: %w", err)
|
||||||
|
|
|
||||||
|
|
@ -26,7 +26,7 @@ import (
|
||||||
|
|
||||||
const threepidSchema = `
|
const threepidSchema = `
|
||||||
-- Stores data about third party identifiers
|
-- Stores data about third party identifiers
|
||||||
CREATE TABLE IF NOT EXISTS account_threepid (
|
CREATE TABLE IF NOT EXISTS userapi_threepids (
|
||||||
-- The third party identifier
|
-- The third party identifier
|
||||||
threepid TEXT NOT NULL,
|
threepid TEXT NOT NULL,
|
||||||
-- The 3PID medium
|
-- The 3PID medium
|
||||||
|
|
@ -37,20 +37,20 @@ CREATE TABLE IF NOT EXISTS account_threepid (
|
||||||
PRIMARY KEY(threepid, medium)
|
PRIMARY KEY(threepid, medium)
|
||||||
);
|
);
|
||||||
|
|
||||||
CREATE INDEX IF NOT EXISTS account_threepid_localpart ON account_threepid(localpart);
|
CREATE INDEX IF NOT EXISTS userapi_threepid_idx ON userapi_threepids(localpart);
|
||||||
`
|
`
|
||||||
|
|
||||||
const selectLocalpartForThreePIDSQL = "" +
|
const selectLocalpartForThreePIDSQL = "" +
|
||||||
"SELECT localpart FROM account_threepid WHERE threepid = $1 AND medium = $2"
|
"SELECT localpart FROM userapi_threepids WHERE threepid = $1 AND medium = $2"
|
||||||
|
|
||||||
const selectThreePIDsForLocalpartSQL = "" +
|
const selectThreePIDsForLocalpartSQL = "" +
|
||||||
"SELECT threepid, medium FROM account_threepid WHERE localpart = $1"
|
"SELECT threepid, medium FROM userapi_threepids WHERE localpart = $1"
|
||||||
|
|
||||||
const insertThreePIDSQL = "" +
|
const insertThreePIDSQL = "" +
|
||||||
"INSERT INTO account_threepid (threepid, medium, localpart) VALUES ($1, $2, $3)"
|
"INSERT INTO userapi_threepids (threepid, medium, localpart) VALUES ($1, $2, $3)"
|
||||||
|
|
||||||
const deleteThreePIDSQL = "" +
|
const deleteThreePIDSQL = "" +
|
||||||
"DELETE FROM account_threepid WHERE threepid = $1 AND medium = $2"
|
"DELETE FROM userapi_threepids WHERE threepid = $1 AND medium = $2"
|
||||||
|
|
||||||
type threepidStatements struct {
|
type threepidStatements struct {
|
||||||
selectLocalpartForThreePIDStmt *sql.Stmt
|
selectLocalpartForThreePIDStmt *sql.Stmt
|
||||||
|
|
|
||||||
|
|
@ -96,20 +96,24 @@ func (d *Database) GetProfileByLocalpart(
|
||||||
// localpart. Returns an error if something went wrong with the SQL query
|
// localpart. Returns an error if something went wrong with the SQL query
|
||||||
func (d *Database) SetAvatarURL(
|
func (d *Database) SetAvatarURL(
|
||||||
ctx context.Context, localpart string, avatarURL string,
|
ctx context.Context, localpart string, avatarURL string,
|
||||||
) error {
|
) (profile *authtypes.Profile, changed bool, err error) {
|
||||||
return d.Writer.Do(d.DB, nil, func(txn *sql.Tx) error {
|
err = d.Writer.Do(d.DB, nil, func(txn *sql.Tx) error {
|
||||||
return d.Profiles.SetAvatarURL(ctx, txn, localpart, avatarURL)
|
profile, changed, err = d.Profiles.SetAvatarURL(ctx, txn, localpart, avatarURL)
|
||||||
|
return err
|
||||||
})
|
})
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetDisplayName updates the display name of the profile associated with the given
|
// SetDisplayName updates the display name of the profile associated with the given
|
||||||
// localpart. Returns an error if something went wrong with the SQL query
|
// localpart. Returns an error if something went wrong with the SQL query
|
||||||
func (d *Database) SetDisplayName(
|
func (d *Database) SetDisplayName(
|
||||||
ctx context.Context, localpart string, displayName string,
|
ctx context.Context, localpart string, displayName string,
|
||||||
) error {
|
) (profile *authtypes.Profile, changed bool, err error) {
|
||||||
return d.Writer.Do(d.DB, nil, func(txn *sql.Tx) error {
|
err = d.Writer.Do(d.DB, nil, func(txn *sql.Tx) error {
|
||||||
return d.Profiles.SetDisplayName(ctx, txn, localpart, displayName)
|
profile, changed, err = d.Profiles.SetDisplayName(ctx, txn, localpart, displayName)
|
||||||
|
return err
|
||||||
})
|
})
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetPassword sets the account password to the given hash.
|
// SetPassword sets the account password to the given hash.
|
||||||
|
|
|
||||||
|
|
@ -25,7 +25,7 @@ import (
|
||||||
|
|
||||||
const accountDataSchema = `
|
const accountDataSchema = `
|
||||||
-- Stores account data for each account.
|
-- Stores account data for each account.
|
||||||
CREATE TABLE IF NOT EXISTS account_data (
|
CREATE TABLE IF NOT EXISTS userapi_account_datas (
|
||||||
-- The Matrix user ID localpart for this account
|
-- The Matrix user ID localpart for this account
|
||||||
localpart TEXT NOT NULL,
|
localpart TEXT NOT NULL,
|
||||||
-- The room ID for this data (empty string if not specific to a room)
|
-- The room ID for this data (empty string if not specific to a room)
|
||||||
|
|
@ -40,15 +40,15 @@ CREATE TABLE IF NOT EXISTS account_data (
|
||||||
`
|
`
|
||||||
|
|
||||||
const insertAccountDataSQL = `
|
const insertAccountDataSQL = `
|
||||||
INSERT INTO account_data(localpart, room_id, type, content) VALUES($1, $2, $3, $4)
|
INSERT INTO userapi_account_datas(localpart, room_id, type, content) VALUES($1, $2, $3, $4)
|
||||||
ON CONFLICT (localpart, room_id, type) DO UPDATE SET content = $4
|
ON CONFLICT (localpart, room_id, type) DO UPDATE SET content = $4
|
||||||
`
|
`
|
||||||
|
|
||||||
const selectAccountDataSQL = "" +
|
const selectAccountDataSQL = "" +
|
||||||
"SELECT room_id, type, content FROM account_data WHERE localpart = $1"
|
"SELECT room_id, type, content FROM userapi_account_datas WHERE localpart = $1"
|
||||||
|
|
||||||
const selectAccountDataByTypeSQL = "" +
|
const selectAccountDataByTypeSQL = "" +
|
||||||
"SELECT content FROM account_data WHERE localpart = $1 AND room_id = $2 AND type = $3"
|
"SELECT content FROM userapi_account_datas WHERE localpart = $1 AND room_id = $2 AND type = $3"
|
||||||
|
|
||||||
type accountDataStatements struct {
|
type accountDataStatements struct {
|
||||||
db *sql.DB
|
db *sql.DB
|
||||||
|
|
|
||||||
|
|
@ -32,7 +32,7 @@ import (
|
||||||
|
|
||||||
const accountsSchema = `
|
const accountsSchema = `
|
||||||
-- Stores data about accounts.
|
-- Stores data about accounts.
|
||||||
CREATE TABLE IF NOT EXISTS account_accounts (
|
CREATE TABLE IF NOT EXISTS userapi_accounts (
|
||||||
-- The Matrix user ID localpart for this account
|
-- The Matrix user ID localpart for this account
|
||||||
localpart TEXT NOT NULL PRIMARY KEY,
|
localpart TEXT NOT NULL PRIMARY KEY,
|
||||||
-- When this account was first created, as a unix timestamp (ms resolution).
|
-- When this account was first created, as a unix timestamp (ms resolution).
|
||||||
|
|
@ -51,22 +51,22 @@ CREATE TABLE IF NOT EXISTS account_accounts (
|
||||||
`
|
`
|
||||||
|
|
||||||
const insertAccountSQL = "" +
|
const insertAccountSQL = "" +
|
||||||
"INSERT INTO account_accounts(localpart, created_ts, password_hash, appservice_id, account_type) VALUES ($1, $2, $3, $4, $5)"
|
"INSERT INTO userapi_accounts(localpart, created_ts, password_hash, appservice_id, account_type) VALUES ($1, $2, $3, $4, $5)"
|
||||||
|
|
||||||
const updatePasswordSQL = "" +
|
const updatePasswordSQL = "" +
|
||||||
"UPDATE account_accounts SET password_hash = $1 WHERE localpart = $2"
|
"UPDATE userapi_accounts SET password_hash = $1 WHERE localpart = $2"
|
||||||
|
|
||||||
const deactivateAccountSQL = "" +
|
const deactivateAccountSQL = "" +
|
||||||
"UPDATE account_accounts SET is_deactivated = 1 WHERE localpart = $1"
|
"UPDATE userapi_accounts SET is_deactivated = 1 WHERE localpart = $1"
|
||||||
|
|
||||||
const selectAccountByLocalpartSQL = "" +
|
const selectAccountByLocalpartSQL = "" +
|
||||||
"SELECT localpart, appservice_id, account_type FROM account_accounts WHERE localpart = $1"
|
"SELECT localpart, appservice_id, account_type FROM userapi_accounts WHERE localpart = $1"
|
||||||
|
|
||||||
const selectPasswordHashSQL = "" +
|
const selectPasswordHashSQL = "" +
|
||||||
"SELECT password_hash FROM account_accounts WHERE localpart = $1 AND is_deactivated = 0"
|
"SELECT password_hash FROM userapi_accounts WHERE localpart = $1 AND is_deactivated = 0"
|
||||||
|
|
||||||
const selectNewNumericLocalpartSQL = "" +
|
const selectNewNumericLocalpartSQL = "" +
|
||||||
"SELECT COALESCE(MAX(CAST(localpart AS INT)), 0) FROM account_accounts WHERE CAST(localpart AS INT) <> 0"
|
"SELECT COALESCE(MAX(CAST(localpart AS INT)), 0) FROM userapi_accounts WHERE CAST(localpart AS INT) <> 0"
|
||||||
|
|
||||||
type accountsStatements struct {
|
type accountsStatements struct {
|
||||||
db *sql.DB
|
db *sql.DB
|
||||||
|
|
|
||||||
|
|
@ -8,8 +8,8 @@ import (
|
||||||
|
|
||||||
func UpIsActive(ctx context.Context, tx *sql.Tx) error {
|
func UpIsActive(ctx context.Context, tx *sql.Tx) error {
|
||||||
_, err := tx.ExecContext(ctx, `
|
_, err := tx.ExecContext(ctx, `
|
||||||
ALTER TABLE account_accounts RENAME TO account_accounts_tmp;
|
ALTER TABLE userapi_accounts RENAME TO userapi_accounts_tmp;
|
||||||
CREATE TABLE account_accounts (
|
CREATE TABLE userapi_accounts (
|
||||||
localpart TEXT NOT NULL PRIMARY KEY,
|
localpart TEXT NOT NULL PRIMARY KEY,
|
||||||
created_ts BIGINT NOT NULL,
|
created_ts BIGINT NOT NULL,
|
||||||
password_hash TEXT,
|
password_hash TEXT,
|
||||||
|
|
@ -17,13 +17,13 @@ CREATE TABLE account_accounts (
|
||||||
is_deactivated BOOLEAN DEFAULT 0
|
is_deactivated BOOLEAN DEFAULT 0
|
||||||
);
|
);
|
||||||
INSERT
|
INSERT
|
||||||
INTO account_accounts (
|
INTO userapi_accounts (
|
||||||
localpart, created_ts, password_hash, appservice_id
|
localpart, created_ts, password_hash, appservice_id
|
||||||
) SELECT
|
) SELECT
|
||||||
localpart, created_ts, password_hash, appservice_id
|
localpart, created_ts, password_hash, appservice_id
|
||||||
FROM account_accounts_tmp
|
FROM userapi_accounts_tmp
|
||||||
;
|
;
|
||||||
DROP TABLE account_accounts_tmp;`)
|
DROP TABLE userapi_accounts_tmp;`)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to execute upgrade: %w", err)
|
return fmt.Errorf("failed to execute upgrade: %w", err)
|
||||||
}
|
}
|
||||||
|
|
@ -32,21 +32,21 @@ DROP TABLE account_accounts_tmp;`)
|
||||||
|
|
||||||
func DownIsActive(ctx context.Context, tx *sql.Tx) error {
|
func DownIsActive(ctx context.Context, tx *sql.Tx) error {
|
||||||
_, err := tx.ExecContext(ctx, `
|
_, err := tx.ExecContext(ctx, `
|
||||||
ALTER TABLE account_accounts RENAME TO account_accounts_tmp;
|
ALTER TABLE userapi_accounts RENAME TO userapi_accounts_tmp;
|
||||||
CREATE TABLE account_accounts (
|
CREATE TABLE userapi_accounts (
|
||||||
localpart TEXT NOT NULL PRIMARY KEY,
|
localpart TEXT NOT NULL PRIMARY KEY,
|
||||||
created_ts BIGINT NOT NULL,
|
created_ts BIGINT NOT NULL,
|
||||||
password_hash TEXT,
|
password_hash TEXT,
|
||||||
appservice_id TEXT
|
appservice_id TEXT
|
||||||
);
|
);
|
||||||
INSERT
|
INSERT
|
||||||
INTO account_accounts (
|
INTO userapi_accounts (
|
||||||
localpart, created_ts, password_hash, appservice_id
|
localpart, created_ts, password_hash, appservice_id
|
||||||
) SELECT
|
) SELECT
|
||||||
localpart, created_ts, password_hash, appservice_id
|
localpart, created_ts, password_hash, appservice_id
|
||||||
FROM account_accounts_tmp
|
FROM userapi_accounts_tmp
|
||||||
;
|
;
|
||||||
DROP TABLE account_accounts_tmp;`)
|
DROP TABLE userapi_accounts_tmp;`)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to execute downgrade: %w", err)
|
return fmt.Errorf("failed to execute downgrade: %w", err)
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -8,8 +8,8 @@ import (
|
||||||
|
|
||||||
func UpLastSeenTSIP(ctx context.Context, tx *sql.Tx) error {
|
func UpLastSeenTSIP(ctx context.Context, tx *sql.Tx) error {
|
||||||
_, err := tx.ExecContext(ctx, `
|
_, err := tx.ExecContext(ctx, `
|
||||||
ALTER TABLE device_devices RENAME TO device_devices_tmp;
|
ALTER TABLE userapi_devices RENAME TO userapi_devices_tmp;
|
||||||
CREATE TABLE device_devices (
|
CREATE TABLE userapi_devices (
|
||||||
access_token TEXT PRIMARY KEY,
|
access_token TEXT PRIMARY KEY,
|
||||||
session_id INTEGER,
|
session_id INTEGER,
|
||||||
device_id TEXT ,
|
device_id TEXT ,
|
||||||
|
|
@ -22,12 +22,12 @@ func UpLastSeenTSIP(ctx context.Context, tx *sql.Tx) error {
|
||||||
UNIQUE (localpart, device_id)
|
UNIQUE (localpart, device_id)
|
||||||
);
|
);
|
||||||
INSERT
|
INSERT
|
||||||
INTO device_devices (
|
INTO userapi_devices (
|
||||||
access_token, session_id, device_id, localpart, created_ts, display_name, last_seen_ts, ip, user_agent
|
access_token, session_id, device_id, localpart, created_ts, display_name, last_seen_ts, ip, user_agent
|
||||||
) SELECT
|
) SELECT
|
||||||
access_token, session_id, device_id, localpart, created_ts, display_name, created_ts, '', ''
|
access_token, session_id, device_id, localpart, created_ts, display_name, created_ts, '', ''
|
||||||
FROM device_devices_tmp;
|
FROM userapi_devices_tmp;
|
||||||
DROP TABLE device_devices_tmp;`)
|
DROP TABLE userapi_devices_tmp;`)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to execute upgrade: %w", err)
|
return fmt.Errorf("failed to execute upgrade: %w", err)
|
||||||
}
|
}
|
||||||
|
|
@ -36,8 +36,8 @@ func UpLastSeenTSIP(ctx context.Context, tx *sql.Tx) error {
|
||||||
|
|
||||||
func DownLastSeenTSIP(ctx context.Context, tx *sql.Tx) error {
|
func DownLastSeenTSIP(ctx context.Context, tx *sql.Tx) error {
|
||||||
_, err := tx.ExecContext(ctx, `
|
_, err := tx.ExecContext(ctx, `
|
||||||
ALTER TABLE device_devices RENAME TO device_devices_tmp;
|
ALTER TABLE userapi_devices RENAME TO userapi_devices_tmp;
|
||||||
CREATE TABLE IF NOT EXISTS device_devices (
|
CREATE TABLE IF NOT EXISTS userapi_devices (
|
||||||
access_token TEXT PRIMARY KEY,
|
access_token TEXT PRIMARY KEY,
|
||||||
session_id INTEGER,
|
session_id INTEGER,
|
||||||
device_id TEXT ,
|
device_id TEXT ,
|
||||||
|
|
@ -47,12 +47,12 @@ CREATE TABLE IF NOT EXISTS device_devices (
|
||||||
UNIQUE (localpart, device_id)
|
UNIQUE (localpart, device_id)
|
||||||
);
|
);
|
||||||
INSERT
|
INSERT
|
||||||
INTO device_devices (
|
INTO userapi_devices (
|
||||||
access_token, session_id, device_id, localpart, created_ts, display_name
|
access_token, session_id, device_id, localpart, created_ts, display_name
|
||||||
) SELECT
|
) SELECT
|
||||||
access_token, session_id, device_id, localpart, created_ts, display_name
|
access_token, session_id, device_id, localpart, created_ts, display_name
|
||||||
FROM device_devices_tmp;
|
FROM userapi_devices_tmp;
|
||||||
DROP TABLE device_devices_tmp;`)
|
DROP TABLE userapi_devices_tmp;`)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to execute downgrade: %w", err)
|
return fmt.Errorf("failed to execute downgrade: %w", err)
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -9,8 +9,8 @@ import (
|
||||||
func UpAddAccountType(ctx context.Context, tx *sql.Tx) error {
|
func UpAddAccountType(ctx context.Context, tx *sql.Tx) error {
|
||||||
// initially set every account to a user account, change appservice and guest accounts afterwards
|
// initially set every account to a user account, change appservice and guest accounts afterwards
|
||||||
// (user = 1, guest = 2, admin = 3, appservice = 4)
|
// (user = 1, guest = 2, admin = 3, appservice = 4)
|
||||||
_, err := tx.ExecContext(ctx, `ALTER TABLE account_accounts RENAME TO account_accounts_tmp;
|
_, err := tx.ExecContext(ctx, `ALTER TABLE userapi_accounts RENAME TO userapi_accounts_tmp;
|
||||||
CREATE TABLE account_accounts (
|
CREATE TABLE userapi_accounts (
|
||||||
localpart TEXT NOT NULL PRIMARY KEY,
|
localpart TEXT NOT NULL PRIMARY KEY,
|
||||||
created_ts BIGINT NOT NULL,
|
created_ts BIGINT NOT NULL,
|
||||||
password_hash TEXT,
|
password_hash TEXT,
|
||||||
|
|
@ -19,15 +19,15 @@ CREATE TABLE account_accounts (
|
||||||
account_type INTEGER NOT NULL
|
account_type INTEGER NOT NULL
|
||||||
);
|
);
|
||||||
INSERT
|
INSERT
|
||||||
INTO account_accounts (
|
INTO userapi_accounts (
|
||||||
localpart, created_ts, password_hash, appservice_id, account_type
|
localpart, created_ts, password_hash, appservice_id, account_type
|
||||||
) SELECT
|
) SELECT
|
||||||
localpart, created_ts, password_hash, appservice_id, 1
|
localpart, created_ts, password_hash, appservice_id, 1
|
||||||
FROM account_accounts_tmp
|
FROM userapi_accounts_tmp
|
||||||
;
|
;
|
||||||
UPDATE account_accounts SET account_type = 4 WHERE appservice_id <> '';
|
UPDATE userapi_accounts SET account_type = 4 WHERE appservice_id <> '';
|
||||||
UPDATE account_accounts SET account_type = 2 WHERE localpart GLOB '[0-9]*';
|
UPDATE userapi_accounts SET account_type = 2 WHERE localpart GLOB '[0-9]*';
|
||||||
DROP TABLE account_accounts_tmp;`)
|
DROP TABLE userapi_accounts_tmp;`)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to add column: %w", err)
|
return fmt.Errorf("failed to add column: %w", err)
|
||||||
}
|
}
|
||||||
|
|
@ -35,7 +35,7 @@ DROP TABLE account_accounts_tmp;`)
|
||||||
}
|
}
|
||||||
|
|
||||||
func DownAddAccountType(ctx context.Context, tx *sql.Tx) error {
|
func DownAddAccountType(ctx context.Context, tx *sql.Tx) error {
|
||||||
_, err := tx.ExecContext(ctx, `ALTER TABLE account_accounts DROP COLUMN account_type;`)
|
_, err := tx.ExecContext(ctx, `ALTER TABLE userapi_accounts DROP COLUMN account_type;`)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to execute downgrade: %w", err)
|
return fmt.Errorf("failed to execute downgrade: %w", err)
|
||||||
}
|
}
|
||||||
|
|
|
||||||
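Each delta above follows the same SQLite rebuild pattern, because SQLite cannot change a column definition in place: the existing table is renamed aside, the replacement is created with the new definition, the rows are copied across, and the temporary copy is dropped. The helper below is only a sketch of that shape; the function name and parameters are illustrative and are not part of this commit.

package deltas

import (
	"context"
	"database/sql"
	"fmt"
)

// rebuildTable is a hypothetical helper illustrating the rename/copy/drop
// pattern used by the deltas above.
func rebuildTable(ctx context.Context, tx *sql.Tx, table, createSQL, columns string) error {
	stmts := []string{
		fmt.Sprintf("ALTER TABLE %s RENAME TO %s_tmp;", table, table),
		createSQL, // CREATE TABLE <table> (...) with the new definition
		fmt.Sprintf("INSERT INTO %s (%s) SELECT %s FROM %s_tmp;", table, columns, columns, table),
		fmt.Sprintf("DROP TABLE %s_tmp;", table),
	}
	for _, q := range stmts {
		if _, err := tx.ExecContext(ctx, q); err != nil {
			return fmt.Errorf("failed to execute migration step: %w", err)
		}
	}
	return nil
}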
userapi/storage/sqlite3/deltas/2022101711000000_rename_tables.go (new file, 109 lines)
@@ -0,0 +1,109 @@
+package deltas
+
+import (
+	"context"
+	"database/sql"
+	"fmt"
+	"strings"
+)
+
+var renameTableMappings = map[string]string{
+	"account_accounts":               "userapi_accounts",
+	"account_data":                   "userapi_account_datas",
+	"device_devices":                 "userapi_devices",
+	"account_e2e_room_keys":          "userapi_key_backups",
+	"account_e2e_room_keys_versions": "userapi_key_backup_versions",
+	"login_tokens":                   "userapi_login_tokens",
+	"open_id_tokens":                 "userapi_openid_tokens",
+	"account_profiles":               "userapi_profiles",
+	"account_threepid":               "userapi_threepids",
+}
+
+var renameIndicesMappings = map[string]string{
+	"device_localpart_id_idx":            "userapi_device_localpart_id_idx",
+	"e2e_room_keys_idx":                  "userapi_key_backups_idx",
+	"e2e_room_keys_versions_idx":         "userapi_key_backups_versions_idx",
+	"account_e2e_room_keys_versions_idx": "userapi_key_backup_versions_idx",
+	"login_tokens_expiration_idx":        "userapi_login_tokens_expiration_idx",
+	"account_threepid_localpart":         "userapi_threepid_idx",
+}
+
+func UpRenameTables(ctx context.Context, tx *sql.Tx) error {
+	for old, new := range renameTableMappings {
+		// SQLite has no "IF EXISTS" so check if the table exists.
+		var name string
+		if err := tx.QueryRowContext(
+			ctx, "SELECT name FROM sqlite_schema WHERE type = 'table' AND name = $1;", old,
+		).Scan(&name); err != nil {
+			if err == sql.ErrNoRows {
+				continue
+			}
+			return err
+		}
+		q := fmt.Sprintf(
+			"ALTER TABLE %s RENAME TO %s;", old, new,
+		)
+		if _, err := tx.ExecContext(ctx, q); err != nil {
+			return fmt.Errorf("rename table %q to %q error: %w", old, new, err)
+		}
+	}
+	for old, new := range renameIndicesMappings {
+		var query string
+		if err := tx.QueryRowContext(
+			ctx, "SELECT sql FROM sqlite_schema WHERE type = 'index' AND name = $1;", old,
+		).Scan(&query); err != nil {
+			if err == sql.ErrNoRows {
+				continue
+			}
+			return err
+		}
+		query = strings.Replace(query, old, new, 1)
+		if _, err := tx.ExecContext(ctx, fmt.Sprintf("DROP INDEX %s;", old)); err != nil {
+			return fmt.Errorf("drop index %q to %q error: %w", old, new, err)
+		}
+		if _, err := tx.ExecContext(ctx, query); err != nil {
+			return fmt.Errorf("recreate index %q to %q error: %w", old, new, err)
+		}
+	}
+	return nil
+}
+
+func DownRenameTables(ctx context.Context, tx *sql.Tx) error {
+	for old, new := range renameTableMappings {
+		// SQLite has no "IF EXISTS" so check if the table exists.
+		var name string
+		if err := tx.QueryRowContext(
+			ctx, "SELECT name FROM sqlite_schema WHERE type = 'table' AND name = $1;", new,
+		).Scan(&name); err != nil {
+			if err == sql.ErrNoRows {
+				continue
+			}
+			return err
+		}
+		q := fmt.Sprintf(
+			"ALTER TABLE %s RENAME TO %s;", new, old,
+		)
+		if _, err := tx.ExecContext(ctx, q); err != nil {
+			return fmt.Errorf("rename table %q to %q error: %w", new, old, err)
+		}
+	}
+	for old, new := range renameIndicesMappings {
+		var query string
+		if err := tx.QueryRowContext(
+			ctx, "SELECT sql FROM sqlite_schema WHERE type = 'index' AND name = $1;", new,
+		).Scan(&query); err != nil {
+			if err == sql.ErrNoRows {
+				continue
+			}
+			return err
+		}
+		query = strings.Replace(query, new, old, 1)
+		if _, err := tx.ExecContext(ctx, fmt.Sprintf("DROP INDEX %s;", new)); err != nil {
+			return fmt.Errorf("drop index %q to %q error: %w", new, old, err)
+		}
+		if _, err := tx.ExecContext(ctx, query); err != nil {
+			return fmt.Errorf("recreate index %q to %q error: %w", new, old, err)
+		}
+	}
+	return nil
+}
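The new UpRenameTables/DownRenameTables functions only take effect once they are registered with the migrator; the storage.go hunk further down in this commit does exactly that at database-open time. A condensed sketch of that wiring, assuming the sqlutil and deltas packages used in this commit (the wrapper function name is illustrative):

package sqlite3

import (
	"context"
	"database/sql"

	"github.com/matrix-org/dendrite/internal/sqlutil"
	"github.com/matrix-org/dendrite/userapi/storage/sqlite3/deltas"
)

// executeRenameMigration mirrors how the SQLite storage setup registers the
// rename delta before the per-table statements are prepared.
func executeRenameMigration(ctx context.Context, db *sql.DB) error {
	m := sqlutil.NewMigrator(db)
	m.AddMigrations(sqlutil.Migration{
		Version: "userapi: rename tables",
		Up:      deltas.UpRenameTables,
		Down:    deltas.DownRenameTables,
	})
	return m.Up(ctx)
}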
@@ -35,7 +35,7 @@ const devicesSchema = `
 -- CREATE SEQUENCE IF NOT EXISTS device_session_id_seq START 1;

 -- Stores data about devices.
-CREATE TABLE IF NOT EXISTS device_devices (
+CREATE TABLE IF NOT EXISTS userapi_devices (
     access_token TEXT PRIMARY KEY,
     session_id INTEGER,
     device_id TEXT ,
@@ -51,38 +51,38 @@ CREATE TABLE IF NOT EXISTS device_devices (
 `

 const insertDeviceSQL = "" +
-	"INSERT INTO device_devices (device_id, localpart, access_token, created_ts, display_name, session_id, last_seen_ts, ip, user_agent)" +
+	"INSERT INTO userapi_devices (device_id, localpart, access_token, created_ts, display_name, session_id, last_seen_ts, ip, user_agent)" +
 	" VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)"

 const selectDevicesCountSQL = "" +
-	"SELECT COUNT(access_token) FROM device_devices"
+	"SELECT COUNT(access_token) FROM userapi_devices"

 const selectDeviceByTokenSQL = "" +
-	"SELECT session_id, device_id, localpart FROM device_devices WHERE access_token = $1"
+	"SELECT session_id, device_id, localpart FROM userapi_devices WHERE access_token = $1"

 const selectDeviceByIDSQL = "" +
-	"SELECT display_name, last_seen_ts, ip FROM device_devices WHERE localpart = $1 and device_id = $2"
+	"SELECT display_name, last_seen_ts, ip FROM userapi_devices WHERE localpart = $1 and device_id = $2"

 const selectDevicesByLocalpartSQL = "" +
-	"SELECT device_id, display_name, last_seen_ts, ip, user_agent FROM device_devices WHERE localpart = $1 AND device_id != $2 ORDER BY last_seen_ts DESC"
+	"SELECT device_id, display_name, last_seen_ts, ip, user_agent FROM userapi_devices WHERE localpart = $1 AND device_id != $2 ORDER BY last_seen_ts DESC"

 const updateDeviceNameSQL = "" +
-	"UPDATE device_devices SET display_name = $1 WHERE localpart = $2 AND device_id = $3"
+	"UPDATE userapi_devices SET display_name = $1 WHERE localpart = $2 AND device_id = $3"

 const deleteDeviceSQL = "" +
-	"DELETE FROM device_devices WHERE device_id = $1 AND localpart = $2"
+	"DELETE FROM userapi_devices WHERE device_id = $1 AND localpart = $2"

 const deleteDevicesByLocalpartSQL = "" +
-	"DELETE FROM device_devices WHERE localpart = $1 AND device_id != $2"
+	"DELETE FROM userapi_devices WHERE localpart = $1 AND device_id != $2"

 const deleteDevicesSQL = "" +
-	"DELETE FROM device_devices WHERE localpart = $1 AND device_id IN ($2)"
+	"DELETE FROM userapi_devices WHERE localpart = $1 AND device_id IN ($2)"

 const selectDevicesByIDSQL = "" +
-	"SELECT device_id, localpart, display_name, last_seen_ts FROM device_devices WHERE device_id IN ($1) ORDER BY last_seen_ts DESC"
+	"SELECT device_id, localpart, display_name, last_seen_ts FROM userapi_devices WHERE device_id IN ($1) ORDER BY last_seen_ts DESC"

 const updateDeviceLastSeen = "" +
-	"UPDATE device_devices SET last_seen_ts = $1, ip = $2, user_agent = $3 WHERE localpart = $4 AND device_id = $5"
+	"UPDATE userapi_devices SET last_seen_ts = $1, ip = $2, user_agent = $3 WHERE localpart = $4 AND device_id = $5"

 type devicesStatements struct {
 	db *sql.DB
@@ -26,7 +26,7 @@ import (
 )

 const keyBackupTableSchema = `
-CREATE TABLE IF NOT EXISTS account_e2e_room_keys (
+CREATE TABLE IF NOT EXISTS userapi_key_backups (
     user_id TEXT NOT NULL,
     room_id TEXT NOT NULL,
     session_id TEXT NOT NULL,
@@ -37,31 +37,31 @@ CREATE TABLE IF NOT EXISTS account_e2e_room_keys (
     is_verified BOOLEAN NOT NULL,
     session_data TEXT NOT NULL
 );
-CREATE UNIQUE INDEX IF NOT EXISTS e2e_room_keys_idx ON account_e2e_room_keys(user_id, room_id, session_id, version);
+CREATE UNIQUE INDEX IF NOT EXISTS e2e_room_keys_idx ON userapi_key_backups(user_id, room_id, session_id, version);
-CREATE INDEX IF NOT EXISTS e2e_room_keys_versions_idx ON account_e2e_room_keys(user_id, version);
+CREATE INDEX IF NOT EXISTS e2e_room_keys_versions_idx ON userapi_key_backups(user_id, version);
 `

 const insertBackupKeySQL = "" +
-	"INSERT INTO account_e2e_room_keys(user_id, room_id, session_id, version, first_message_index, forwarded_count, is_verified, session_data) " +
+	"INSERT INTO userapi_key_backups(user_id, room_id, session_id, version, first_message_index, forwarded_count, is_verified, session_data) " +
 	"VALUES ($1, $2, $3, $4, $5, $6, $7, $8)"

 const updateBackupKeySQL = "" +
-	"UPDATE account_e2e_room_keys SET first_message_index=$1, forwarded_count=$2, is_verified=$3, session_data=$4 " +
+	"UPDATE userapi_key_backups SET first_message_index=$1, forwarded_count=$2, is_verified=$3, session_data=$4 " +
 	"WHERE user_id=$5 AND room_id=$6 AND session_id=$7 AND version=$8"

 const countKeysSQL = "" +
-	"SELECT COUNT(*) FROM account_e2e_room_keys WHERE user_id = $1 AND version = $2"
+	"SELECT COUNT(*) FROM userapi_key_backups WHERE user_id = $1 AND version = $2"

 const selectKeysSQL = "" +
-	"SELECT room_id, session_id, first_message_index, forwarded_count, is_verified, session_data FROM account_e2e_room_keys " +
+	"SELECT room_id, session_id, first_message_index, forwarded_count, is_verified, session_data FROM userapi_key_backups " +
 	"WHERE user_id = $1 AND version = $2"

 const selectKeysByRoomIDSQL = "" +
-	"SELECT room_id, session_id, first_message_index, forwarded_count, is_verified, session_data FROM account_e2e_room_keys " +
+	"SELECT room_id, session_id, first_message_index, forwarded_count, is_verified, session_data FROM userapi_key_backups " +
 	"WHERE user_id = $1 AND version = $2 AND room_id = $3"

 const selectKeysByRoomIDAndSessionIDSQL = "" +
-	"SELECT room_id, session_id, first_message_index, forwarded_count, is_verified, session_data FROM account_e2e_room_keys " +
+	"SELECT room_id, session_id, first_message_index, forwarded_count, is_verified, session_data FROM userapi_key_backups " +
 	"WHERE user_id = $1 AND version = $2 AND room_id = $3 AND session_id = $4"

 type keyBackupStatements struct {
@@ -27,7 +27,7 @@ import (

 const keyBackupVersionTableSchema = `
 -- the metadata for each generation of encrypted e2e session backups
-CREATE TABLE IF NOT EXISTS account_e2e_room_keys_versions (
+CREATE TABLE IF NOT EXISTS userapi_key_backup_versions (
     user_id TEXT NOT NULL,
     -- this means no 2 users will ever have the same version of e2e session backups which strictly
     -- isn't necessary, but this is easy to do rather than SELECT MAX(version)+1.
@@ -38,26 +38,26 @@ CREATE TABLE IF NOT EXISTS account_e2e_room_keys_versions (
     deleted INTEGER DEFAULT 0 NOT NULL
 );

-CREATE UNIQUE INDEX IF NOT EXISTS account_e2e_room_keys_versions_idx ON account_e2e_room_keys_versions(user_id, version);
+CREATE UNIQUE INDEX IF NOT EXISTS userapi_key_backup_versions_idx ON userapi_key_backup_versions(user_id, version);
 `

 const insertKeyBackupSQL = "" +
-	"INSERT INTO account_e2e_room_keys_versions(user_id, algorithm, auth_data, etag) VALUES ($1, $2, $3, $4) RETURNING version"
+	"INSERT INTO userapi_key_backup_versions(user_id, algorithm, auth_data, etag) VALUES ($1, $2, $3, $4) RETURNING version"

 const updateKeyBackupAuthDataSQL = "" +
-	"UPDATE account_e2e_room_keys_versions SET auth_data = $1 WHERE user_id = $2 AND version = $3"
+	"UPDATE userapi_key_backup_versions SET auth_data = $1 WHERE user_id = $2 AND version = $3"

 const updateKeyBackupETagSQL = "" +
-	"UPDATE account_e2e_room_keys_versions SET etag = $1 WHERE user_id = $2 AND version = $3"
+	"UPDATE userapi_key_backup_versions SET etag = $1 WHERE user_id = $2 AND version = $3"

 const deleteKeyBackupSQL = "" +
-	"UPDATE account_e2e_room_keys_versions SET deleted=1 WHERE user_id = $1 AND version = $2"
+	"UPDATE userapi_key_backup_versions SET deleted=1 WHERE user_id = $1 AND version = $2"

 const selectKeyBackupSQL = "" +
-	"SELECT algorithm, auth_data, etag, deleted FROM account_e2e_room_keys_versions WHERE user_id = $1 AND version = $2"
+	"SELECT algorithm, auth_data, etag, deleted FROM userapi_key_backup_versions WHERE user_id = $1 AND version = $2"

 const selectLatestVersionSQL = "" +
-	"SELECT MAX(version) FROM account_e2e_room_keys_versions WHERE user_id = $1"
+	"SELECT MAX(version) FROM userapi_key_backup_versions WHERE user_id = $1"

 type keyBackupVersionStatements struct {
 	insertKeyBackupStmt *sql.Stmt
@@ -32,7 +32,7 @@ type loginTokenStatements struct {
 }

 const loginTokenSchema = `
-CREATE TABLE IF NOT EXISTS login_tokens (
+CREATE TABLE IF NOT EXISTS userapi_login_tokens (
 	-- The random value of the token issued to a user
 	token TEXT NOT NULL PRIMARY KEY,
 	-- When the token expires
@@ -43,17 +43,17 @@ CREATE TABLE IF NOT EXISTS login_tokens (
 );

 -- This index allows efficient garbage collection of expired tokens.
-CREATE INDEX IF NOT EXISTS login_tokens_expiration_idx ON login_tokens(token_expires_at);
+CREATE INDEX IF NOT EXISTS login_tokens_expiration_idx ON userapi_login_tokens(token_expires_at);
 `

 const insertLoginTokenSQL = "" +
-	"INSERT INTO login_tokens(token, token_expires_at, user_id) VALUES ($1, $2, $3)"
+	"INSERT INTO userapi_login_tokens(token, token_expires_at, user_id) VALUES ($1, $2, $3)"

 const deleteLoginTokenSQL = "" +
-	"DELETE FROM login_tokens WHERE token = $1 OR token_expires_at <= $2"
+	"DELETE FROM userapi_login_tokens WHERE token = $1 OR token_expires_at <= $2"

 const selectLoginTokenSQL = "" +
-	"SELECT user_id FROM login_tokens WHERE token = $1 AND token_expires_at > $2"
+	"SELECT user_id FROM userapi_login_tokens WHERE token = $1 AND token_expires_at > $2"

 func NewSQLiteLoginTokenTable(db *sql.DB) (tables.LoginTokenTable, error) {
 	s := &loginTokenStatements{}
@@ -78,7 +78,7 @@ func (s *loginTokenStatements) InsertLoginToken(ctx context.Context, txn *sql.Tx
 // deleteByToken removes the named token.
 //
 // As a simple way to garbage-collect stale tokens, we also remove all expired tokens.
-// The login_tokens_expiration_idx index should make that efficient.
+// The userapi_login_tokens_expiration_idx index should make that efficient.
 func (s *loginTokenStatements) DeleteLoginToken(ctx context.Context, txn *sql.Tx, token string) error {
 	stmt := sqlutil.TxStmt(txn, s.deleteStmt)
 	res, err := stmt.ExecContext(ctx, token, time.Now().UTC())
@@ -13,7 +13,7 @@ import (

 const openIDTokenSchema = `
 -- Stores data about accounts.
-CREATE TABLE IF NOT EXISTS open_id_tokens (
+CREATE TABLE IF NOT EXISTS userapi_openid_tokens (
 	-- The value of the token issued to a user
 	token TEXT NOT NULL PRIMARY KEY,
 	-- The Matrix user ID for this account
@@ -24,10 +24,10 @@ CREATE TABLE IF NOT EXISTS open_id_tokens (
 `

 const insertOpenIDTokenSQL = "" +
-	"INSERT INTO open_id_tokens(token, localpart, token_expires_at_ms) VALUES ($1, $2, $3)"
+	"INSERT INTO userapi_openid_tokens(token, localpart, token_expires_at_ms) VALUES ($1, $2, $3)"

 const selectOpenIDTokenSQL = "" +
-	"SELECT localpart, token_expires_at_ms FROM open_id_tokens WHERE token = $1"
+	"SELECT localpart, token_expires_at_ms FROM userapi_openid_tokens WHERE token = $1"

 type openIDTokenStatements struct {
 	db *sql.DB
@@ -27,7 +27,7 @@ import (

 const profilesSchema = `
 -- Stores data about accounts profiles.
-CREATE TABLE IF NOT EXISTS account_profiles (
+CREATE TABLE IF NOT EXISTS userapi_profiles (
 	-- The Matrix user ID localpart for this account
 	localpart TEXT NOT NULL PRIMARY KEY,
 	-- The display name for this account
@@ -38,19 +38,21 @@ CREATE TABLE IF NOT EXISTS account_profiles (
 `

 const insertProfileSQL = "" +
-	"INSERT INTO account_profiles(localpart, display_name, avatar_url) VALUES ($1, $2, $3)"
+	"INSERT INTO userapi_profiles(localpart, display_name, avatar_url) VALUES ($1, $2, $3)"

 const selectProfileByLocalpartSQL = "" +
-	"SELECT localpart, display_name, avatar_url FROM account_profiles WHERE localpart = $1"
+	"SELECT localpart, display_name, avatar_url FROM userapi_profiles WHERE localpart = $1"

 const setAvatarURLSQL = "" +
-	"UPDATE account_profiles SET avatar_url = $1 WHERE localpart = $2"
+	"UPDATE userapi_profiles SET avatar_url = $1 WHERE localpart = $2" +
+	" RETURNING display_name"

 const setDisplayNameSQL = "" +
-	"UPDATE account_profiles SET display_name = $1 WHERE localpart = $2"
+	"UPDATE userapi_profiles SET display_name = $1 WHERE localpart = $2" +
+	" RETURNING avatar_url"

 const selectProfilesBySearchSQL = "" +
-	"SELECT localpart, display_name, avatar_url FROM account_profiles WHERE localpart LIKE $1 OR display_name LIKE $1 LIMIT $2"
+	"SELECT localpart, display_name, avatar_url FROM userapi_profiles WHERE localpart LIKE $1 OR display_name LIKE $1 LIMIT $2"

 type profilesStatements struct {
 	db *sql.DB
@@ -102,18 +104,40 @@ func (s *profilesStatements) SelectProfileByLocalpart(

 func (s *profilesStatements) SetAvatarURL(
 	ctx context.Context, txn *sql.Tx, localpart string, avatarURL string,
-) (err error) {
+) (*authtypes.Profile, bool, error) {
+	profile := &authtypes.Profile{
+		Localpart: localpart,
+		AvatarURL: avatarURL,
+	}
+	old, err := s.SelectProfileByLocalpart(ctx, localpart)
+	if err != nil {
+		return old, false, err
+	}
+	if old.AvatarURL == avatarURL {
+		return old, false, nil
+	}
 	stmt := sqlutil.TxStmt(txn, s.setAvatarURLStmt)
-	_, err = stmt.ExecContext(ctx, avatarURL, localpart)
-	return
+	err = stmt.QueryRowContext(ctx, avatarURL, localpart).Scan(&profile.DisplayName)
+	return profile, true, err
 }

 func (s *profilesStatements) SetDisplayName(
 	ctx context.Context, txn *sql.Tx, localpart string, displayName string,
-) (err error) {
+) (*authtypes.Profile, bool, error) {
+	profile := &authtypes.Profile{
+		Localpart:   localpart,
+		DisplayName: displayName,
+	}
+	old, err := s.SelectProfileByLocalpart(ctx, localpart)
+	if err != nil {
+		return old, false, err
+	}
+	if old.DisplayName == displayName {
+		return old, false, nil
+	}
 	stmt := sqlutil.TxStmt(txn, s.setDisplayNameStmt)
-	_, err = stmt.ExecContext(ctx, displayName, localpart)
-	return
+	err = stmt.QueryRowContext(ctx, displayName, localpart).Scan(&profile.AvatarURL)
+	return profile, true, err
 }

 func (s *profilesStatements) SelectProfilesBySearch(
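With the RETURNING clauses above, SetAvatarURL and SetDisplayName now hand back the updated profile plus a flag indicating whether anything actually changed, rather than a bare error. The sketch below is a hypothetical caller (the wrapper names and interface are illustrative, only the SetAvatarURL signature comes from this commit) showing how the bool lets callers skip redundant follow-up work:

package example

import (
	"context"

	"github.com/matrix-org/dendrite/clientapi/auth/authtypes"
)

// profileSetter captures just the method under discussion; the real interface
// is the ProfileTable / user API storage shown elsewhere in this commit.
type profileSetter interface {
	SetAvatarURL(ctx context.Context, localpart, avatarURL string) (*authtypes.Profile, bool, error)
}

// maybeUpdateAvatar only reports an update when the stored value changed.
func maybeUpdateAvatar(ctx context.Context, db profileSetter, localpart, avatarURL string) (*authtypes.Profile, bool, error) {
	profile, changed, err := db.SetAvatarURL(ctx, localpart, avatarURL)
	if err != nil {
		return nil, false, err
	}
	if !changed {
		// Nothing new to propagate further (the avatar was already set).
		return profile, false, nil
	}
	return profile, true, nil
}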
@@ -46,7 +46,7 @@ CREATE INDEX IF NOT EXISTS userapi_daily_visits_localpart_timestamp_idx ON usera

 const countUsersLastSeenAfterSQL = "" +
 	"SELECT COUNT(*) FROM (" +
-	" SELECT localpart FROM device_devices WHERE last_seen_ts > $1 " +
+	" SELECT localpart FROM userapi_devices WHERE last_seen_ts > $1 " +
 	" GROUP BY localpart" +
 	" ) u"
@@ -63,7 +63,7 @@ R30Users counts the number of 30 day retained users, defined as:
 const countR30UsersSQL = `
 SELECT platform, COUNT(*) FROM (
 	SELECT users.localpart, platform, users.created_ts, MAX(uip.last_seen_ts)
-	FROM account_accounts users
+	FROM userapi_accounts users
 	INNER JOIN
 	(SELECT
 		localpart, last_seen_ts,
@@ -76,7 +76,7 @@ SELECT platform, COUNT(*) FROM (
 			ELSE 'unknown'
 			END
 		AS platform
-	FROM device_devices
+	FROM userapi_devices
 	) uip
 	ON users.localpart = uip.localpart
 	AND users.account_type <> 4
@@ -126,7 +126,7 @@ GROUP BY client_type
 `

 const countUserByAccountTypeSQL = `
-SELECT COUNT(*) FROM account_accounts WHERE account_type IN ($1)
+SELECT COUNT(*) FROM userapi_accounts WHERE account_type IN ($1)
 `

 // $1 = Guest AccountType
@@ -139,7 +139,7 @@ SELECT user_type, COUNT(*) AS count FROM (
 		WHEN account_type = $4 AND appservice_id IS NULL THEN 'guest'
 		WHEN account_type IN ($5) AND appservice_id IS NOT NULL THEN 'bridged'
 		END AS user_type
-	FROM account_accounts
+	FROM userapi_accounts
 	WHERE created_ts > $8
 ) AS t GROUP BY user_type
 `
@@ -148,14 +148,14 @@ SELECT user_type, COUNT(*) AS count FROM (
 const updateUserDailyVisitsSQL = `
 INSERT INTO userapi_daily_visits(localpart, device_id, timestamp, user_agent)
 SELECT u.localpart, u.device_id, $1, MAX(u.user_agent)
-FROM device_devices AS u
+FROM userapi_devices AS u
 LEFT JOIN (
 	SELECT localpart, device_id, timestamp FROM userapi_daily_visits
 	WHERE timestamp = $1
 ) udv
 ON u.localpart = udv.localpart AND u.device_id = udv.device_id
-INNER JOIN device_devices d ON d.localpart = u.localpart
+INNER JOIN userapi_devices d ON d.localpart = u.localpart
-INNER JOIN account_accounts a ON a.localpart = u.localpart
+INNER JOIN userapi_accounts a ON a.localpart = u.localpart
 WHERE $2 <= d.last_seen_ts AND d.last_seen_ts < $3
 AND a.account_type in (1, 3)
 GROUP BY u.localpart, u.device_id
@@ -25,6 +25,7 @@ import (
 	"github.com/matrix-org/dendrite/setup/config"

 	"github.com/matrix-org/dendrite/userapi/storage/shared"
+	"github.com/matrix-org/dendrite/userapi/storage/sqlite3/deltas"
 )

 // NewDatabase creates a new accounts and profiles database
@@ -34,6 +35,16 @@ func NewDatabase(base *base.BaseDendrite, dbProperties *config.DatabaseOptions,
 		return nil, err
 	}

+	m := sqlutil.NewMigrator(db)
+	m.AddMigrations(sqlutil.Migration{
+		Version: "userapi: rename tables",
+		Up:      deltas.UpRenameTables,
+		Down:    deltas.DownRenameTables,
+	})
+	if err = m.Up(base.Context()); err != nil {
+		return nil, err
+	}
+
 	accountDataTable, err := NewSQLiteAccountDataTable(db)
 	if err != nil {
 		return nil, fmt.Errorf("NewSQLiteAccountDataTable: %w", err)
@@ -27,7 +27,7 @@ import (

 const threepidSchema = `
 -- Stores data about third party identifiers
-CREATE TABLE IF NOT EXISTS account_threepid (
+CREATE TABLE IF NOT EXISTS userapi_threepids (
 	-- The third party identifier
 	threepid TEXT NOT NULL,
 	-- The 3PID medium
@@ -38,20 +38,20 @@ CREATE TABLE IF NOT EXISTS account_threepid (
 	PRIMARY KEY(threepid, medium)
 );

-CREATE INDEX IF NOT EXISTS account_threepid_localpart ON account_threepid(localpart);
+CREATE INDEX IF NOT EXISTS account_threepid_localpart ON userapi_threepids(localpart);
 `

 const selectLocalpartForThreePIDSQL = "" +
-	"SELECT localpart FROM account_threepid WHERE threepid = $1 AND medium = $2"
+	"SELECT localpart FROM userapi_threepids WHERE threepid = $1 AND medium = $2"

 const selectThreePIDsForLocalpartSQL = "" +
-	"SELECT threepid, medium FROM account_threepid WHERE localpart = $1"
+	"SELECT threepid, medium FROM userapi_threepids WHERE localpart = $1"

 const insertThreePIDSQL = "" +
-	"INSERT INTO account_threepid (threepid, medium, localpart) VALUES ($1, $2, $3)"
+	"INSERT INTO userapi_threepids (threepid, medium, localpart) VALUES ($1, $2, $3)"

 const deleteThreePIDSQL = "" +
-	"DELETE FROM account_threepid WHERE threepid = $1 AND medium = $2"
+	"DELETE FROM userapi_threepids WHERE threepid = $1 AND medium = $2"

 type threepidStatements struct {
 	db *sql.DB
@@ -16,6 +16,7 @@ import (
 	"github.com/matrix-org/dendrite/internal/pushrules"
 	"github.com/matrix-org/dendrite/setup/config"
 	"github.com/matrix-org/dendrite/test"
+	"github.com/matrix-org/dendrite/test/testrig"
 	"github.com/matrix-org/dendrite/userapi/api"
 	"github.com/matrix-org/dendrite/userapi/storage"
 	"github.com/matrix-org/dendrite/userapi/storage/tables"
@@ -29,14 +30,18 @@ var (
 )

 func mustCreateDatabase(t *testing.T, dbType test.DBType) (storage.Database, func()) {
+	base, baseclose := testrig.CreateBaseDendrite(t, dbType)
 	connStr, close := test.PrepareDBConnectionString(t, dbType)
-	db, err := storage.NewUserAPIDatabase(nil, &config.DatabaseOptions{
+	db, err := storage.NewUserAPIDatabase(base, &config.DatabaseOptions{
 		ConnectionString: config.DataSource(connStr),
 	}, "localhost", bcrypt.MinCost, openIDLifetimeMS, loginTokenLifetime, "_server")
 	if err != nil {
 		t.Fatalf("NewUserAPIDatabase returned %s", err)
 	}
-	return db, close
+	return db, func() {
+		close()
+		baseclose()
+	}
 }

 // Tests storing and getting account data
@@ -192,19 +197,18 @@ func Test_Devices(t *testing.T) {
 	newName := "new display name"
 	err = db.UpdateDevice(ctx, localpart, deviceWithID.ID, &newName)
 	assert.NoError(t, err, "unable to update device displayname")
+	updatedAfterTimestamp := time.Now().Unix()
 	err = db.UpdateDeviceLastSeen(ctx, localpart, deviceWithID.ID, "127.0.0.1", "Element Web")
 	assert.NoError(t, err, "unable to update device last seen")

 	deviceWithID.DisplayName = newName
 	deviceWithID.LastSeenIP = "127.0.0.1"
-	deviceWithID.LastSeenTS = int64(gomatrixserverlib.AsTimestamp(time.Now().Truncate(time.Second)))
 	gotDevice, err = db.GetDeviceByID(ctx, localpart, deviceWithID.ID)
 	assert.NoError(t, err, "unable to get device by id")
 	assert.Equal(t, 2, len(devices))
 	assert.Equal(t, deviceWithID.DisplayName, gotDevice.DisplayName)
 	assert.Equal(t, deviceWithID.LastSeenIP, gotDevice.LastSeenIP)
-	truncatedTime := gomatrixserverlib.Timestamp(gotDevice.LastSeenTS).Time().Truncate(time.Second)
-	assert.Equal(t, gomatrixserverlib.Timestamp(deviceWithID.LastSeenTS), gomatrixserverlib.AsTimestamp(truncatedTime))
+	assert.Greater(t, gotDevice.LastSeenTS, updatedAfterTimestamp)

 	// create one more device and remove the devices step by step
 	newDeviceID := util.RandomString(16)
@@ -378,15 +382,23 @@ func Test_Profile(t *testing.T) {

 	// set avatar & displayname
 	wantProfile.DisplayName = "Alice"
-	wantProfile.AvatarURL = "mxc://aliceAvatar"
-	err = db.SetDisplayName(ctx, aliceLocalpart, "Alice")
-	assert.NoError(t, err, "unable to set displayname")
-	err = db.SetAvatarURL(ctx, aliceLocalpart, "mxc://aliceAvatar")
-	assert.NoError(t, err, "unable to set avatar url")
-	// verify profile
-	gotProfile, err = db.GetProfileByLocalpart(ctx, aliceLocalpart)
-	assert.NoError(t, err, "unable to get profile by localpart")
+	gotProfile, changed, err := db.SetDisplayName(ctx, aliceLocalpart, "Alice")
 	assert.Equal(t, wantProfile, gotProfile)
+	assert.NoError(t, err, "unable to set displayname")
+	assert.True(t, changed)
+
+	wantProfile.AvatarURL = "mxc://aliceAvatar"
+	gotProfile, changed, err = db.SetAvatarURL(ctx, aliceLocalpart, "mxc://aliceAvatar")
+	assert.NoError(t, err, "unable to set avatar url")
+	assert.Equal(t, wantProfile, gotProfile)
+	assert.True(t, changed)
+
+	// Setting the same avatar again doesn't change anything
+	wantProfile.AvatarURL = "mxc://aliceAvatar"
+	gotProfile, changed, err = db.SetAvatarURL(ctx, aliceLocalpart, "mxc://aliceAvatar")
+	assert.NoError(t, err, "unable to set avatar url")
+	assert.Equal(t, wantProfile, gotProfile)
+	assert.False(t, changed)
+
 	// search profiles
 	searchRes, err := db.SearchProfiles(ctx, "Alice", 2)
@@ -84,8 +84,8 @@ type OpenIDTable interface {
 type ProfileTable interface {
 	InsertProfile(ctx context.Context, txn *sql.Tx, localpart string) error
 	SelectProfileByLocalpart(ctx context.Context, localpart string) (*authtypes.Profile, error)
-	SetAvatarURL(ctx context.Context, txn *sql.Tx, localpart string, avatarURL string) (err error)
+	SetAvatarURL(ctx context.Context, txn *sql.Tx, localpart string, avatarURL string) (*authtypes.Profile, bool, error)
-	SetDisplayName(ctx context.Context, txn *sql.Tx, localpart string, displayName string) (err error)
+	SetDisplayName(ctx context.Context, txn *sql.Tx, localpart string, displayName string) (*authtypes.Profile, bool, error)
 	SelectProfilesBySearch(ctx context.Context, searchString string, limit int) ([]authtypes.Profile, error)
 }
@@ -106,7 +106,7 @@ func mustUpdateDeviceLastSeen(
 	timestamp time.Time,
 ) {
 	t.Helper()
-	_, err := db.ExecContext(ctx, "UPDATE device_devices SET last_seen_ts = $1 WHERE localpart = $2", gomatrixserverlib.AsTimestamp(timestamp), localpart)
+	_, err := db.ExecContext(ctx, "UPDATE userapi_devices SET last_seen_ts = $1 WHERE localpart = $2", gomatrixserverlib.AsTimestamp(timestamp), localpart)
 	if err != nil {
 		t.Fatalf("unable to update device last seen")
 	}
@@ -119,7 +119,7 @@ func mustUserUpdateRegistered(
 	localpart string,
 	timestamp time.Time,
 ) {
-	_, err := db.ExecContext(ctx, "UPDATE account_accounts SET created_ts = $1 WHERE localpart = $2", gomatrixserverlib.AsTimestamp(timestamp), localpart)
+	_, err := db.ExecContext(ctx, "UPDATE userapi_accounts SET created_ts = $1 WHERE localpart = $2", gomatrixserverlib.AsTimestamp(timestamp), localpart)
 	if err != nil {
 		t.Fatalf("unable to update device last seen")
 	}
@@ -23,13 +23,15 @@ import (
 	"time"

 	"github.com/gorilla/mux"
-	"github.com/matrix-org/dendrite/internal/httputil"
-	"github.com/matrix-org/dendrite/test"
-	"github.com/matrix-org/dendrite/userapi"
-	"github.com/matrix-org/dendrite/userapi/inthttp"
 	"github.com/matrix-org/gomatrixserverlib"
 	"golang.org/x/crypto/bcrypt"

+	"github.com/matrix-org/dendrite/internal/httputil"
+	"github.com/matrix-org/dendrite/test"
+	"github.com/matrix-org/dendrite/test/testrig"
+	"github.com/matrix-org/dendrite/userapi"
+	"github.com/matrix-org/dendrite/userapi/inthttp"
+
 	"github.com/matrix-org/dendrite/setup/config"
 	"github.com/matrix-org/dendrite/userapi/api"
 	"github.com/matrix-org/dendrite/userapi/internal"
@@ -48,9 +50,9 @@ func MustMakeInternalAPI(t *testing.T, opts apiTestOpts, dbType test.DBType) (ap
 	if opts.loginTokenLifetime == 0 {
 		opts.loginTokenLifetime = api.DefaultLoginTokenLifetime * time.Millisecond
 	}
+	base, baseclose := testrig.CreateBaseDendrite(t, dbType)
 	connStr, close := test.PrepareDBConnectionString(t, dbType)
-	accountDB, err := storage.NewUserAPIDatabase(nil, &config.DatabaseOptions{
+	accountDB, err := storage.NewUserAPIDatabase(base, &config.DatabaseOptions{
 		ConnectionString: config.DataSource(connStr),
 	}, serverName, bcrypt.MinCost, config.DefaultOpenIDTokenLifetimeMS, opts.loginTokenLifetime, "")
 	if err != nil {
@@ -64,9 +66,12 @@ func MustMakeInternalAPI(t *testing.T, opts apiTestOpts, dbType test.DBType) (ap
 	}

 	return &internal.UserInternalAPI{
 		DB:         accountDB,
 		ServerName: cfg.Matrix.ServerName,
-	}, accountDB, close
+	}, accountDB, func() {
+		close()
+		baseclose()
+	}
 }

 func TestQueryProfile(t *testing.T) {
@@ -79,10 +84,10 @@ func TestQueryProfile(t *testing.T) {
 	if err != nil {
 		t.Fatalf("failed to make account: %s", err)
 	}
-	if err := accountDB.SetAvatarURL(context.TODO(), "alice", aliceAvatarURL); err != nil {
+	if _, _, err := accountDB.SetAvatarURL(context.TODO(), "alice", aliceAvatarURL); err != nil {
 		t.Fatalf("failed to set avatar url: %s", err)
 	}
-	if err := accountDB.SetDisplayName(context.TODO(), "alice", aliceDisplayName); err != nil {
+	if _, _, err := accountDB.SetDisplayName(context.TODO(), "alice", aliceDisplayName); err != nil {
 		t.Fatalf("failed to set display name: %s", err)
 	}