mirror of https://github.com/matrix-org/dendrite.git (synced 2025-12-26 08:13:09 -06:00)

commit da08bb2589: Merge branch 'master' into neilalexander/pinecone

CHANGES.md (16 lines changed)

@@ -1,5 +1,21 @@
 # Changelog
 
+## Dendrite 0.3.10 (2021-02-17)
+
+### Features
+
+* In-memory caches will now gradually evict old entries, reducing idle memory usage
+* Federation sender queues will now be fully unloaded when idle, reducing idle memory usage
+* The `power_level_content_override` option is now supported in `/createRoom`
+* The `/send` endpoint will now attempt more servers in the room when trying to fetch missing events or state
+
+### Fixes
+
+* A panic in the membership updater has been fixed
+* Events in the sync API that weren't excluded from sync can no longer be incorrectly excluded from sync by backfill
+* Retrieving remote media now correctly respects the locally configured maximum file size, even when the `Content-Length` header is unavailable
+* The `/send` endpoint will no longer hit the database more than once to find servers in the room
+
 ## Dendrite 0.3.9 (2021-02-04)
 
 ### Features
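As an aside on the `power_level_content_override` feature above: this maps to the standard field of the same name in the Matrix client-server `/createRoom` request body. A minimal sketch of exercising it from Go, assuming a hypothetical homeserver at `localhost:8008` and a hypothetical `accessToken`:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Hypothetical access token, for illustration only.
	accessToken := "MDAxABCDEF"
	body, _ := json.Marshal(map[string]interface{}{
		"name": "Test Room",
		// Override part of the initial m.room.power_levels content,
		// e.g. require power level 50 to invite users.
		"power_level_content_override": map[string]interface{}{
			"invite": 50,
		},
	})
	resp, err := http.Post(
		"http://localhost:8008/_matrix/client/r0/createRoom?access_token="+accessToken,
		"application/json", bytes.NewReader(body),
	)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```
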
@@ -23,13 +23,13 @@ RUN apt-get update && apt-get -y install python
 WORKDIR /build
 ADD https://github.com/matrix-org/go-http-js-libp2p/archive/master.tar.gz /build/libp2p.tar.gz
 RUN tar xvfz libp2p.tar.gz
-ADD https://github.com/vector-im/riot-web/archive/matthew/p2p.tar.gz /build/p2p.tar.gz
+ADD https://github.com/vector-im/element-web/archive/matthew/p2p.tar.gz /build/p2p.tar.gz
 RUN tar xvfz p2p.tar.gz
 
-# Install deps for riot-web, symlink in libp2p repo and build that too
-WORKDIR /build/riot-web-matthew-p2p
+# Install deps for element-web, symlink in libp2p repo and build that too
+WORKDIR /build/element-web-matthew-p2p
 RUN yarn install
-RUN ln -s /build/go-http-js-libp2p-master /build/riot-web-matthew-p2p/node_modules/go-http-js-libp2p
+RUN ln -s /build/go-http-js-libp2p-master /build/element-web-matthew-p2p/node_modules/go-http-js-libp2p
 RUN (cd node_modules/go-http-js-libp2p && yarn install)
 COPY --from=gobuild /build/dendrite-master/main.wasm ./src/vector/dendrite.wasm
 # build it all

@@ -108,4 +108,4 @@ server { \n\
 } \n\
 }' > /etc/nginx/conf.d/default.conf
 RUN sed -i 's/}/ application\/wasm wasm;\n}/g' /etc/nginx/mime.types
-COPY --from=jsbuild /build/riot-web-matthew-p2p/webapp /usr/share/nginx/html
+COPY --from=jsbuild /build/element-web-matthew-p2p/webapp /usr/share/nginx/html

@@ -309,12 +309,12 @@ user_api:
     listen: http://0.0.0.0:7781
     connect: http://user_api:7781
   account_database:
-    connection_string: postgresql://dendrite:itsasecret@postgres/dendrite_account?sslmode=disable
+    connection_string: postgresql://dendrite:itsasecret@postgres/dendrite_userapi_accounts?sslmode=disable
     max_open_conns: 10
     max_idle_conns: 2
     conn_max_lifetime: -1
   device_database:
-    connection_string: postgresql://dendrite:itsasecret@postgres/dendrite_device?sslmode=disable
+    connection_string: postgresql://dendrite:itsasecret@postgres/dendrite_userapi_devices?sslmode=disable
     max_open_conns: 10
     max_idle_conns: 2
     conn_max_lifetime: -1

@@ -1,5 +1,5 @@
 #!/bin/sh
 
-for db in account device mediaapi syncapi roomserver signingkeyserver keyserver federationsender appservice naffka; do
+for db in userapi_accounts userapi_devices mediaapi syncapi roomserver signingkeyserver keyserver federationsender appservice naffka; do
     createdb -U dendrite -O dendrite dendrite_$db
 done

@@ -109,7 +109,7 @@ On macOS, omit `sudo -u postgres` from the below commands.
 * If you want to run each Dendrite component with its own database:
 
   ```bash
-  for i in mediaapi syncapi roomserver signingkeyserver federationsender appservice keyserver userapi_account userapi_device naffka; do
+  for i in mediaapi syncapi roomserver signingkeyserver federationsender appservice keyserver userapi_accounts userapi_devices naffka; do
       sudo -u postgres createdb -O dendrite dendrite_$i
   done
   ```

@@ -46,6 +46,7 @@ const (
 // ensures that only one request is in flight to a given destination
 // at a time.
 type destinationQueue struct {
+	queues  *OutgoingQueues
 	db      storage.Database
 	process *process.ProcessContext
 	signing *SigningInfo

@@ -246,6 +247,7 @@ func (oq *destinationQueue) backgroundSend() {
 	}
 	destinationQueueRunning.Inc()
 	defer destinationQueueRunning.Dec()
+	defer oq.queues.clearQueue(oq)
 	defer oq.running.Store(false)
 
 	// Mark the queue as overflowed, so we will consult the database

@@ -120,7 +120,7 @@ func NewOutgoingQueues(
 		log.WithError(err).Error("Failed to get EDU server names for destination queue hydration")
 	}
 	for serverName := range serverNames {
-		if queue := queues.getQueue(serverName); !queue.statistics.Blacklisted() {
+		if queue := queues.getQueue(serverName); queue != nil {
 			queue.wakeQueueIfNeeded()
 		}
 	}

@@ -148,12 +148,16 @@ type queuedEDU struct {
 }
 
 func (oqs *OutgoingQueues) getQueue(destination gomatrixserverlib.ServerName) *destinationQueue {
+	if oqs.statistics.ForServer(destination).Blacklisted() {
+		return nil
+	}
 	oqs.queuesMutex.Lock()
 	defer oqs.queuesMutex.Unlock()
-	oq := oqs.queues[destination]
-	if oq == nil {
+	oq, ok := oqs.queues[destination]
+	if !ok {
 		destinationQueueTotal.Inc()
 		oq = &destinationQueue{
+			queues:  oqs,
 			db:      oqs.db,
 			process: oqs.process,
 			rsAPI:   oqs.rsAPI,

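Note the behavioural change here: `getQueue` now returns `nil` for blacklisted destinations rather than handing back a live queue, so every caller must nil-check the result before use — the `SendEvent`, `SendEDU` and `RetryServer` hunks below are updated accordingly.
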
@@ -170,6 +174,16 @@ func (oqs *OutgoingQueues) getQueue(destination gomatrixserverlib.ServerName) *d
 	return oq
 }
 
+func (oqs *OutgoingQueues) clearQueue(oq *destinationQueue) {
+	oqs.queuesMutex.Lock()
+	defer oqs.queuesMutex.Unlock()
+
+	close(oq.notify)
+	close(oq.interruptBackoff)
+	delete(oqs.queues, oq.destination)
+	destinationQueueTotal.Dec()
+}
+
 type ErrorFederationDisabled struct {
 	Message string
 }

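Together with the `queues` back-reference and the `defer oq.queues.clearQueue(oq)` added earlier, this gives each destination queue a self-removing lifecycle: when its background goroutine goes idle and exits, the queue deletes itself from the owner's map, and `getQueue` recreates it on demand. A minimal sketch of the general pattern, with hypothetical names, assuming each worker's goroutine is only started once:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

type worker struct {
	owner *registry
	key   string
}

type registry struct {
	mu      sync.Mutex
	workers map[string]*worker
}

// get returns the worker for key, creating and starting it if needed.
func (r *registry) get(key string) *worker {
	r.mu.Lock()
	defer r.mu.Unlock()
	w, ok := r.workers[key]
	if !ok {
		w = &worker{owner: r, key: key}
		r.workers[key] = w
		go w.run()
	}
	return w
}

// clear removes a worker from the map once its goroutine exits.
func (r *registry) clear(w *worker) {
	r.mu.Lock()
	defer r.mu.Unlock()
	delete(r.workers, w.key)
}

func (w *worker) run() {
	// Remove ourselves from the owner's map on exit, mirroring the
	// defer oq.queues.clearQueue(oq) in the hunk above.
	defer w.owner.clear(w)
	time.Sleep(10 * time.Millisecond) // pretend to do some work, then go idle
}

func main() {
	r := &registry{workers: map[string]*worker{}}
	r.get("example.org")
	time.Sleep(50 * time.Millisecond)
	r.mu.Lock()
	fmt.Println("workers remaining:", len(r.workers)) // 0 once idle
	r.mu.Unlock()
}
```
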
@@ -236,7 +250,9 @@ func (oqs *OutgoingQueues) SendEvent(
 	}
 
 	for destination := range destmap {
-		oqs.getQueue(destination).sendEvent(ev, nid)
+		if queue := oqs.getQueue(destination); queue != nil {
+			queue.sendEvent(ev, nid)
+		}
 	}
 
 	return nil

@@ -306,7 +322,9 @@ func (oqs *OutgoingQueues) SendEDU(
 	}
 
 	for destination := range destmap {
-		oqs.getQueue(destination).sendEDU(e, nid)
+		if queue := oqs.getQueue(destination); queue != nil {
+			queue.sendEDU(e, nid)
+		}
 	}
 
 	return nil

@@ -317,9 +335,7 @@ func (oqs *OutgoingQueues) RetryServer(srv gomatrixserverlib.ServerName) {
 	if oqs.disabled {
 		return
 	}
-	q := oqs.getQueue(srv)
-	if q == nil {
-		return
-	}
-	q.wakeQueueIfNeeded()
+	if queue := oqs.getQueue(srv); queue != nil {
+		queue.wakeQueueIfNeeded()
+	}
 }

@@ -2,6 +2,7 @@ package caching
 
 import (
 	"fmt"
+	"time"
 
 	lru "github.com/hashicorp/golang-lru"
 	"github.com/prometheus/client_golang/prometheus"

@@ -72,6 +73,11 @@ func NewInMemoryLRUCache(enablePrometheus bool) (*Caches, error) {
 	if err != nil {
 		return nil, err
 	}
+	go cacheCleaner(
+		roomVersions, serverKeys, roomServerStateKeyNIDs,
+		roomServerEventTypeNIDs, roomServerRoomIDs,
+		roomInfos, federationEvents,
+	)
 	return &Caches{
 		RoomVersions: roomVersions,
 		ServerKeys:   serverKeys,

@@ -83,6 +89,20 @@ func NewInMemoryLRUCache(enablePrometheus bool) (*Caches, error) {
 	}, nil
 }
 
+func cacheCleaner(caches ...*InMemoryLRUCachePartition) {
+	for {
+		time.Sleep(time.Minute)
+		for _, cache := range caches {
+			// Hold onto the last 10% of the cache entries, since
+			// otherwise a quiet period might cause us to evict all
+			// cache entries entirely.
+			if cache.lru.Len() > cache.maxEntries/10 {
+				cache.lru.RemoveOldest()
+			}
+		}
+	}
+}
+
 type InMemoryLRUCachePartition struct {
 	name    string
 	mutable bool

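The cleaner wakes once a minute and evicts at most one entry per cache per tick, so eviction is deliberately gradual: a full 1024-entry cache would take roughly 920 quiet minutes to drain down to its 10% floor. A standalone sketch of the same floor logic against `hashicorp/golang-lru`, compressing the one-minute ticks into a loop:

```go
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	const maxEntries = 100
	cache, err := lru.New(maxEntries)
	if err != nil {
		panic(err)
	}
	for i := 0; i < maxEntries; i++ {
		cache.Add(i, i)
	}
	// Each "tick" evicts at most one entry, and eviction stops at the
	// 10% floor, so a quiet period never empties the cache completely.
	ticks := 0
	for cache.Len() > maxEntries/10 {
		cache.RemoveOldest()
		ticks++
	}
	fmt.Printf("settled at %d entries after %d ticks\n", cache.Len(), ticks)
}
```
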
@@ -17,7 +17,7 @@ var build string
 const (
 	VersionMajor = 0
 	VersionMinor = 3
-	VersionPatch = 9
+	VersionPatch = 10
 	VersionTag   = "" // example: "rc1"
 )
 
@@ -109,7 +109,7 @@ func RemoveDir(dir types.Path, logger *log.Entry) {
 // WriteTempFile writes to a new temporary file.
 // The file is deleted if there was an error while writing.
 func WriteTempFile(
-	ctx context.Context, reqReader io.Reader, maxFileSizeBytes config.FileSizeBytes, absBasePath config.Path,
+	ctx context.Context, reqReader io.Reader, absBasePath config.Path,
 ) (hash types.Base64Hash, size types.FileSizeBytes, path types.Path, err error) {
 	size = -1
 	logger := util.GetLogger(ctx)

@@ -124,18 +124,11 @@ func WriteTempFile(
 		}
 	}()
 
-	// If the max_file_size_bytes configuration option is set to a positive
-	// number then limit the upload to that size. Otherwise, just read the
-	// whole file.
-	limitedReader := reqReader
-	if maxFileSizeBytes > 0 {
-		limitedReader = io.LimitReader(reqReader, int64(maxFileSizeBytes))
-	}
 	// Hash the file data. The hash will be returned. The hash is useful as a
 	// method of deduplicating files to save storage, as well as a way to conduct
 	// integrity checks on the file data in the repository.
 	hasher := sha256.New()
-	teeReader := io.TeeReader(limitedReader, hasher)
+	teeReader := io.TeeReader(reqReader, hasher)
 	bytesWritten, err := io.Copy(tmpFileWriter, teeReader)
 	if err != nil && err != io.EOF {
 		RemoveDir(tmpDir, logger)

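With this change `WriteTempFile` no longer applies its own size limit; callers are expected to hand it a reader that is already bounded (see the media download changes below). The `TeeReader` trick it keeps is worth noting: the file is hashed and written in a single pass over the stream. A minimal self-contained illustration:

```go
package main

import (
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"io"
	"strings"
)

func main() {
	src := strings.NewReader("example media payload")
	hasher := sha256.New()
	// Every byte copied to the destination also passes through the
	// hasher, so hashing costs no extra read of the source.
	tee := io.TeeReader(src, hasher)
	n, err := io.Copy(io.Discard, tee)
	fmt.Printf("wrote %d bytes (err=%v), sha256=%s\n", n, err,
		base64.RawURLEncoding.EncodeToString(hasher.Sum(nil)))
}
```
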
@@ -19,6 +19,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
+	"io/ioutil"
 	"mime"
 	"net/http"
 	"net/url"

@@ -214,7 +215,7 @@ func (r *downloadRequest) doDownload(
 		ctx, r.MediaMetadata.MediaID, r.MediaMetadata.Origin,
 	)
 	if err != nil {
-		return nil, errors.Wrap(err, "error querying the database")
+		return nil, fmt.Errorf("db.GetMediaMetadata: %w", err)
 	}
 	if mediaMetadata == nil {
 		if r.MediaMetadata.Origin == cfg.Matrix.ServerName {

@@ -253,16 +254,16 @@ func (r *downloadRequest) respondFromLocalFile(
 ) (*types.MediaMetadata, error) {
 	filePath, err := fileutils.GetPathFromBase64Hash(r.MediaMetadata.Base64Hash, absBasePath)
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to get file path from metadata")
+		return nil, fmt.Errorf("fileutils.GetPathFromBase64Hash: %w", err)
 	}
 	file, err := os.Open(filePath)
 	defer file.Close() // nolint: errcheck, staticcheck, megacheck
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to open file")
+		return nil, fmt.Errorf("os.Open: %w", err)
 	}
 	stat, err := file.Stat()
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to stat file")
+		return nil, fmt.Errorf("file.Stat: %w", err)
 	}
 
 	if r.MediaMetadata.FileSizeBytes > 0 && int64(r.MediaMetadata.FileSizeBytes) != stat.Size() {

@@ -324,7 +325,7 @@ func (r *downloadRequest) respondFromLocalFile(
 	w.Header().Set("Content-Security-Policy", contentSecurityPolicy)
 
 	if _, err := io.Copy(w, responseFile); err != nil {
-		return nil, errors.Wrap(err, "failed to copy from cache")
+		return nil, fmt.Errorf("io.Copy: %w", err)
 	}
 	return responseMetadata, nil
 }

@@ -421,7 +422,7 @@ func (r *downloadRequest) getThumbnailFile(
 		ctx, r.MediaMetadata.MediaID, r.MediaMetadata.Origin,
 	)
 	if err != nil {
-		return nil, nil, errors.Wrap(err, "error looking up thumbnails")
+		return nil, nil, fmt.Errorf("db.GetThumbnails: %w", err)
 	}
 
 	// If we get a thumbnailSize, a pre-generated thumbnail would be best but it is not yet generated.

@@ -459,12 +460,12 @@ func (r *downloadRequest) getThumbnailFile(
 	thumbFile, err := os.Open(string(thumbPath))
 	if err != nil {
 		thumbFile.Close() // nolint: errcheck
-		return nil, nil, errors.Wrap(err, "failed to open file")
+		return nil, nil, fmt.Errorf("os.Open: %w", err)
 	}
 	thumbStat, err := thumbFile.Stat()
 	if err != nil {
 		thumbFile.Close() // nolint: errcheck
-		return nil, nil, errors.Wrap(err, "failed to stat file")
+		return nil, nil, fmt.Errorf("thumbFile.Stat: %w", err)
 	}
 	if types.FileSizeBytes(thumbStat.Size()) != thumbnail.MediaMetadata.FileSizeBytes {
 		thumbFile.Close() // nolint: errcheck

@@ -491,7 +492,7 @@ func (r *downloadRequest) generateThumbnail(
 		activeThumbnailGeneration, maxThumbnailGenerators, db, r.Logger,
 	)
 	if err != nil {
-		return nil, errors.Wrap(err, "error creating thumbnail")
+		return nil, fmt.Errorf("thumbnailer.GenerateThumbnail: %w", err)
 	}
 	if busy {
 		return nil, nil

@@ -502,7 +503,7 @@ func (r *downloadRequest) generateThumbnail(
 		thumbnailSize.Width, thumbnailSize.Height, thumbnailSize.ResizeMethod,
 	)
 	if err != nil {
-		return nil, errors.Wrap(err, "error looking up thumbnail")
+		return nil, fmt.Errorf("db.GetThumbnail: %w", err)
 	}
 	return thumbnail, nil
 }

@@ -543,7 +544,7 @@ func (r *downloadRequest) getRemoteFile(
 		ctx, r.MediaMetadata.MediaID, r.MediaMetadata.Origin,
 	)
 	if err != nil {
-		return errors.Wrap(err, "error querying the database.")
+		return fmt.Errorf("db.GetMediaMetadata: %w", err)
 	}
 
 	if mediaMetadata == nil {

@@ -555,7 +556,7 @@ func (r *downloadRequest) getRemoteFile(
 			cfg.MaxThumbnailGenerators,
 		)
 		if err != nil {
-			return errors.Wrap(err, "error querying the database.")
+			return fmt.Errorf("r.fetchRemoteFileAndStoreMetadata: %w", err)
 		}
 	} else {
 		// If we have a record, we can respond from the local file

@@ -673,6 +674,43 @@ func (r *downloadRequest) fetchRemoteFileAndStoreMetadata(
 	return nil
 }
 
+func (r *downloadRequest) GetContentLengthAndReader(contentLengthHeader string, body *io.ReadCloser, maxFileSizeBytes config.FileSizeBytes) (int64, io.Reader, error) {
+	reader := *body
+	var contentLength int64
+
+	if contentLengthHeader != "" {
+		// A Content-Length header is provided. Let's try to parse it.
+		parsedLength, parseErr := strconv.ParseInt(contentLengthHeader, 10, 64)
+		if parseErr != nil {
+			r.Logger.WithError(parseErr).Warn("Failed to parse content length")
+			return 0, nil, fmt.Errorf("strconv.ParseInt: %w", parseErr)
+		}
+		if parsedLength > int64(maxFileSizeBytes) {
+			return 0, nil, fmt.Errorf(
+				"remote file size (%d bytes) exceeds locally configured max media size (%d bytes)",
+				parsedLength, maxFileSizeBytes,
+			)
+		}
+
+		// We successfully parsed the Content-Length, so we'll return a limited
+		// reader that restricts us to reading only up to this size.
+		reader = ioutil.NopCloser(io.LimitReader(*body, parsedLength))
+		contentLength = parsedLength
+	} else {
+		// Content-Length header is missing. If we have a maximum file size
+		// configured then we'll just make sure that the reader is limited to
+		// that size. We'll return a zero content length, but that's OK, since
+		// ultimately it will get rewritten later when the temp file is written
+		// to disk.
+		if maxFileSizeBytes > 0 {
+			reader = ioutil.NopCloser(io.LimitReader(*body, int64(maxFileSizeBytes)))
+		}
+		contentLength = 0
+	}
+
+	return contentLength, reader, nil
+}
+
 func (r *downloadRequest) fetchRemoteFile(
 	ctx context.Context,
 	client *gomatrixserverlib.Client,

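This is the heart of the changelog fix for remote media without a `Content-Length` header: whichever branch is taken, the returned reader is capped, so a remote server that omits or understates the header can never stream more than the configured maximum onto disk. A small illustration of how `io.LimitReader` enforces the cap, assuming a hypothetical 16-byte limit:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	const maxBytes = 16
	body := strings.NewReader("this body is much longer than the configured maximum")
	// The limited reader silently stops returning data at maxBytes.
	limited := io.LimitReader(body, maxBytes)
	data, err := io.ReadAll(limited)
	fmt.Printf("read %d bytes (err=%v): %q\n", len(data), err, data)
	// Reading exactly maxBytes is the caller's hint that the remote
	// file may have been truncated and should be treated as too large.
}
```
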
@@ -692,16 +730,18 @@ func (r *downloadRequest) fetchRemoteFile(
 	}
 	defer resp.Body.Close() // nolint: errcheck
 
-	// get metadata from request and set metadata on response
-	contentLength, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
-	if err != nil {
-		r.Logger.WithError(err).Warn("Failed to parse content length")
-		return "", false, errors.Wrap(err, "invalid response from remote server")
-	}
+	// The reader returned here will be limited either by the Content-Length
+	// and/or the configured maximum media size.
+	contentLength, reader, parseErr := r.GetContentLengthAndReader(resp.Header.Get("Content-Length"), &resp.Body, maxFileSizeBytes)
+	if parseErr != nil {
+		return "", false, parseErr
+	}
 
 	if contentLength > int64(maxFileSizeBytes) {
 		// TODO: Bubble up this as a 413
 		return "", false, fmt.Errorf("remote file is too large (%v > %v bytes)", contentLength, maxFileSizeBytes)
 	}
 
 	r.MediaMetadata.FileSizeBytes = types.FileSizeBytes(contentLength)
 	r.MediaMetadata.ContentType = types.ContentType(resp.Header.Get("Content-Type"))

@@ -728,7 +768,7 @@ func (r *downloadRequest) fetchRemoteFile(
 	// method of deduplicating files to save storage, as well as a way to conduct
 	// integrity checks on the file data in the repository.
 	// Data is truncated to maxFileSizeBytes. Content-Length was reported as 0 < Content-Length <= maxFileSizeBytes so this is OK.
-	hash, bytesWritten, tmpDir, err := fileutils.WriteTempFile(ctx, resp.Body, maxFileSizeBytes, absBasePath)
+	hash, bytesWritten, tmpDir, err := fileutils.WriteTempFile(ctx, reader, absBasePath)
 	if err != nil {
 		r.Logger.WithError(err).WithFields(log.Fields{
 			"MaxFileSizeBytes": maxFileSizeBytes,

@@ -747,7 +787,7 @@ func (r *downloadRequest) fetchRemoteFile(
 	// The database is the source of truth so we need to have moved the file first
 	finalPath, duplicate, err := fileutils.MoveFileWithHashCheck(tmpDir, r.MediaMetadata, absBasePath, r.Logger)
 	if err != nil {
-		return "", false, errors.Wrap(err, "failed to move file")
+		return "", false, fmt.Errorf("fileutils.MoveFileWithHashCheck: %w", err)
 	}
 	if duplicate {
 		r.Logger.WithField("dst", finalPath).Info("File was stored previously - discarding duplicate")

@@ -147,7 +147,7 @@ func (r *uploadRequest) doUpload(
 	// r.storeFileAndMetadata(ctx, tmpDir, ...)
 	// before you return from doUpload else we will leak a temp file. We could make this nicer with a `WithTransaction` style of
 	// nested function to guarantee either storage or cleanup.
-	hash, bytesWritten, tmpDir, err := fileutils.WriteTempFile(ctx, reqReader, *cfg.MaxFileSizeBytes, cfg.AbsBasePath)
+	hash, bytesWritten, tmpDir, err := fileutils.WriteTempFile(ctx, reqReader, cfg.AbsBasePath)
 	if err != nil {
 		r.Logger.WithError(err).WithFields(log.Fields{
 			"MaxFileSizeBytes": *cfg.MaxFileSizeBytes,

@@ -75,7 +75,7 @@ const insertEventSQL = "" +
 	"INSERT INTO syncapi_output_room_events (" +
 	"room_id, event_id, headered_event_json, type, sender, contains_url, add_state_ids, remove_state_ids, session_id, transaction_id, exclude_from_sync" +
 	") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11) " +
-	"ON CONFLICT ON CONSTRAINT syncapi_event_id_idx DO UPDATE SET exclude_from_sync = $11 " +
+	"ON CONFLICT ON CONSTRAINT syncapi_event_id_idx DO UPDATE SET exclude_from_sync = (excluded.exclude_from_sync AND $11) " +
 	"RETURNING id"
 
 const selectEventsSQL = "" +

@@ -54,7 +54,7 @@ const insertEventSQL = "" +
 	"INSERT INTO syncapi_output_room_events (" +
 	"id, room_id, event_id, headered_event_json, type, sender, contains_url, add_state_ids, remove_state_ids, session_id, transaction_id, exclude_from_sync" +
 	") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12) " +
-	"ON CONFLICT (event_id) DO UPDATE SET exclude_from_sync = (excluded.exclude_from_sync AND $13)"
+	"ON CONFLICT (event_id) DO UPDATE SET exclude_from_sync = (excluded.exclude_from_sync AND $13)"
 
 const selectEventsSQL = "" +
 	"SELECT event_id, id, headered_event_json, session_id, exclude_from_sync, transaction_id FROM syncapi_output_room_events WHERE event_id = $1"

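Per the changelog entry above ("events that weren't excluded from sync can no longer be incorrectly excluded from sync by backfill"), the intent of these two SQL changes, covering both the PostgreSQL and SQLite storage backends, is that the upsert's conflict path now combines the exclusion flag with a boolean `AND` rather than overwriting it outright: the combined expression is only true when both operands are true, so the flag is biased towards keeping events visible to sync.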