diff --git a/src/github.com/matrix-org/dendrite/cmd/dendrite-media-api-server/main.go b/src/github.com/matrix-org/dendrite/cmd/dendrite-media-api-server/main.go index 3895348db..3dcf74b12 100644 --- a/src/github.com/matrix-org/dendrite/cmd/dendrite-media-api-server/main.go +++ b/src/github.com/matrix-org/dendrite/cmd/dendrite-media-api-server/main.go @@ -50,10 +50,10 @@ func main() { } cfg := &config.MediaAPI{ - ServerName: types.ServerName(serverName), - BasePath: types.Path(basePath), - MaxFileSize: 10 * 1024 * 1024, - DataSource: dataSource, + ServerName: types.ServerName(serverName), + BasePath: types.Path(basePath), + MaxFileSizeBytes: 10 * 1024 * 1024, + DataSource: dataSource, } db, err := storage.Open(cfg.DataSource) diff --git a/src/github.com/matrix-org/dendrite/mediaapi/config/config.go b/src/github.com/matrix-org/dendrite/mediaapi/config/config.go index 2002cd86a..060964d27 100644 --- a/src/github.com/matrix-org/dendrite/mediaapi/config/config.go +++ b/src/github.com/matrix-org/dendrite/mediaapi/config/config.go @@ -24,7 +24,7 @@ type MediaAPI struct { BasePath types.Path `yaml:"base_path"` // The maximum file size in bytes that is allowed to be stored on this server. // Note that remote files larger than this can still be proxied to a client, they will just not be cached. 
- MaxFileSize types.ContentLength `yaml:"base_path"` + MaxFileSizeBytes types.ContentLength `yaml:"max_file_size_bytes"` // The postgres connection config for connecting to the database e.g a postgres:// URI DataSource string `yaml:"database"` } diff --git a/src/github.com/matrix-org/dendrite/mediaapi/writers/download.go b/src/github.com/matrix-org/dendrite/mediaapi/writers/download.go index ac8aaff01..e72fcae07 100644 --- a/src/github.com/matrix-org/dendrite/mediaapi/writers/download.go +++ b/src/github.com/matrix-org/dendrite/mediaapi/writers/download.go @@ -163,7 +163,7 @@ func Download(w http.ResponseWriter, req *http.Request, origin types.ServerName, } } - r.respondFromRemoteFile(w, cfg.BasePath, cfg.MaxFileSize, db, activeRemoteRequests) + r.respondFromRemoteFile(w, cfg.BasePath, cfg.MaxFileSizeBytes, db, activeRemoteRequests) } else { // If we do not have a record and the origin is local, or if we have another error from the database, the file is not found r.Logger.Warnln("Failed to look up file in database:", err) @@ -280,8 +280,8 @@ func (r *downloadRequest) createRemoteRequest() (*http.Response, *util.JSONRespo // copyToActiveAndPassive works like io.Copy except it copies from the reader to both of the writers // If there is an error with the reader or the active writer, that is considered an error // If there is an error with the passive writer, that is non-critical and copying continues -// maxFileSize limits the amount of data written to the passive writer -func copyToActiveAndPassive(r io.Reader, wActive io.Writer, wPassive io.Writer, maxFileSize types.ContentLength, mediaMetadata *types.MediaMetadata) (int64, int64, error) { +// maxFileSizeBytes limits the amount of data written to the passive writer +func copyToActiveAndPassive(r io.Reader, wActive io.Writer, wPassive io.Writer, maxFileSizeBytes types.ContentLength, mediaMetadata *types.MediaMetadata) (int64, int64, error) { var bytesResponded, bytesWritten int64 = 0, 0 var copyError error // Note: the buffer
size is the same as is used in io.Copy() @@ -301,8 +301,8 @@ func copyToActiveAndPassive(r io.Reader, wActive io.Writer, wPassive io.Writer, // Note: if we get here then copyError != errFileIsTooLarge && copyError != errWrite // as if copyError == errResponse || copyError == errWrite then we would have broken // out of the loop and there are no other cases - // if larger than maxFileSize then stop writing to disk and discard cached file - if bytesWritten+int64(len(buffer)) > int64(maxFileSize) { + // if larger than maxFileSizeBytes then stop writing to disk and discard cached file + if bytesWritten+int64(len(buffer)) > int64(maxFileSizeBytes) { copyError = errFileIsTooLarge } else { // write to disk @@ -400,7 +400,7 @@ func (r *downloadRequest) commitFileAndMetadata(tmpDir types.Path, basePath type return updateActiveRemoteRequests } -func (r *downloadRequest) respondFromRemoteFile(w http.ResponseWriter, basePath types.Path, maxFileSize types.ContentLength, db *storage.Database, activeRemoteRequests *types.ActiveRemoteRequests) { +func (r *downloadRequest) respondFromRemoteFile(w http.ResponseWriter, basePath types.Path, maxFileSizeBytes types.ContentLength, db *storage.Database, activeRemoteRequests *types.ActiveRemoteRequests) { r.Logger.WithFields(log.Fields{ "MediaID": r.MediaMetadata.MediaID, "Origin": r.MediaMetadata.Origin, @@ -472,7 +472,7 @@ func (r *downloadRequest) respondFromRemoteFile(w http.ResponseWriter, basePath // bytesResponded is the total number of bytes written to the response to the client request // bytesWritten is the total number of bytes written to disk - bytesResponded, bytesWritten, fetchError := copyToActiveAndPassive(resp.Body, w, tmpFileWriter, maxFileSize, r.MediaMetadata) + bytesResponded, bytesWritten, fetchError := copyToActiveAndPassive(resp.Body, w, tmpFileWriter, maxFileSizeBytes, r.MediaMetadata) tmpFileWriter.Flush() if fetchError != nil { logFields := log.Fields{ @@ -480,7 +480,7 @@ func (r *downloadRequest) 
respondFromRemoteFile(w http.ResponseWriter, basePath "Origin": r.MediaMetadata.Origin, } if fetchError == errFileIsTooLarge { - logFields["MaxFileSize"] = maxFileSize + logFields["MaxFileSizeBytes"] = maxFileSizeBytes } r.Logger.WithFields(logFields).Warnln(fetchError) tmpDirErr := os.RemoveAll(string(tmpDir)) diff --git a/src/github.com/matrix-org/dendrite/mediaapi/writers/upload.go b/src/github.com/matrix-org/dendrite/mediaapi/writers/upload.go index 37749ca10..a5085d595 100644 --- a/src/github.com/matrix-org/dendrite/mediaapi/writers/upload.go +++ b/src/github.com/matrix-org/dendrite/mediaapi/writers/upload.go @@ -43,7 +43,7 @@ type uploadRequest struct { } // Validate validates the uploadRequest fields -func (r uploadRequest) Validate(maxFileSize types.ContentLength) *util.JSONResponse { +func (r uploadRequest) Validate(maxFileSizeBytes types.ContentLength) *util.JSONResponse { // TODO: Any validation to be done on ContentDisposition? if r.MediaMetadata.ContentLength < 1 { @@ -52,10 +52,10 @@ func (r uploadRequest) Validate(maxFileSize types.ContentLength) *util.JSONRespo JSON: jsonerror.Unknown("HTTP Content-Length request header must be greater than zero."), } } - if maxFileSize > 0 && r.MediaMetadata.ContentLength > maxFileSize { + if maxFileSizeBytes > 0 && r.MediaMetadata.ContentLength > maxFileSizeBytes { return &util.JSONResponse{ Code: 400, - JSON: jsonerror.Unknown(fmt.Sprintf("HTTP Content-Length is greater than the maximum allowed upload size (%v).", maxFileSize)), + JSON: jsonerror.Unknown(fmt.Sprintf("HTTP Content-Length is greater than the maximum allowed upload size (%v).", maxFileSizeBytes)), } } // TODO: Check if the Content-Type is a valid type? @@ -135,7 +135,7 @@ func Upload(req *http.Request, cfg *config.MediaAPI, db *storage.Database) util. 
}, } - if resErr = r.Validate(cfg.MaxFileSize); resErr != nil { + if resErr = r.Validate(cfg.MaxFileSizeBytes); resErr != nil { return *resErr } @@ -161,7 +161,7 @@ func Upload(req *http.Request, cfg *config.MediaAPI, db *storage.Database) util. // The limited reader restricts how many bytes are read from the body to the specified maximum bytes // Note: the golang HTTP server closes the request body - limitedBody := io.LimitReader(req.Body, int64(cfg.MaxFileSize)) + limitedBody := io.LimitReader(req.Body, int64(cfg.MaxFileSizeBytes)) hasher := sha256.New() reader := io.TeeReader(limitedBody, hasher)