Let LimitReader read MaxFileSizeBytes + 1

This commit is contained in:
Till Faelligen 2021-04-27 07:14:07 +02:00
parent a28af5b750
commit d27d4c9f2e
2 changed files with 21 additions and 5 deletions

View file

@@ -147,7 +147,7 @@ func (r *uploadRequest) doUpload(
// r.storeFileAndMetadata(ctx, tmpDir, ...)
// before you return from doUpload else we will leak a temp file. We could make this nicer with a `WithTransaction` style of
// nested function to guarantee either storage or cleanup.
lr := io.LimitReader(reqReader, int64(*cfg.MaxFileSizeBytes))
lr := io.LimitReader(reqReader, int64(*cfg.MaxFileSizeBytes)+1)
hash, bytesWritten, tmpDir, err := fileutils.WriteTempFile(ctx, lr, cfg.AbsBasePath)
if err != nil {
r.Logger.WithError(err).WithFields(log.Fields{
@@ -159,9 +159,8 @@ func (r *uploadRequest) doUpload(
}
}
// Check if temp file size is greater (should not happen, LimitReader stops when the defined size is reached.)
// or equal to the max file size configuration.
if bytesWritten >= types.FileSizeBytes(*cfg.MaxFileSizeBytes) {
// Check if temp file size exceeds max file size configuration
if bytesWritten > types.FileSizeBytes(*cfg.MaxFileSizeBytes) {
fileutils.RemoveDir(tmpDir, r.Logger) // delete temp file
return requestEntityTooLargeJSONResponse(*cfg.MaxFileSizeBytes)
}

View file

@@ -83,6 +83,23 @@ func Test_uploadRequest_doUpload(t *testing.T) {
},
want: nil,
},
{
name: "upload ok (exact size)",
args: args{
ctx: context.Background(),
reqReader: strings.NewReader("testtest"),
cfg: cfg,
db: db,
},
fields: fields{
Logger: logger,
MediaMetadata: &types.MediaMetadata{
MediaID: "1338",
UploadName: "test ok (exact size)",
},
},
want: nil,
},
{
name: "upload not ok",
args: args{
@@ -94,7 +111,7 @@ func Test_uploadRequest_doUpload(t *testing.T) {
fields: fields{
Logger: logger,
MediaMetadata: &types.MediaMetadata{
MediaID: "1337",
MediaID: "1339",
UploadName: "test fail",
},
},