azureblob/b2/dropbox/gcs/koofr/qingstor/s3: fix 0 length files

In 0386d22cc9 we introduced a test which reads 0 length files the
way mount does.

This test failed on these backends, which we fix up here.
Nick Craig-Wood 2019-08-06 15:18:08 +01:00
parent 27a075e9fc
commit e502be475a
8 changed files with 31 additions and 13 deletions
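
The per-backend fix is the same one-liner in each Open method: call fs.FixRangeOption on the open options before they are turned into a range request, so that the RangeOption mount generates (a ReadAt into a 1024 byte buffer at offset 0) is clamped to the size of the object rather than passed through as a range the backend or server may reject for a 0 length object. The sketch below illustrates the kind of clamping involved, modelled on the checks the koofr backend previously did inline (deleted further down in favour of the shared helper); rangeOpt and fixRange are illustrative stand-ins, not rclone's actual fs.FixRangeOption implementation.

// Minimal, self-contained sketch of the kind of clamping a helper like
// fs.FixRangeOption has to do. rangeOpt stands in for fs.RangeOption:
// Start/End are byte offsets and -1 means "unset"; Start == -1 with End == N
// is the "last N bytes" form. Not rclone's actual implementation.
package main

import "fmt"

type rangeOpt struct {
	Start, End int64
}

// fixRange clamps r against the known object size so the backend never asks
// the server for bytes past the end of the object.
func fixRange(r rangeOpt, size int64) rangeOpt {
	if r.Start < 0 { // "last N bytes": convert to an absolute start offset
		r.Start = size - r.End
		if r.Start < 0 {
			r.Start = 0
		}
		r.End = -1
	}
	if r.End >= size { // never read past the end; for size 0 this empties the range
		r.End = size - 1
	}
	return r
}

func main() {
	// mount reads a file with ReadAt(buf, 0) on a 1024 byte buffer, which
	// reaches the backend as a range request; unclamped, that is bytes 0-1023
	// even when the object is 0 bytes long.
	fmt.Println(fixRange(rangeOpt{Start: 0, End: 1023}, 0))   // {0 -1}: nothing to read
	fmt.Println(fixRange(rangeOpt{Start: 0, End: 1023}, 100)) // {0 99}
	fmt.Println(fixRange(rangeOpt{Start: -1, End: 50}, 100))  // last 50 bytes -> {50 -1}
}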

backend/azureblob/azureblob.go

@@ -13,6 +13,7 @@ import (
 	"encoding/hex"
 	"fmt"
 	"io"
+	"log"
 	"net/http"
 	"net/url"
 	"path"
@@ -1185,7 +1186,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	if o.AccessTier() == azblob.AccessTierArchive {
 		return nil, errors.Errorf("Blob in archive tier, you need to set tier to hot or cool first")
 	}
+	fs.FixRangeOption(options, o.size)
 	for _, option := range options {
 		switch x := option.(type) {
 		case *fs.RangeOption:
@@ -1205,6 +1206,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	ac := azblob.BlobAccessConditions{}
 	var dowloadResponse *azblob.DownloadResponse
 	err = o.fs.pacer.Call(func() (bool, error) {
+		log.Printf("offset=%d, count=%v", offset, count)
 		dowloadResponse, err = blob.Download(ctx, offset, count, ac, false)
 		return o.fs.shouldRetry(err)
 	})

backend/b2/b2.go

@@ -1531,6 +1531,7 @@ var _ io.ReadCloser = &openFile{}
 // Open an object for read
 func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
+	fs.FixRangeOption(options, o.size)
 	opts := rest.Opts{
 		Method:  "GET",
 		Options: options,

backend/dropbox/dropbox.go

@@ -975,6 +975,7 @@ func (o *Object) Storable() bool {
 // Open an object for read
 func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
+	fs.FixRangeOption(options, o.bytes)
 	headers := fs.OpenOptionHeaders(options)
 	arg := files.DownloadArg{Path: o.remotePath(), ExtraHeaders: headers}
 	err = o.fs.pacer.Call(func() (bool, error) {

backend/googlecloudstorage/googlecloudstorage.go

@@ -966,6 +966,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	if err != nil {
 		return nil, err
 	}
+	fs.FixRangeOption(options, o.bytes)
 	fs.OpenOptionAddHTTPHeaders(req.Header, options)
 	var res *http.Response
 	err = o.fs.pacer.Call(func() (bool, error) {

backend/koofr/koofr.go

@@ -154,6 +154,7 @@ func (o *Object) SetModTime(ctx context.Context, mtime time.Time) error {
 func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
 	var sOff, eOff int64 = 0, -1
+	fs.FixRangeOption(options, o.Size())
 	for _, option := range options {
 		switch x := option.(type) {
 		case *fs.SeekOption:
@@ -170,13 +171,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
 	if sOff == 0 && eOff < 0 {
 		return o.fs.client.FilesGet(o.fs.mountID, o.fullPath())
 	}
-	if sOff < 0 {
-		sOff = o.Size() - eOff
-		eOff = o.Size()
-	}
-	if eOff > o.Size() {
-		eOff = o.Size()
-	}
 	span := &koofrclient.FileSpan{
 		Start: sOff,
 		End:   eOff,

backend/qingstor/qingstor.go

@@ -964,6 +964,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
 	key := o.fs.root + o.remote
 	req := qs.GetObjectInput{}
+	fs.FixRangeOption(options, o.size)
 	for _, option := range options {
 		switch option.(type) {
 		case *fs.RangeOption, *fs.SeekOption:

backend/s3/s3.go

@@ -1765,6 +1765,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 		Bucket: &o.fs.bucket,
 		Key:    &key,
 	}
+	fs.FixRangeOption(options, o.bytes)
 	for _, option := range options {
 		switch option.(type) {
 		case *fs.RangeOption, *fs.SeekOption:

vfs/write_test.go

@@ -11,6 +11,7 @@ import (
 	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fstest"
+	"github.com/rclone/rclone/lib/random"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
@@ -288,12 +289,19 @@ func TestWriteFileModTimeWithOpenWriters(t *testing.T) {
 	}
 }
-func TestFileZeroLength(t *testing.T) {
+func testFileReadAt(t *testing.T, n int) {
 	r := fstest.NewRun(t)
 	defer r.Finalise()
 	vfs, fh := writeHandleCreate(t, r)
-	// Close the file without writing to it
+	contents := []byte(random.String(n))
+	if n != 0 {
+		written, err := fh.Write(contents)
+		require.NoError(t, err)
+		assert.Equal(t, n, written)
+	}
+	// Close the file without writing to it if n==0
 	err := fh.Close()
 	if errors.Cause(err) == fs.ErrorCantUploadEmptyFiles {
 		t.Logf("skipping test: %v", err)
@@ -301,18 +309,27 @@ func TestFileZeroLength(t *testing.T) {
 	}
 	assert.NoError(t, err)
-	// read the 0 length file back in using ReadAt into a buffer
+	// read the file back in using ReadAt into a buffer
 	// this simulates what mount does
 	rd, err := vfs.OpenFile("file1", os.O_RDONLY, 0)
 	require.NoError(t, err)
 	buf := make([]byte, 1024)
-	n, err := rd.ReadAt(buf, 0)
+	read, err := rd.ReadAt(buf, 0)
 	if err != io.EOF {
 		assert.NoError(t, err)
 	}
-	assert.Equal(t, 0, n)
+	assert.Equal(t, read, n)
+	assert.Equal(t, contents, buf[:read])
 	err = rd.Close()
 	assert.NoError(t, err)
 }
+
+func TestFileReadAtZeroLength(t *testing.T) {
+	testFileReadAt(t, 0)
+}
+
+func TestFileReadAtNonZeroLength(t *testing.T) {
+	testFileReadAt(t, 100)
+}
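
Assuming the test file above is the vfs write test (writeHandleCreate and the fstest helpers live in the vfs test package), both the 0 length and 100 byte paths can be exercised from the repository root with the standard Go tooling; unless a -remote flag is given, fstest runs them against a local temporary remote:

go test ./vfs -run 'TestFileReadAt(Zero|NonZero)Length' -v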