backend/s3: use low-level-retries as the number of SDK retries

Amazon S3 is built to handle different kinds of workloads.
In rare cases where S3 is unable to scale, for whatever reason, users
will face status 500 errors.
The main mechanism for handling these errors is retries.
The number of retries needed varies between use cases.

This change makes the number of retries for the s3 backend
configurable via the --low-level-retries option.
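
For example, to allow the SDK up to 20 retries per request (the flag
defaults to 10; the path and remote below are placeholders):

	rclone copy /local/data remote:bucket --low-level-retries 20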
Aleksandar Janković 2020-02-24 16:43:45 +01:00 committed by GitHub
parent ac9cb50fdb
commit 5470d34740
1 changed file with 7 additions and 4 deletions

@@ -850,7 +850,6 @@ In Ceph, this can be increased with the "rgw list buckets max chunk" option.
 			Default:  memoryPoolFlushTime,
 			Advanced: true,
 			Help: `How often internal memory buffer pools will be flushed.
-
 Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations.
 This option controls how often unused buffers will be removed from the pool.`,
 		}, {
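
For context, the flushing described in the help text above can be sketched
as a ticker that periodically drops idle buffers. This is a simplified,
hypothetical stand-in for rclone's buffer pool, not the real implementation
(imports: sync, time; all names are made up; like the pools map further
down, one pool is assumed to hold buffers of a single size):

	type bufferPool struct {
		mu   sync.Mutex
		free [][]byte
	}

	// Get reuses an idle buffer if one is available, else allocates.
	func (p *bufferPool) Get(size int) []byte {
		p.mu.Lock()
		defer p.mu.Unlock()
		if n := len(p.free); n > 0 {
			buf := p.free[n-1]
			p.free = p.free[:n-1]
			return buf
		}
		return make([]byte, size)
	}

	// Put returns a buffer to the pool for later reuse.
	func (p *bufferPool) Put(buf []byte) {
		p.mu.Lock()
		defer p.mu.Unlock()
		p.free = append(p.free, buf)
	}

	// startFlusher empties the free list every flushTime, which is the
	// behaviour the option above controls.
	func (p *bufferPool) startFlusher(flushTime time.Duration) *time.Ticker {
		t := time.NewTicker(flushTime)
		go func() {
			for range t.C {
				p.mu.Lock()
				p.free = nil
				p.mu.Unlock()
			}
		}()
		return t
	}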
@@ -866,7 +865,6 @@ This option controls how often unused buffers will be removed from the pool.`,
 const (
 	metaMtime   = "Mtime"     // the meta key to store mtime in - eg X-Amz-Meta-Mtime
 	metaMD5Hash = "Md5chksum" // the meta key to store md5hash in
-	maxRetries     = 10                      // number of retries to make of operations
 	maxSizeForCopy = 5 * 1024 * 1024 * 1024  // The maximum size of object we can COPY
 	maxUploadParts = 10000                   // maximum allowed number of parts in a multi-part upload
 	minChunkSize   = fs.SizeSuffix(1024 * 1024 * 5)
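
Note that the removed constant and the flag's default coincide:
--low-level-retries also defaults to 10, so the default behaviour is
unchanged. A minimal standalone demonstration of the SDK setting being
driven here (aws-sdk-go v1; not rclone code):

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/aws"
	)

	func main() {
		// MaxRetries caps how many times the SDK's built-in retryer
		// re-issues a retryable request (throttling, 5xx errors).
		cfg := aws.NewConfig().WithMaxRetries(10)
		fmt.Println("SDK max retries:", *cfg.MaxRetries)
	}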
@@ -1095,7 +1093,7 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
 		opt.ForcePathStyle = false
 	}
 	awsConfig := aws.NewConfig().
-		WithMaxRetries(maxRetries).
+		WithMaxRetries(fs.Config.LowLevelRetries).
 		WithCredentials(cred).
 		WithHTTPClient(fshttp.NewClient(fs.Config)).
 		WithS3ForcePathStyle(opt.ForcePathStyle).
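
Outside of rclone, the wiring above amounts to something like the
following sketch (aws-sdk-go v1 assumed; newS3Client and lowLevelRetries
are hypothetical names, not rclone's s3Connection):

	import (
		"github.com/aws/aws-sdk-go/aws"
		"github.com/aws/aws-sdk-go/aws/session"
		"github.com/aws/aws-sdk-go/service/s3"
	)

	// newS3Client builds an S3 client whose SDK-level retry count comes
	// from the caller instead of a hardcoded constant.
	func newS3Client(lowLevelRetries int) (*s3.S3, error) {
		awsConfig := aws.NewConfig().WithMaxRetries(lowLevelRetries)
		ses, err := session.NewSession(awsConfig)
		if err != nil {
			return nil, err
		}
		return s3.New(ses), nil
	}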
@@ -1202,12 +1200,17 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		return nil, err
 	}
+	pc := fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep)))
+	// Set pacer retries to 1: in pacer terms retries counts attempts,
+	// so 1 means a single attempt; we rely on the SDK retry mechanism.
+	pc.SetRetries(1)
 	f := &Fs{
 		name:  name,
 		opt:   *opt,
 		c:     c,
 		ses:   ses,
-		pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))),
+		pacer: pc,
 		cache: bucket.NewCache(),
 		srv:   fshttp.NewClient(fs.Config),
 		pools: make(map[int64]*pool.Pool),
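
With retries set to 1, the pacer still rate-limits calls but makes a
single attempt, leaving the retrying of 500-class errors to the SDK. A
hypothetical caller might look like this (listBuckets is made up; the
pacer construction mirrors the diff; imports: time, aws-sdk-go's s3,
rclone's fs and lib/pacer packages):

	// listBuckets paces the request but never retries it itself; any
	// retries configured via WithMaxRetries happen inside the SDK call.
	func listBuckets(c *s3.S3, minSleep time.Duration) (*s3.ListBucketsOutput, error) {
		pc := fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep)))
		pc.SetRetries(1) // one attempt in pacer terms
		var out *s3.ListBucketsOutput
		err := pc.Call(func() (bool, error) {
			var err error
			out, err = c.ListBuckets(&s3.ListBucketsInput{})
			return false, err // never ask the pacer to retry
		})
		return out, err
	}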