vendor: update github.com/t3rm1n4l/go-mega to fix mega "illegal base64 data at input byte 22"

Thanks to Ajaja for figuring this out.

See: https://forum.rclone.org/t/problem-to-login-with-mega/12276
Nick Craig-Wood 2020-01-11 16:47:06 +00:00
parent 87c844bce1
commit 92662baceb
41 changed files with 643 additions and 5107 deletions
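
The go-mega fix itself is in the vendored source further down the commit and not visible in this section, but the error in the title is Go's standard base64.CorruptInputError. That error typically appears when MEGA's unpadded, URL-safe base64 strings are decoded with the wrong encoding variant, so a sketch of the failure mode looks like this (the key is an invented sample, not real MEGA data):

package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// MEGA-style key material: URL-safe base64 without '=' padding.
	// This value is invented for illustration.
	key := "dGhpcy1pcy1hLXNhbXBsZS1rZXk"

	// StdEncoding expects padded input, so this fails with
	// "illegal base64 data at input byte N".
	if _, err := base64.StdEncoding.DecodeString(key); err != nil {
		fmt.Println("StdEncoding:", err)
	}

	// RawURLEncoding accepts the unpadded, URL-safe form.
	b, err := base64.RawURLEncoding.DecodeString(key)
	fmt.Println("RawURLEncoding:", err, string(b))
}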

go.mod (4 changed lines)

@@ -50,14 +50,14 @@ require (
github.com/spf13/cobra v0.0.5
github.com/spf13/pflag v1.0.5
github.com/stretchr/testify v1.4.0
-github.com/t3rm1n4l/go-mega v0.0.0-20191014094753-e8695d78299a
+github.com/t3rm1n4l/go-mega v0.0.0-20200111163430-ad0abe77ec81
github.com/xanzy/ssh-agent v0.2.1
github.com/youmark/pkcs8 v0.0.0-20191102193632-94c173a94d60
github.com/yunify/qingstor-sdk-go/v3 v3.1.1
go.etcd.io/bbolt v1.3.3 // indirect
go.opencensus.io v0.22.2 // indirect
goftp.io/server v0.0.0-20190812052725-72a57b186803
-golang.org/x/crypto v0.0.0-20191108234033-bd318be0434a
+golang.org/x/crypto v0.0.0-20200109152110-61a87790db17
golang.org/x/net v0.0.0-20191109021931-daa7c04131f5
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e

go.sum (13 changed lines)

@@ -1,5 +1,3 @@
-bazil.org/fuse v0.0.0-20180421153158-65cc252bf669 h1:FNCRpXiquG1aoyqcIWVFmpTSKVcx2bQD38uZZeGtdlw=
-bazil.org/fuse v0.0.0-20180421153158-65cc252bf669/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
bazil.org/fuse v0.0.0-20191225233854-3a99aca11732 h1:gaB1+kZCJDExjlrdy37gIwxV0M7v81EzIFKQZ5o5YV0=
bazil.org/fuse v0.0.0-20191225233854-3a99aca11732/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
@@ -253,9 +251,10 @@ github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709 h1:Ko2LQMrRU+Oy
github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/t3rm1n4l/go-mega v0.0.0-20191014094753-e8695d78299a h1:9VwG6wBA1jd6oOCnmQ/OaKM1GRfChadtH5N3bx1oSKE=
-github.com/t3rm1n4l/go-mega v0.0.0-20191014094753-e8695d78299a/go.mod h1:XWL4vDyd3JKmJx+hZWUVgCNmmhZ2dTBcaNDcxH465s0=
+github.com/t3rm1n4l/go-mega v0.0.0-20200111163430-ad0abe77ec81 h1:VEZ6xZtFfxY5urHL/baBvXKSJBYTc9c51WhPprpWKjQ=
+github.com/t3rm1n4l/go-mega v0.0.0-20200111163430-ad0abe77ec81/go.mod h1:XWL4vDyd3JKmJx+hZWUVgCNmmhZ2dTBcaNDcxH465s0=
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
+github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c h1:u6SKchux2yDvFQnDHS3lPnIRmfVJ5Sxy3ao2SIdysLQ=
github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/willf/bitset v1.1.9/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
@@ -285,8 +284,8 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 h1:7KByu05hhLed2MO29w7p1XfZvZ13m8mub3shuVftRs0=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20191108234033-bd318be0434a h1:R/qVym5WAxsZWQqZCwDY/8sdVKV1m1WgU4/S5IRQAzc=
-golang.org/x/crypto v0.0.0-20191108234033-bd318be0434a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200109152110-61a87790db17 h1:nVJ3guKA9qdkEQ3TUdXI9QSINo2CUPM/cySEvw2w8I0=
+golang.org/x/crypto v0.0.0-20200109152110-61a87790db17/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -347,8 +346,6 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191113165036-4c7a9d0fe056 h1:dHtDnRWQtSx0Hjq9kvKFpBh9uPPKfQN70NZZmvssGwk=
-golang.org/x/sys v0.0.0-20191113165036-4c7a9d0fe056/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191210023423-ac6580df4449 h1:gSbV7h1NRL2G1xTg/owz62CST1oJBmxy4QpMMregXVQ=
golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=


@@ -1,443 +0,0 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
// Package s3iface provides an interface to enable mocking the Amazon Simple Storage Service service client
// for testing your code.
//
// It is important to note that this interface will have breaking changes
// when the service model is updated and adds new API operations, paginators,
// and waiters.
package s3iface
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/s3"
)
// S3API provides an interface to enable mocking the
// s3.S3 service client's API operations,
// paginators, and waiters. This makes it easier to unit test code
// that calls out to the SDK's service client.
//
// The best way to use this interface is so the SDK's service client's calls
// can be stubbed out for unit testing your code with the SDK without needing
// to inject custom request handlers into the SDK's request pipeline.
//
// // myFunc uses an SDK service client to make a request to
// // Amazon Simple Storage Service.
// func myFunc(svc s3iface.S3API) bool {
// // Make svc.AbortMultipartUpload request
// }
//
// func main() {
// sess := session.New()
// svc := s3.New(sess)
//
// myFunc(svc)
// }
//
// In your _test.go file:
//
// // Define a mock struct to be used in your unit tests of myFunc.
// type mockS3Client struct {
// s3iface.S3API
// }
// func (m *mockS3Client) AbortMultipartUpload(input *s3.AbortMultipartUploadInput) (*s3.AbortMultipartUploadOutput, error) {
// // mock response/functionality
// }
//
// func TestMyFunc(t *testing.T) {
// // Setup Test
// mockSvc := &mockS3Client{}
//
// myfunc(mockSvc)
//
// // Verify myFunc's functionality
// }
//
// It is important to note that this interface will have breaking changes
// when the service model is updated and adds new API operations, paginators,
// and waiters. It's suggested to use the pattern above for testing, or to use
// tooling to generate mocks to satisfy the interfaces.
type S3API interface {
AbortMultipartUpload(*s3.AbortMultipartUploadInput) (*s3.AbortMultipartUploadOutput, error)
AbortMultipartUploadWithContext(aws.Context, *s3.AbortMultipartUploadInput, ...request.Option) (*s3.AbortMultipartUploadOutput, error)
AbortMultipartUploadRequest(*s3.AbortMultipartUploadInput) (*request.Request, *s3.AbortMultipartUploadOutput)
CompleteMultipartUpload(*s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error)
CompleteMultipartUploadWithContext(aws.Context, *s3.CompleteMultipartUploadInput, ...request.Option) (*s3.CompleteMultipartUploadOutput, error)
CompleteMultipartUploadRequest(*s3.CompleteMultipartUploadInput) (*request.Request, *s3.CompleteMultipartUploadOutput)
CopyObject(*s3.CopyObjectInput) (*s3.CopyObjectOutput, error)
CopyObjectWithContext(aws.Context, *s3.CopyObjectInput, ...request.Option) (*s3.CopyObjectOutput, error)
CopyObjectRequest(*s3.CopyObjectInput) (*request.Request, *s3.CopyObjectOutput)
CreateBucket(*s3.CreateBucketInput) (*s3.CreateBucketOutput, error)
CreateBucketWithContext(aws.Context, *s3.CreateBucketInput, ...request.Option) (*s3.CreateBucketOutput, error)
CreateBucketRequest(*s3.CreateBucketInput) (*request.Request, *s3.CreateBucketOutput)
CreateMultipartUpload(*s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error)
CreateMultipartUploadWithContext(aws.Context, *s3.CreateMultipartUploadInput, ...request.Option) (*s3.CreateMultipartUploadOutput, error)
CreateMultipartUploadRequest(*s3.CreateMultipartUploadInput) (*request.Request, *s3.CreateMultipartUploadOutput)
DeleteBucket(*s3.DeleteBucketInput) (*s3.DeleteBucketOutput, error)
DeleteBucketWithContext(aws.Context, *s3.DeleteBucketInput, ...request.Option) (*s3.DeleteBucketOutput, error)
DeleteBucketRequest(*s3.DeleteBucketInput) (*request.Request, *s3.DeleteBucketOutput)
DeleteBucketAnalyticsConfiguration(*s3.DeleteBucketAnalyticsConfigurationInput) (*s3.DeleteBucketAnalyticsConfigurationOutput, error)
DeleteBucketAnalyticsConfigurationWithContext(aws.Context, *s3.DeleteBucketAnalyticsConfigurationInput, ...request.Option) (*s3.DeleteBucketAnalyticsConfigurationOutput, error)
DeleteBucketAnalyticsConfigurationRequest(*s3.DeleteBucketAnalyticsConfigurationInput) (*request.Request, *s3.DeleteBucketAnalyticsConfigurationOutput)
DeleteBucketCors(*s3.DeleteBucketCorsInput) (*s3.DeleteBucketCorsOutput, error)
DeleteBucketCorsWithContext(aws.Context, *s3.DeleteBucketCorsInput, ...request.Option) (*s3.DeleteBucketCorsOutput, error)
DeleteBucketCorsRequest(*s3.DeleteBucketCorsInput) (*request.Request, *s3.DeleteBucketCorsOutput)
DeleteBucketEncryption(*s3.DeleteBucketEncryptionInput) (*s3.DeleteBucketEncryptionOutput, error)
DeleteBucketEncryptionWithContext(aws.Context, *s3.DeleteBucketEncryptionInput, ...request.Option) (*s3.DeleteBucketEncryptionOutput, error)
DeleteBucketEncryptionRequest(*s3.DeleteBucketEncryptionInput) (*request.Request, *s3.DeleteBucketEncryptionOutput)
DeleteBucketInventoryConfiguration(*s3.DeleteBucketInventoryConfigurationInput) (*s3.DeleteBucketInventoryConfigurationOutput, error)
DeleteBucketInventoryConfigurationWithContext(aws.Context, *s3.DeleteBucketInventoryConfigurationInput, ...request.Option) (*s3.DeleteBucketInventoryConfigurationOutput, error)
DeleteBucketInventoryConfigurationRequest(*s3.DeleteBucketInventoryConfigurationInput) (*request.Request, *s3.DeleteBucketInventoryConfigurationOutput)
DeleteBucketLifecycle(*s3.DeleteBucketLifecycleInput) (*s3.DeleteBucketLifecycleOutput, error)
DeleteBucketLifecycleWithContext(aws.Context, *s3.DeleteBucketLifecycleInput, ...request.Option) (*s3.DeleteBucketLifecycleOutput, error)
DeleteBucketLifecycleRequest(*s3.DeleteBucketLifecycleInput) (*request.Request, *s3.DeleteBucketLifecycleOutput)
DeleteBucketMetricsConfiguration(*s3.DeleteBucketMetricsConfigurationInput) (*s3.DeleteBucketMetricsConfigurationOutput, error)
DeleteBucketMetricsConfigurationWithContext(aws.Context, *s3.DeleteBucketMetricsConfigurationInput, ...request.Option) (*s3.DeleteBucketMetricsConfigurationOutput, error)
DeleteBucketMetricsConfigurationRequest(*s3.DeleteBucketMetricsConfigurationInput) (*request.Request, *s3.DeleteBucketMetricsConfigurationOutput)
DeleteBucketPolicy(*s3.DeleteBucketPolicyInput) (*s3.DeleteBucketPolicyOutput, error)
DeleteBucketPolicyWithContext(aws.Context, *s3.DeleteBucketPolicyInput, ...request.Option) (*s3.DeleteBucketPolicyOutput, error)
DeleteBucketPolicyRequest(*s3.DeleteBucketPolicyInput) (*request.Request, *s3.DeleteBucketPolicyOutput)
DeleteBucketReplication(*s3.DeleteBucketReplicationInput) (*s3.DeleteBucketReplicationOutput, error)
DeleteBucketReplicationWithContext(aws.Context, *s3.DeleteBucketReplicationInput, ...request.Option) (*s3.DeleteBucketReplicationOutput, error)
DeleteBucketReplicationRequest(*s3.DeleteBucketReplicationInput) (*request.Request, *s3.DeleteBucketReplicationOutput)
DeleteBucketTagging(*s3.DeleteBucketTaggingInput) (*s3.DeleteBucketTaggingOutput, error)
DeleteBucketTaggingWithContext(aws.Context, *s3.DeleteBucketTaggingInput, ...request.Option) (*s3.DeleteBucketTaggingOutput, error)
DeleteBucketTaggingRequest(*s3.DeleteBucketTaggingInput) (*request.Request, *s3.DeleteBucketTaggingOutput)
DeleteBucketWebsite(*s3.DeleteBucketWebsiteInput) (*s3.DeleteBucketWebsiteOutput, error)
DeleteBucketWebsiteWithContext(aws.Context, *s3.DeleteBucketWebsiteInput, ...request.Option) (*s3.DeleteBucketWebsiteOutput, error)
DeleteBucketWebsiteRequest(*s3.DeleteBucketWebsiteInput) (*request.Request, *s3.DeleteBucketWebsiteOutput)
DeleteObject(*s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error)
DeleteObjectWithContext(aws.Context, *s3.DeleteObjectInput, ...request.Option) (*s3.DeleteObjectOutput, error)
DeleteObjectRequest(*s3.DeleteObjectInput) (*request.Request, *s3.DeleteObjectOutput)
DeleteObjectTagging(*s3.DeleteObjectTaggingInput) (*s3.DeleteObjectTaggingOutput, error)
DeleteObjectTaggingWithContext(aws.Context, *s3.DeleteObjectTaggingInput, ...request.Option) (*s3.DeleteObjectTaggingOutput, error)
DeleteObjectTaggingRequest(*s3.DeleteObjectTaggingInput) (*request.Request, *s3.DeleteObjectTaggingOutput)
DeleteObjects(*s3.DeleteObjectsInput) (*s3.DeleteObjectsOutput, error)
DeleteObjectsWithContext(aws.Context, *s3.DeleteObjectsInput, ...request.Option) (*s3.DeleteObjectsOutput, error)
DeleteObjectsRequest(*s3.DeleteObjectsInput) (*request.Request, *s3.DeleteObjectsOutput)
DeletePublicAccessBlock(*s3.DeletePublicAccessBlockInput) (*s3.DeletePublicAccessBlockOutput, error)
DeletePublicAccessBlockWithContext(aws.Context, *s3.DeletePublicAccessBlockInput, ...request.Option) (*s3.DeletePublicAccessBlockOutput, error)
DeletePublicAccessBlockRequest(*s3.DeletePublicAccessBlockInput) (*request.Request, *s3.DeletePublicAccessBlockOutput)
GetBucketAccelerateConfiguration(*s3.GetBucketAccelerateConfigurationInput) (*s3.GetBucketAccelerateConfigurationOutput, error)
GetBucketAccelerateConfigurationWithContext(aws.Context, *s3.GetBucketAccelerateConfigurationInput, ...request.Option) (*s3.GetBucketAccelerateConfigurationOutput, error)
GetBucketAccelerateConfigurationRequest(*s3.GetBucketAccelerateConfigurationInput) (*request.Request, *s3.GetBucketAccelerateConfigurationOutput)
GetBucketAcl(*s3.GetBucketAclInput) (*s3.GetBucketAclOutput, error)
GetBucketAclWithContext(aws.Context, *s3.GetBucketAclInput, ...request.Option) (*s3.GetBucketAclOutput, error)
GetBucketAclRequest(*s3.GetBucketAclInput) (*request.Request, *s3.GetBucketAclOutput)
GetBucketAnalyticsConfiguration(*s3.GetBucketAnalyticsConfigurationInput) (*s3.GetBucketAnalyticsConfigurationOutput, error)
GetBucketAnalyticsConfigurationWithContext(aws.Context, *s3.GetBucketAnalyticsConfigurationInput, ...request.Option) (*s3.GetBucketAnalyticsConfigurationOutput, error)
GetBucketAnalyticsConfigurationRequest(*s3.GetBucketAnalyticsConfigurationInput) (*request.Request, *s3.GetBucketAnalyticsConfigurationOutput)
GetBucketCors(*s3.GetBucketCorsInput) (*s3.GetBucketCorsOutput, error)
GetBucketCorsWithContext(aws.Context, *s3.GetBucketCorsInput, ...request.Option) (*s3.GetBucketCorsOutput, error)
GetBucketCorsRequest(*s3.GetBucketCorsInput) (*request.Request, *s3.GetBucketCorsOutput)
GetBucketEncryption(*s3.GetBucketEncryptionInput) (*s3.GetBucketEncryptionOutput, error)
GetBucketEncryptionWithContext(aws.Context, *s3.GetBucketEncryptionInput, ...request.Option) (*s3.GetBucketEncryptionOutput, error)
GetBucketEncryptionRequest(*s3.GetBucketEncryptionInput) (*request.Request, *s3.GetBucketEncryptionOutput)
GetBucketInventoryConfiguration(*s3.GetBucketInventoryConfigurationInput) (*s3.GetBucketInventoryConfigurationOutput, error)
GetBucketInventoryConfigurationWithContext(aws.Context, *s3.GetBucketInventoryConfigurationInput, ...request.Option) (*s3.GetBucketInventoryConfigurationOutput, error)
GetBucketInventoryConfigurationRequest(*s3.GetBucketInventoryConfigurationInput) (*request.Request, *s3.GetBucketInventoryConfigurationOutput)
GetBucketLifecycle(*s3.GetBucketLifecycleInput) (*s3.GetBucketLifecycleOutput, error)
GetBucketLifecycleWithContext(aws.Context, *s3.GetBucketLifecycleInput, ...request.Option) (*s3.GetBucketLifecycleOutput, error)
GetBucketLifecycleRequest(*s3.GetBucketLifecycleInput) (*request.Request, *s3.GetBucketLifecycleOutput)
GetBucketLifecycleConfiguration(*s3.GetBucketLifecycleConfigurationInput) (*s3.GetBucketLifecycleConfigurationOutput, error)
GetBucketLifecycleConfigurationWithContext(aws.Context, *s3.GetBucketLifecycleConfigurationInput, ...request.Option) (*s3.GetBucketLifecycleConfigurationOutput, error)
GetBucketLifecycleConfigurationRequest(*s3.GetBucketLifecycleConfigurationInput) (*request.Request, *s3.GetBucketLifecycleConfigurationOutput)
GetBucketLocation(*s3.GetBucketLocationInput) (*s3.GetBucketLocationOutput, error)
GetBucketLocationWithContext(aws.Context, *s3.GetBucketLocationInput, ...request.Option) (*s3.GetBucketLocationOutput, error)
GetBucketLocationRequest(*s3.GetBucketLocationInput) (*request.Request, *s3.GetBucketLocationOutput)
GetBucketLogging(*s3.GetBucketLoggingInput) (*s3.GetBucketLoggingOutput, error)
GetBucketLoggingWithContext(aws.Context, *s3.GetBucketLoggingInput, ...request.Option) (*s3.GetBucketLoggingOutput, error)
GetBucketLoggingRequest(*s3.GetBucketLoggingInput) (*request.Request, *s3.GetBucketLoggingOutput)
GetBucketMetricsConfiguration(*s3.GetBucketMetricsConfigurationInput) (*s3.GetBucketMetricsConfigurationOutput, error)
GetBucketMetricsConfigurationWithContext(aws.Context, *s3.GetBucketMetricsConfigurationInput, ...request.Option) (*s3.GetBucketMetricsConfigurationOutput, error)
GetBucketMetricsConfigurationRequest(*s3.GetBucketMetricsConfigurationInput) (*request.Request, *s3.GetBucketMetricsConfigurationOutput)
GetBucketNotification(*s3.GetBucketNotificationConfigurationRequest) (*s3.NotificationConfigurationDeprecated, error)
GetBucketNotificationWithContext(aws.Context, *s3.GetBucketNotificationConfigurationRequest, ...request.Option) (*s3.NotificationConfigurationDeprecated, error)
GetBucketNotificationRequest(*s3.GetBucketNotificationConfigurationRequest) (*request.Request, *s3.NotificationConfigurationDeprecated)
GetBucketNotificationConfiguration(*s3.GetBucketNotificationConfigurationRequest) (*s3.NotificationConfiguration, error)
GetBucketNotificationConfigurationWithContext(aws.Context, *s3.GetBucketNotificationConfigurationRequest, ...request.Option) (*s3.NotificationConfiguration, error)
GetBucketNotificationConfigurationRequest(*s3.GetBucketNotificationConfigurationRequest) (*request.Request, *s3.NotificationConfiguration)
GetBucketPolicy(*s3.GetBucketPolicyInput) (*s3.GetBucketPolicyOutput, error)
GetBucketPolicyWithContext(aws.Context, *s3.GetBucketPolicyInput, ...request.Option) (*s3.GetBucketPolicyOutput, error)
GetBucketPolicyRequest(*s3.GetBucketPolicyInput) (*request.Request, *s3.GetBucketPolicyOutput)
GetBucketPolicyStatus(*s3.GetBucketPolicyStatusInput) (*s3.GetBucketPolicyStatusOutput, error)
GetBucketPolicyStatusWithContext(aws.Context, *s3.GetBucketPolicyStatusInput, ...request.Option) (*s3.GetBucketPolicyStatusOutput, error)
GetBucketPolicyStatusRequest(*s3.GetBucketPolicyStatusInput) (*request.Request, *s3.GetBucketPolicyStatusOutput)
GetBucketReplication(*s3.GetBucketReplicationInput) (*s3.GetBucketReplicationOutput, error)
GetBucketReplicationWithContext(aws.Context, *s3.GetBucketReplicationInput, ...request.Option) (*s3.GetBucketReplicationOutput, error)
GetBucketReplicationRequest(*s3.GetBucketReplicationInput) (*request.Request, *s3.GetBucketReplicationOutput)
GetBucketRequestPayment(*s3.GetBucketRequestPaymentInput) (*s3.GetBucketRequestPaymentOutput, error)
GetBucketRequestPaymentWithContext(aws.Context, *s3.GetBucketRequestPaymentInput, ...request.Option) (*s3.GetBucketRequestPaymentOutput, error)
GetBucketRequestPaymentRequest(*s3.GetBucketRequestPaymentInput) (*request.Request, *s3.GetBucketRequestPaymentOutput)
GetBucketTagging(*s3.GetBucketTaggingInput) (*s3.GetBucketTaggingOutput, error)
GetBucketTaggingWithContext(aws.Context, *s3.GetBucketTaggingInput, ...request.Option) (*s3.GetBucketTaggingOutput, error)
GetBucketTaggingRequest(*s3.GetBucketTaggingInput) (*request.Request, *s3.GetBucketTaggingOutput)
GetBucketVersioning(*s3.GetBucketVersioningInput) (*s3.GetBucketVersioningOutput, error)
GetBucketVersioningWithContext(aws.Context, *s3.GetBucketVersioningInput, ...request.Option) (*s3.GetBucketVersioningOutput, error)
GetBucketVersioningRequest(*s3.GetBucketVersioningInput) (*request.Request, *s3.GetBucketVersioningOutput)
GetBucketWebsite(*s3.GetBucketWebsiteInput) (*s3.GetBucketWebsiteOutput, error)
GetBucketWebsiteWithContext(aws.Context, *s3.GetBucketWebsiteInput, ...request.Option) (*s3.GetBucketWebsiteOutput, error)
GetBucketWebsiteRequest(*s3.GetBucketWebsiteInput) (*request.Request, *s3.GetBucketWebsiteOutput)
GetObject(*s3.GetObjectInput) (*s3.GetObjectOutput, error)
GetObjectWithContext(aws.Context, *s3.GetObjectInput, ...request.Option) (*s3.GetObjectOutput, error)
GetObjectRequest(*s3.GetObjectInput) (*request.Request, *s3.GetObjectOutput)
GetObjectAcl(*s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error)
GetObjectAclWithContext(aws.Context, *s3.GetObjectAclInput, ...request.Option) (*s3.GetObjectAclOutput, error)
GetObjectAclRequest(*s3.GetObjectAclInput) (*request.Request, *s3.GetObjectAclOutput)
GetObjectLegalHold(*s3.GetObjectLegalHoldInput) (*s3.GetObjectLegalHoldOutput, error)
GetObjectLegalHoldWithContext(aws.Context, *s3.GetObjectLegalHoldInput, ...request.Option) (*s3.GetObjectLegalHoldOutput, error)
GetObjectLegalHoldRequest(*s3.GetObjectLegalHoldInput) (*request.Request, *s3.GetObjectLegalHoldOutput)
GetObjectLockConfiguration(*s3.GetObjectLockConfigurationInput) (*s3.GetObjectLockConfigurationOutput, error)
GetObjectLockConfigurationWithContext(aws.Context, *s3.GetObjectLockConfigurationInput, ...request.Option) (*s3.GetObjectLockConfigurationOutput, error)
GetObjectLockConfigurationRequest(*s3.GetObjectLockConfigurationInput) (*request.Request, *s3.GetObjectLockConfigurationOutput)
GetObjectRetention(*s3.GetObjectRetentionInput) (*s3.GetObjectRetentionOutput, error)
GetObjectRetentionWithContext(aws.Context, *s3.GetObjectRetentionInput, ...request.Option) (*s3.GetObjectRetentionOutput, error)
GetObjectRetentionRequest(*s3.GetObjectRetentionInput) (*request.Request, *s3.GetObjectRetentionOutput)
GetObjectTagging(*s3.GetObjectTaggingInput) (*s3.GetObjectTaggingOutput, error)
GetObjectTaggingWithContext(aws.Context, *s3.GetObjectTaggingInput, ...request.Option) (*s3.GetObjectTaggingOutput, error)
GetObjectTaggingRequest(*s3.GetObjectTaggingInput) (*request.Request, *s3.GetObjectTaggingOutput)
GetObjectTorrent(*s3.GetObjectTorrentInput) (*s3.GetObjectTorrentOutput, error)
GetObjectTorrentWithContext(aws.Context, *s3.GetObjectTorrentInput, ...request.Option) (*s3.GetObjectTorrentOutput, error)
GetObjectTorrentRequest(*s3.GetObjectTorrentInput) (*request.Request, *s3.GetObjectTorrentOutput)
GetPublicAccessBlock(*s3.GetPublicAccessBlockInput) (*s3.GetPublicAccessBlockOutput, error)
GetPublicAccessBlockWithContext(aws.Context, *s3.GetPublicAccessBlockInput, ...request.Option) (*s3.GetPublicAccessBlockOutput, error)
GetPublicAccessBlockRequest(*s3.GetPublicAccessBlockInput) (*request.Request, *s3.GetPublicAccessBlockOutput)
HeadBucket(*s3.HeadBucketInput) (*s3.HeadBucketOutput, error)
HeadBucketWithContext(aws.Context, *s3.HeadBucketInput, ...request.Option) (*s3.HeadBucketOutput, error)
HeadBucketRequest(*s3.HeadBucketInput) (*request.Request, *s3.HeadBucketOutput)
HeadObject(*s3.HeadObjectInput) (*s3.HeadObjectOutput, error)
HeadObjectWithContext(aws.Context, *s3.HeadObjectInput, ...request.Option) (*s3.HeadObjectOutput, error)
HeadObjectRequest(*s3.HeadObjectInput) (*request.Request, *s3.HeadObjectOutput)
ListBucketAnalyticsConfigurations(*s3.ListBucketAnalyticsConfigurationsInput) (*s3.ListBucketAnalyticsConfigurationsOutput, error)
ListBucketAnalyticsConfigurationsWithContext(aws.Context, *s3.ListBucketAnalyticsConfigurationsInput, ...request.Option) (*s3.ListBucketAnalyticsConfigurationsOutput, error)
ListBucketAnalyticsConfigurationsRequest(*s3.ListBucketAnalyticsConfigurationsInput) (*request.Request, *s3.ListBucketAnalyticsConfigurationsOutput)
ListBucketInventoryConfigurations(*s3.ListBucketInventoryConfigurationsInput) (*s3.ListBucketInventoryConfigurationsOutput, error)
ListBucketInventoryConfigurationsWithContext(aws.Context, *s3.ListBucketInventoryConfigurationsInput, ...request.Option) (*s3.ListBucketInventoryConfigurationsOutput, error)
ListBucketInventoryConfigurationsRequest(*s3.ListBucketInventoryConfigurationsInput) (*request.Request, *s3.ListBucketInventoryConfigurationsOutput)
ListBucketMetricsConfigurations(*s3.ListBucketMetricsConfigurationsInput) (*s3.ListBucketMetricsConfigurationsOutput, error)
ListBucketMetricsConfigurationsWithContext(aws.Context, *s3.ListBucketMetricsConfigurationsInput, ...request.Option) (*s3.ListBucketMetricsConfigurationsOutput, error)
ListBucketMetricsConfigurationsRequest(*s3.ListBucketMetricsConfigurationsInput) (*request.Request, *s3.ListBucketMetricsConfigurationsOutput)
ListBuckets(*s3.ListBucketsInput) (*s3.ListBucketsOutput, error)
ListBucketsWithContext(aws.Context, *s3.ListBucketsInput, ...request.Option) (*s3.ListBucketsOutput, error)
ListBucketsRequest(*s3.ListBucketsInput) (*request.Request, *s3.ListBucketsOutput)
ListMultipartUploads(*s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error)
ListMultipartUploadsWithContext(aws.Context, *s3.ListMultipartUploadsInput, ...request.Option) (*s3.ListMultipartUploadsOutput, error)
ListMultipartUploadsRequest(*s3.ListMultipartUploadsInput) (*request.Request, *s3.ListMultipartUploadsOutput)
ListMultipartUploadsPages(*s3.ListMultipartUploadsInput, func(*s3.ListMultipartUploadsOutput, bool) bool) error
ListMultipartUploadsPagesWithContext(aws.Context, *s3.ListMultipartUploadsInput, func(*s3.ListMultipartUploadsOutput, bool) bool, ...request.Option) error
ListObjectVersions(*s3.ListObjectVersionsInput) (*s3.ListObjectVersionsOutput, error)
ListObjectVersionsWithContext(aws.Context, *s3.ListObjectVersionsInput, ...request.Option) (*s3.ListObjectVersionsOutput, error)
ListObjectVersionsRequest(*s3.ListObjectVersionsInput) (*request.Request, *s3.ListObjectVersionsOutput)
ListObjectVersionsPages(*s3.ListObjectVersionsInput, func(*s3.ListObjectVersionsOutput, bool) bool) error
ListObjectVersionsPagesWithContext(aws.Context, *s3.ListObjectVersionsInput, func(*s3.ListObjectVersionsOutput, bool) bool, ...request.Option) error
ListObjects(*s3.ListObjectsInput) (*s3.ListObjectsOutput, error)
ListObjectsWithContext(aws.Context, *s3.ListObjectsInput, ...request.Option) (*s3.ListObjectsOutput, error)
ListObjectsRequest(*s3.ListObjectsInput) (*request.Request, *s3.ListObjectsOutput)
ListObjectsPages(*s3.ListObjectsInput, func(*s3.ListObjectsOutput, bool) bool) error
ListObjectsPagesWithContext(aws.Context, *s3.ListObjectsInput, func(*s3.ListObjectsOutput, bool) bool, ...request.Option) error
ListObjectsV2(*s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error)
ListObjectsV2WithContext(aws.Context, *s3.ListObjectsV2Input, ...request.Option) (*s3.ListObjectsV2Output, error)
ListObjectsV2Request(*s3.ListObjectsV2Input) (*request.Request, *s3.ListObjectsV2Output)
ListObjectsV2Pages(*s3.ListObjectsV2Input, func(*s3.ListObjectsV2Output, bool) bool) error
ListObjectsV2PagesWithContext(aws.Context, *s3.ListObjectsV2Input, func(*s3.ListObjectsV2Output, bool) bool, ...request.Option) error
ListParts(*s3.ListPartsInput) (*s3.ListPartsOutput, error)
ListPartsWithContext(aws.Context, *s3.ListPartsInput, ...request.Option) (*s3.ListPartsOutput, error)
ListPartsRequest(*s3.ListPartsInput) (*request.Request, *s3.ListPartsOutput)
ListPartsPages(*s3.ListPartsInput, func(*s3.ListPartsOutput, bool) bool) error
ListPartsPagesWithContext(aws.Context, *s3.ListPartsInput, func(*s3.ListPartsOutput, bool) bool, ...request.Option) error
PutBucketAccelerateConfiguration(*s3.PutBucketAccelerateConfigurationInput) (*s3.PutBucketAccelerateConfigurationOutput, error)
PutBucketAccelerateConfigurationWithContext(aws.Context, *s3.PutBucketAccelerateConfigurationInput, ...request.Option) (*s3.PutBucketAccelerateConfigurationOutput, error)
PutBucketAccelerateConfigurationRequest(*s3.PutBucketAccelerateConfigurationInput) (*request.Request, *s3.PutBucketAccelerateConfigurationOutput)
PutBucketAcl(*s3.PutBucketAclInput) (*s3.PutBucketAclOutput, error)
PutBucketAclWithContext(aws.Context, *s3.PutBucketAclInput, ...request.Option) (*s3.PutBucketAclOutput, error)
PutBucketAclRequest(*s3.PutBucketAclInput) (*request.Request, *s3.PutBucketAclOutput)
PutBucketAnalyticsConfiguration(*s3.PutBucketAnalyticsConfigurationInput) (*s3.PutBucketAnalyticsConfigurationOutput, error)
PutBucketAnalyticsConfigurationWithContext(aws.Context, *s3.PutBucketAnalyticsConfigurationInput, ...request.Option) (*s3.PutBucketAnalyticsConfigurationOutput, error)
PutBucketAnalyticsConfigurationRequest(*s3.PutBucketAnalyticsConfigurationInput) (*request.Request, *s3.PutBucketAnalyticsConfigurationOutput)
PutBucketCors(*s3.PutBucketCorsInput) (*s3.PutBucketCorsOutput, error)
PutBucketCorsWithContext(aws.Context, *s3.PutBucketCorsInput, ...request.Option) (*s3.PutBucketCorsOutput, error)
PutBucketCorsRequest(*s3.PutBucketCorsInput) (*request.Request, *s3.PutBucketCorsOutput)
PutBucketEncryption(*s3.PutBucketEncryptionInput) (*s3.PutBucketEncryptionOutput, error)
PutBucketEncryptionWithContext(aws.Context, *s3.PutBucketEncryptionInput, ...request.Option) (*s3.PutBucketEncryptionOutput, error)
PutBucketEncryptionRequest(*s3.PutBucketEncryptionInput) (*request.Request, *s3.PutBucketEncryptionOutput)
PutBucketInventoryConfiguration(*s3.PutBucketInventoryConfigurationInput) (*s3.PutBucketInventoryConfigurationOutput, error)
PutBucketInventoryConfigurationWithContext(aws.Context, *s3.PutBucketInventoryConfigurationInput, ...request.Option) (*s3.PutBucketInventoryConfigurationOutput, error)
PutBucketInventoryConfigurationRequest(*s3.PutBucketInventoryConfigurationInput) (*request.Request, *s3.PutBucketInventoryConfigurationOutput)
PutBucketLifecycle(*s3.PutBucketLifecycleInput) (*s3.PutBucketLifecycleOutput, error)
PutBucketLifecycleWithContext(aws.Context, *s3.PutBucketLifecycleInput, ...request.Option) (*s3.PutBucketLifecycleOutput, error)
PutBucketLifecycleRequest(*s3.PutBucketLifecycleInput) (*request.Request, *s3.PutBucketLifecycleOutput)
PutBucketLifecycleConfiguration(*s3.PutBucketLifecycleConfigurationInput) (*s3.PutBucketLifecycleConfigurationOutput, error)
PutBucketLifecycleConfigurationWithContext(aws.Context, *s3.PutBucketLifecycleConfigurationInput, ...request.Option) (*s3.PutBucketLifecycleConfigurationOutput, error)
PutBucketLifecycleConfigurationRequest(*s3.PutBucketLifecycleConfigurationInput) (*request.Request, *s3.PutBucketLifecycleConfigurationOutput)
PutBucketLogging(*s3.PutBucketLoggingInput) (*s3.PutBucketLoggingOutput, error)
PutBucketLoggingWithContext(aws.Context, *s3.PutBucketLoggingInput, ...request.Option) (*s3.PutBucketLoggingOutput, error)
PutBucketLoggingRequest(*s3.PutBucketLoggingInput) (*request.Request, *s3.PutBucketLoggingOutput)
PutBucketMetricsConfiguration(*s3.PutBucketMetricsConfigurationInput) (*s3.PutBucketMetricsConfigurationOutput, error)
PutBucketMetricsConfigurationWithContext(aws.Context, *s3.PutBucketMetricsConfigurationInput, ...request.Option) (*s3.PutBucketMetricsConfigurationOutput, error)
PutBucketMetricsConfigurationRequest(*s3.PutBucketMetricsConfigurationInput) (*request.Request, *s3.PutBucketMetricsConfigurationOutput)
PutBucketNotification(*s3.PutBucketNotificationInput) (*s3.PutBucketNotificationOutput, error)
PutBucketNotificationWithContext(aws.Context, *s3.PutBucketNotificationInput, ...request.Option) (*s3.PutBucketNotificationOutput, error)
PutBucketNotificationRequest(*s3.PutBucketNotificationInput) (*request.Request, *s3.PutBucketNotificationOutput)
PutBucketNotificationConfiguration(*s3.PutBucketNotificationConfigurationInput) (*s3.PutBucketNotificationConfigurationOutput, error)
PutBucketNotificationConfigurationWithContext(aws.Context, *s3.PutBucketNotificationConfigurationInput, ...request.Option) (*s3.PutBucketNotificationConfigurationOutput, error)
PutBucketNotificationConfigurationRequest(*s3.PutBucketNotificationConfigurationInput) (*request.Request, *s3.PutBucketNotificationConfigurationOutput)
PutBucketPolicy(*s3.PutBucketPolicyInput) (*s3.PutBucketPolicyOutput, error)
PutBucketPolicyWithContext(aws.Context, *s3.PutBucketPolicyInput, ...request.Option) (*s3.PutBucketPolicyOutput, error)
PutBucketPolicyRequest(*s3.PutBucketPolicyInput) (*request.Request, *s3.PutBucketPolicyOutput)
PutBucketReplication(*s3.PutBucketReplicationInput) (*s3.PutBucketReplicationOutput, error)
PutBucketReplicationWithContext(aws.Context, *s3.PutBucketReplicationInput, ...request.Option) (*s3.PutBucketReplicationOutput, error)
PutBucketReplicationRequest(*s3.PutBucketReplicationInput) (*request.Request, *s3.PutBucketReplicationOutput)
PutBucketRequestPayment(*s3.PutBucketRequestPaymentInput) (*s3.PutBucketRequestPaymentOutput, error)
PutBucketRequestPaymentWithContext(aws.Context, *s3.PutBucketRequestPaymentInput, ...request.Option) (*s3.PutBucketRequestPaymentOutput, error)
PutBucketRequestPaymentRequest(*s3.PutBucketRequestPaymentInput) (*request.Request, *s3.PutBucketRequestPaymentOutput)
PutBucketTagging(*s3.PutBucketTaggingInput) (*s3.PutBucketTaggingOutput, error)
PutBucketTaggingWithContext(aws.Context, *s3.PutBucketTaggingInput, ...request.Option) (*s3.PutBucketTaggingOutput, error)
PutBucketTaggingRequest(*s3.PutBucketTaggingInput) (*request.Request, *s3.PutBucketTaggingOutput)
PutBucketVersioning(*s3.PutBucketVersioningInput) (*s3.PutBucketVersioningOutput, error)
PutBucketVersioningWithContext(aws.Context, *s3.PutBucketVersioningInput, ...request.Option) (*s3.PutBucketVersioningOutput, error)
PutBucketVersioningRequest(*s3.PutBucketVersioningInput) (*request.Request, *s3.PutBucketVersioningOutput)
PutBucketWebsite(*s3.PutBucketWebsiteInput) (*s3.PutBucketWebsiteOutput, error)
PutBucketWebsiteWithContext(aws.Context, *s3.PutBucketWebsiteInput, ...request.Option) (*s3.PutBucketWebsiteOutput, error)
PutBucketWebsiteRequest(*s3.PutBucketWebsiteInput) (*request.Request, *s3.PutBucketWebsiteOutput)
PutObject(*s3.PutObjectInput) (*s3.PutObjectOutput, error)
PutObjectWithContext(aws.Context, *s3.PutObjectInput, ...request.Option) (*s3.PutObjectOutput, error)
PutObjectRequest(*s3.PutObjectInput) (*request.Request, *s3.PutObjectOutput)
PutObjectAcl(*s3.PutObjectAclInput) (*s3.PutObjectAclOutput, error)
PutObjectAclWithContext(aws.Context, *s3.PutObjectAclInput, ...request.Option) (*s3.PutObjectAclOutput, error)
PutObjectAclRequest(*s3.PutObjectAclInput) (*request.Request, *s3.PutObjectAclOutput)
PutObjectLegalHold(*s3.PutObjectLegalHoldInput) (*s3.PutObjectLegalHoldOutput, error)
PutObjectLegalHoldWithContext(aws.Context, *s3.PutObjectLegalHoldInput, ...request.Option) (*s3.PutObjectLegalHoldOutput, error)
PutObjectLegalHoldRequest(*s3.PutObjectLegalHoldInput) (*request.Request, *s3.PutObjectLegalHoldOutput)
PutObjectLockConfiguration(*s3.PutObjectLockConfigurationInput) (*s3.PutObjectLockConfigurationOutput, error)
PutObjectLockConfigurationWithContext(aws.Context, *s3.PutObjectLockConfigurationInput, ...request.Option) (*s3.PutObjectLockConfigurationOutput, error)
PutObjectLockConfigurationRequest(*s3.PutObjectLockConfigurationInput) (*request.Request, *s3.PutObjectLockConfigurationOutput)
PutObjectRetention(*s3.PutObjectRetentionInput) (*s3.PutObjectRetentionOutput, error)
PutObjectRetentionWithContext(aws.Context, *s3.PutObjectRetentionInput, ...request.Option) (*s3.PutObjectRetentionOutput, error)
PutObjectRetentionRequest(*s3.PutObjectRetentionInput) (*request.Request, *s3.PutObjectRetentionOutput)
PutObjectTagging(*s3.PutObjectTaggingInput) (*s3.PutObjectTaggingOutput, error)
PutObjectTaggingWithContext(aws.Context, *s3.PutObjectTaggingInput, ...request.Option) (*s3.PutObjectTaggingOutput, error)
PutObjectTaggingRequest(*s3.PutObjectTaggingInput) (*request.Request, *s3.PutObjectTaggingOutput)
PutPublicAccessBlock(*s3.PutPublicAccessBlockInput) (*s3.PutPublicAccessBlockOutput, error)
PutPublicAccessBlockWithContext(aws.Context, *s3.PutPublicAccessBlockInput, ...request.Option) (*s3.PutPublicAccessBlockOutput, error)
PutPublicAccessBlockRequest(*s3.PutPublicAccessBlockInput) (*request.Request, *s3.PutPublicAccessBlockOutput)
RestoreObject(*s3.RestoreObjectInput) (*s3.RestoreObjectOutput, error)
RestoreObjectWithContext(aws.Context, *s3.RestoreObjectInput, ...request.Option) (*s3.RestoreObjectOutput, error)
RestoreObjectRequest(*s3.RestoreObjectInput) (*request.Request, *s3.RestoreObjectOutput)
SelectObjectContent(*s3.SelectObjectContentInput) (*s3.SelectObjectContentOutput, error)
SelectObjectContentWithContext(aws.Context, *s3.SelectObjectContentInput, ...request.Option) (*s3.SelectObjectContentOutput, error)
SelectObjectContentRequest(*s3.SelectObjectContentInput) (*request.Request, *s3.SelectObjectContentOutput)
UploadPart(*s3.UploadPartInput) (*s3.UploadPartOutput, error)
UploadPartWithContext(aws.Context, *s3.UploadPartInput, ...request.Option) (*s3.UploadPartOutput, error)
UploadPartRequest(*s3.UploadPartInput) (*request.Request, *s3.UploadPartOutput)
UploadPartCopy(*s3.UploadPartCopyInput) (*s3.UploadPartCopyOutput, error)
UploadPartCopyWithContext(aws.Context, *s3.UploadPartCopyInput, ...request.Option) (*s3.UploadPartCopyOutput, error)
UploadPartCopyRequest(*s3.UploadPartCopyInput) (*request.Request, *s3.UploadPartCopyOutput)
WaitUntilBucketExists(*s3.HeadBucketInput) error
WaitUntilBucketExistsWithContext(aws.Context, *s3.HeadBucketInput, ...request.WaiterOption) error
WaitUntilBucketNotExists(*s3.HeadBucketInput) error
WaitUntilBucketNotExistsWithContext(aws.Context, *s3.HeadBucketInput, ...request.WaiterOption) error
WaitUntilObjectExists(*s3.HeadObjectInput) error
WaitUntilObjectExistsWithContext(aws.Context, *s3.HeadObjectInput, ...request.WaiterOption) error
WaitUntilObjectNotExists(*s3.HeadObjectInput) error
WaitUntilObjectNotExistsWithContext(aws.Context, *s3.HeadObjectInput, ...request.WaiterOption) error
}
var _ S3API = (*s3.S3)(nil)
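
The package documentation above describes the mocking pattern only in comment form; a self-contained, compilable version of the same sketch might look like this (mockS3Client and countBuckets are hypothetical names, not part of the SDK):

package s3iface_test

import (
	"testing"

	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3iface"
)

// mockS3Client embeds S3API so only the methods under test need stubbing.
type mockS3Client struct {
	s3iface.S3API
	buckets []*s3.Bucket
}

func (m *mockS3Client) ListBuckets(in *s3.ListBucketsInput) (*s3.ListBucketsOutput, error) {
	return &s3.ListBucketsOutput{Buckets: m.buckets}, nil
}

// countBuckets is the hypothetical function under test.
func countBuckets(svc s3iface.S3API) (int, error) {
	out, err := svc.ListBuckets(&s3.ListBucketsInput{})
	if err != nil {
		return 0, err
	}
	return len(out.Buckets), nil
}

func TestCountBuckets(t *testing.T) {
	mock := &mockS3Client{buckets: make([]*s3.Bucket, 3)}
	n, err := countBuckets(mock)
	if err != nil || n != 3 {
		t.Fatalf("got %d, %v; want 3, nil", n, err)
	}
}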


@@ -1,529 +0,0 @@
package s3manager
import (
"bytes"
"fmt"
"io"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3iface"
)
const (
// DefaultBatchSize is the batch size we initialize when constructing a batch delete client.
// This value is used when calling DeleteObjects. This represents how many objects to delete
// per DeleteObjects call.
DefaultBatchSize = 100
)
// BatchError will contain the key and bucket of the object that failed to
// either upload or download.
type BatchError struct {
Errors Errors
code string
message string
}
// Errors is a typed alias for a slice of errors to satisfy the error
// interface.
type Errors []Error
func (errs Errors) Error() string {
buf := bytes.NewBuffer(nil)
for i, err := range errs {
buf.WriteString(err.Error())
if i+1 < len(errs) {
buf.WriteString("\n")
}
}
return buf.String()
}
// Error will contain the original error, bucket, and key of the operation that failed
// during batch operations.
type Error struct {
OrigErr error
Bucket *string
Key *string
}
func newError(err error, bucket, key *string) Error {
return Error{
err,
bucket,
key,
}
}
func (err *Error) Error() string {
origErr := ""
if err.OrigErr != nil {
origErr = ":\n" + err.OrigErr.Error()
}
return fmt.Sprintf("failed to perform batch operation on %q to %q%s",
aws.StringValue(err.Key),
aws.StringValue(err.Bucket),
origErr,
)
}
// NewBatchError will return a BatchError that satisfies the awserr.Error interface.
func NewBatchError(code, message string, err []Error) awserr.Error {
return &BatchError{
Errors: err,
code: code,
message: message,
}
}
// Code will return the code associated with the batch error.
func (err *BatchError) Code() string {
return err.code
}
// Message will return the message associated with the batch error.
func (err *BatchError) Message() string {
return err.message
}
func (err *BatchError) Error() string {
return awserr.SprintError(err.Code(), err.Message(), "", err.Errors)
}
// OrigErr will return the original error, which in this case will always be nil
// for batched operations.
func (err *BatchError) OrigErr() error {
return err.Errors
}
// BatchDeleteIterator is an interface that uses the scanner pattern to
// iterate through what needs to be deleted.
type BatchDeleteIterator interface {
Next() bool
Err() error
DeleteObject() BatchDeleteObject
}
// DeleteListIterator is an alternative iterator for the BatchDelete client. This will
// iterate through a list of objects and delete the objects.
//
// Example:
// iter := &s3manager.DeleteListIterator{
// Client: svc,
// Input: &s3.ListObjectsInput{
// Bucket: aws.String("bucket"),
// MaxKeys: aws.Int64(5),
// },
// Paginator: request.Pagination{
// NewRequest: func() (*request.Request, error) {
// var inCpy *ListObjectsInput
// if input != nil {
// tmp := *input
// inCpy = &tmp
// }
// req, _ := c.ListObjectsRequest(inCpy)
// return req, nil
// },
// },
// }
//
// batcher := s3manager.NewBatchDeleteWithClient(svc)
// if err := batcher.Delete(aws.BackgroundContext(), iter); err != nil {
// return err
// }
type DeleteListIterator struct {
Bucket *string
Paginator request.Pagination
objects []*s3.Object
}
// NewDeleteListIterator will return a new DeleteListIterator.
func NewDeleteListIterator(svc s3iface.S3API, input *s3.ListObjectsInput, opts ...func(*DeleteListIterator)) BatchDeleteIterator {
iter := &DeleteListIterator{
Bucket: input.Bucket,
Paginator: request.Pagination{
NewRequest: func() (*request.Request, error) {
var inCpy *s3.ListObjectsInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := svc.ListObjectsRequest(inCpy)
return req, nil
},
},
}
for _, opt := range opts {
opt(iter)
}
return iter
}
// Next will use the S3API client to iterate through a list of objects.
func (iter *DeleteListIterator) Next() bool {
if len(iter.objects) > 0 {
iter.objects = iter.objects[1:]
}
if len(iter.objects) == 0 && iter.Paginator.Next() {
iter.objects = iter.Paginator.Page().(*s3.ListObjectsOutput).Contents
}
return len(iter.objects) > 0
}
// Err will return the last known error from Next.
func (iter *DeleteListIterator) Err() error {
return iter.Paginator.Err()
}
// DeleteObject will return the current object to be deleted.
func (iter *DeleteListIterator) DeleteObject() BatchDeleteObject {
return BatchDeleteObject{
Object: &s3.DeleteObjectInput{
Bucket: iter.Bucket,
Key: iter.objects[0].Key,
},
}
}
// BatchDelete will use the s3 package's service client to perform a batch
// delete.
type BatchDelete struct {
Client s3iface.S3API
BatchSize int
}
// NewBatchDeleteWithClient will return a new delete client that can delete
// objects in batches.
//
// Example:
// batcher := s3manager.NewBatchDeleteWithClient(client, size)
//
// objects := []BatchDeleteObject{
// {
// Object: &s3.DeleteObjectInput {
// Key: aws.String("key"),
// Bucket: aws.String("bucket"),
// },
// },
// }
//
// if err := batcher.Delete(aws.BackgroundContext(), &s3manager.DeleteObjectsIterator{
// Objects: objects,
// }); err != nil {
// return err
// }
func NewBatchDeleteWithClient(client s3iface.S3API, options ...func(*BatchDelete)) *BatchDelete {
svc := &BatchDelete{
Client: client,
BatchSize: DefaultBatchSize,
}
for _, opt := range options {
opt(svc)
}
return svc
}
// NewBatchDelete will return a new delete client that can delete
// objects in batches.
//
// Example:
// batcher := s3manager.NewBatchDelete(sess, size)
//
// objects := []BatchDeleteObject{
// {
// Object: &s3.DeleteObjectInput {
// Key: aws.String("key"),
// Bucket: aws.String("bucket"),
// },
// },
// }
//
// if err := batcher.Delete(aws.BackgroundContext(), &s3manager.DeleteObjectsIterator{
// Objects: objects,
// }); err != nil {
// return err
// }
func NewBatchDelete(c client.ConfigProvider, options ...func(*BatchDelete)) *BatchDelete {
client := s3.New(c)
return NewBatchDeleteWithClient(client, options...)
}
// BatchDeleteObject is a wrapper object for calling the batch delete operation.
type BatchDeleteObject struct {
Object *s3.DeleteObjectInput
// After will run after each iteration during the batch process. This function will
// be executed whether or not the request was successful.
After func() error
}
// DeleteObjectsIterator is an interface that uses the scanner pattern to iterate
// through a series of objects to be deleted.
type DeleteObjectsIterator struct {
Objects []BatchDeleteObject
index int
inc bool
}
// Next will increment the default iterator's index and ensure that there
// is another object to iterate to.
func (iter *DeleteObjectsIterator) Next() bool {
if iter.inc {
iter.index++
} else {
iter.inc = true
}
return iter.index < len(iter.Objects)
}
// Err will return an error. Since this is just used to satisfy the BatchDeleteIterator interface,
// this will only return nil.
func (iter *DeleteObjectsIterator) Err() error {
return nil
}
// DeleteObject will return the BatchDeleteObject at the current batched index.
func (iter *DeleteObjectsIterator) DeleteObject() BatchDeleteObject {
object := iter.Objects[iter.index]
return object
}
// Delete will use the iterator to queue up objects that need to be deleted.
// Once the batch size is met, this will call the deleteBatch function.
func (d *BatchDelete) Delete(ctx aws.Context, iter BatchDeleteIterator) error {
var errs []Error
objects := []BatchDeleteObject{}
var input *s3.DeleteObjectsInput
for iter.Next() {
o := iter.DeleteObject()
if input == nil {
input = initDeleteObjectsInput(o.Object)
}
parity := hasParity(input, o)
if parity {
input.Delete.Objects = append(input.Delete.Objects, &s3.ObjectIdentifier{
Key: o.Object.Key,
VersionId: o.Object.VersionId,
})
objects = append(objects, o)
}
if len(input.Delete.Objects) == d.BatchSize || !parity {
if err := deleteBatch(ctx, d, input, objects); err != nil {
errs = append(errs, err...)
}
objects = objects[:0]
input = nil
if !parity {
objects = append(objects, o)
input = initDeleteObjectsInput(o.Object)
input.Delete.Objects = append(input.Delete.Objects, &s3.ObjectIdentifier{
Key: o.Object.Key,
VersionId: o.Object.VersionId,
})
}
}
}
// iter.Next() could return false (above) plus populate iter.Err()
if iter.Err() != nil {
errs = append(errs, newError(iter.Err(), nil, nil))
}
if input != nil && len(input.Delete.Objects) > 0 {
if err := deleteBatch(ctx, d, input, objects); err != nil {
errs = append(errs, err...)
}
}
if len(errs) > 0 {
return NewBatchError("BatchedDeleteIncomplete", "some objects have failed to be deleted.", errs)
}
return nil
}
func initDeleteObjectsInput(o *s3.DeleteObjectInput) *s3.DeleteObjectsInput {
return &s3.DeleteObjectsInput{
Bucket: o.Bucket,
MFA: o.MFA,
RequestPayer: o.RequestPayer,
Delete: &s3.Delete{},
}
}
const (
// ErrDeleteBatchFailCode represents an error code which will be returned
// only when DeleteObjects.Errors has an error that does not contain a code.
ErrDeleteBatchFailCode = "DeleteBatchError"
errDefaultDeleteBatchMessage = "failed to delete"
)
// deleteBatch will delete a batch of items in the objects parameters.
func deleteBatch(ctx aws.Context, d *BatchDelete, input *s3.DeleteObjectsInput, objects []BatchDeleteObject) []Error {
errs := []Error{}
if result, err := d.Client.DeleteObjectsWithContext(ctx, input); err != nil {
for i := 0; i < len(input.Delete.Objects); i++ {
errs = append(errs, newError(err, input.Bucket, input.Delete.Objects[i].Key))
}
} else if len(result.Errors) > 0 {
for i := 0; i < len(result.Errors); i++ {
code := ErrDeleteBatchFailCode
msg := errDefaultDeleteBatchMessage
if result.Errors[i].Message != nil {
msg = *result.Errors[i].Message
}
if result.Errors[i].Code != nil {
code = *result.Errors[i].Code
}
errs = append(errs, newError(awserr.New(code, msg, err), input.Bucket, result.Errors[i].Key))
}
}
for _, object := range objects {
if object.After == nil {
continue
}
if err := object.After(); err != nil {
errs = append(errs, newError(err, object.Object.Bucket, object.Object.Key))
}
}
return errs
}
func hasParity(o1 *s3.DeleteObjectsInput, o2 BatchDeleteObject) bool {
if o1.Bucket != nil && o2.Object.Bucket != nil {
if *o1.Bucket != *o2.Object.Bucket {
return false
}
} else if o1.Bucket != o2.Object.Bucket {
return false
}
if o1.MFA != nil && o2.Object.MFA != nil {
if *o1.MFA != *o2.Object.MFA {
return false
}
} else if o1.MFA != o2.Object.MFA {
return false
}
if o1.RequestPayer != nil && o2.Object.RequestPayer != nil {
if *o1.RequestPayer != *o2.Object.RequestPayer {
return false
}
} else if o1.RequestPayer != o2.Object.RequestPayer {
return false
}
return true
}
// BatchDownloadIterator is an interface that uses the scanner pattern to iterate
// through a series of objects to be downloaded.
type BatchDownloadIterator interface {
Next() bool
Err() error
DownloadObject() BatchDownloadObject
}
// BatchDownloadObject contains all necessary information to run a batch operation once.
type BatchDownloadObject struct {
Object *s3.GetObjectInput
Writer io.WriterAt
// After will run after each iteration during the batch process. This function will
// be executed whether or not the request was successful.
After func() error
}
// DownloadObjectsIterator implements the BatchDownloadIterator interface and allows for batched
// download of objects.
type DownloadObjectsIterator struct {
Objects []BatchDownloadObject
index int
inc bool
}
// Next will increment the default iterator's index and ensure that there
// is another object to iterate to.
func (batcher *DownloadObjectsIterator) Next() bool {
if batcher.inc {
batcher.index++
} else {
batcher.inc = true
}
return batcher.index < len(batcher.Objects)
}
// DownloadObject will return the BatchDownloadObject at the current batched index.
func (batcher *DownloadObjectsIterator) DownloadObject() BatchDownloadObject {
object := batcher.Objects[batcher.index]
return object
}
// Err will return an error. Since this is just used to satisfy the BatchDownloadIterator interface,
// this will only return nil.
func (batcher *DownloadObjectsIterator) Err() error {
return nil
}
// BatchUploadIterator is an interface that uses the scanner pattern to
// iterate through what needs to be uploaded.
type BatchUploadIterator interface {
Next() bool
Err() error
UploadObject() BatchUploadObject
}
// UploadObjectsIterator implements the BatchUploadIterator interface and allows for batched
// upload of objects.
type UploadObjectsIterator struct {
Objects []BatchUploadObject
index int
inc bool
}
// Next will increment the default iterator's index and ensure that there
// is another object to iterate to.
func (batcher *UploadObjectsIterator) Next() bool {
if batcher.inc {
batcher.index++
} else {
batcher.inc = true
}
return batcher.index < len(batcher.Objects)
}
// Err will return an error. Since this is just used to satisfy the BatchUploadIterator interface,
// this will only return nil.
func (batcher *UploadObjectsIterator) Err() error {
return nil
}
// UploadObject will return the BatchUploadObject at the current batched index.
func (batcher *UploadObjectsIterator) UploadObject() BatchUploadObject {
object := batcher.Objects[batcher.index]
return object
}
// BatchUploadObject contains all necessary information to run a batch operation once.
type BatchUploadObject struct {
Object *UploadInput
// After will run after each iteration during the batch process. This function will
// be executed whether or not the request was successful.
After func() error
}
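
The upload and download iterator types above are consumed by Uploader.UploadWithIterator and Downloader.DownloadWithIterator, which live elsewhere in s3manager and are not part of this diff. A rough usage sketch for the upload side, with invented bucket and key names:

package main

import (
	"log"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func main() {
	sess := session.Must(session.NewSession())
	uploader := s3manager.NewUploader(sess)

	objects := []s3manager.BatchUploadObject{
		{Object: &s3manager.UploadInput{
			Bucket: aws.String("my-bucket"),
			Key:    aws.String("a.txt"),
			Body:   strings.NewReader("first object"),
		}},
		{Object: &s3manager.UploadInput{
			Bucket: aws.String("my-bucket"),
			Key:    aws.String("b.txt"),
			Body:   strings.NewReader("second object"),
		}},
	}

	iter := &s3manager.UploadObjectsIterator{Objects: objects}
	if err := uploader.UploadWithIterator(aws.BackgroundContext(), iter); err != nil {
		log.Fatal(err)
	}
}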


@@ -1,88 +0,0 @@
package s3manager
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3iface"
)
// GetBucketRegion will attempt to get the region for a bucket using the
// regionHint to determine which AWS partition to perform the query on.
//
// The request will not be signed, and will not use your AWS credentials.
//
// A "NotFound" error code will be returned if the bucket does not exist in the
// AWS partition the regionHint belongs to. If the regionHint parameter is an
// empty string GetBucketRegion will fall back to the ConfigProvider's region
// config. If the regionHint is empty, and the ConfigProvider does not have a
// region value, an error will be returned.
//
// For example, to get the region of a bucket which exists in "eu-central-1"
// you could provide a region hint of "us-west-2".
//
// sess := session.Must(session.NewSession())
//
// bucket := "my-bucket"
// region, err := s3manager.GetBucketRegion(ctx, sess, bucket, "us-west-2")
// if err != nil {
// if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" {
// fmt.Fprintf(os.Stderr, "unable to find bucket %s's region not found\n", bucket)
// }
// return err
// }
// fmt.Printf("Bucket %s is in %s region\n", bucket, region)
//
func GetBucketRegion(ctx aws.Context, c client.ConfigProvider, bucket, regionHint string, opts ...request.Option) (string, error) {
var cfg aws.Config
if len(regionHint) != 0 {
cfg.Region = aws.String(regionHint)
}
svc := s3.New(c, &cfg)
return GetBucketRegionWithClient(ctx, svc, bucket, opts...)
}
const bucketRegionHeader = "X-Amz-Bucket-Region"
// GetBucketRegionWithClient is the same as GetBucketRegion with the exception
// that it takes an S3 service client instead of a Session. The regionHint is
// derived from the region the S3 service client was created in.
//
// See GetBucketRegion for more information.
func GetBucketRegionWithClient(ctx aws.Context, svc s3iface.S3API, bucket string, opts ...request.Option) (string, error) {
req, _ := svc.HeadBucketRequest(&s3.HeadBucketInput{
Bucket: aws.String(bucket),
})
req.Config.S3ForcePathStyle = aws.Bool(true)
req.Config.Credentials = credentials.AnonymousCredentials
req.SetContext(ctx)
// Disable HTTP redirects to prevent an invalid 301 from eating the response
// because Go's HTTP client will fail, and drop the response if an 301 is
// received without a location header. S3 will return a 301 without the
// location header for HeadObject API calls.
req.DisableFollowRedirects = true
var bucketRegion string
req.Handlers.Send.PushBack(func(r *request.Request) {
bucketRegion = r.HTTPResponse.Header.Get(bucketRegionHeader)
if len(bucketRegion) == 0 {
return
}
r.HTTPResponse.StatusCode = 200
r.HTTPResponse.Status = "OK"
r.Error = nil
})
req.ApplyOptions(opts...)
if err := req.Send(); err != nil {
return "", err
}
bucketRegion = s3.NormalizeBucketLocation(bucketRegion)
return bucketRegion, nil
}
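
For completeness, a minimal caller of GetBucketRegionWithClient, mirroring the GetBucketRegion example above (region, bucket name, and session setup are illustrative):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func main() {
	// The client's own region serves as the region hint.
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
	svc := s3.New(sess)

	region, err := s3manager.GetBucketRegionWithClient(aws.BackgroundContext(), svc, "my-bucket")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("bucket region:", region)
}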


@@ -1,81 +0,0 @@
package s3manager
import (
"io"
"github.com/aws/aws-sdk-go/internal/sdkio"
)
// BufferedReadSeeker is a buffered io.ReadSeeker
type BufferedReadSeeker struct {
r io.ReadSeeker
buffer []byte
readIdx, writeIdx int
}
// NewBufferedReadSeeker returns a new BufferedReadSeeker
// if len(b) == 0 then the buffer will be initialized to 64 KiB.
func NewBufferedReadSeeker(r io.ReadSeeker, b []byte) *BufferedReadSeeker {
if len(b) == 0 {
b = make([]byte, 64*1024)
}
return &BufferedReadSeeker{r: r, buffer: b}
}
func (b *BufferedReadSeeker) reset(r io.ReadSeeker) {
b.r = r
b.readIdx, b.writeIdx = 0, 0
}
// Read will read up to len(p) bytes into p and will return
// the number of bytes read and any error that occurred.
// If len(p) is greater than the buffer size then a single read request
// will be issued to the underlying io.ReadSeeker for len(p) bytes.
// A Read request will at most perform a single Read to the underlying
// io.ReadSeeker, and may return < len(p) if serviced from the buffer.
func (b *BufferedReadSeeker) Read(p []byte) (n int, err error) {
if len(p) == 0 {
return n, err
}
if b.readIdx == b.writeIdx {
if len(p) >= len(b.buffer) {
n, err = b.r.Read(p)
return n, err
}
b.readIdx, b.writeIdx = 0, 0
n, err = b.r.Read(b.buffer)
if n == 0 {
return n, err
}
b.writeIdx += n
}
n = copy(p, b.buffer[b.readIdx:b.writeIdx])
b.readIdx += n
return n, err
}
// Seek will position the underlying io.ReadSeeker to the given offset
// and will clear the buffer.
func (b *BufferedReadSeeker) Seek(offset int64, whence int) (int64, error) {
n, err := b.r.Seek(offset, whence)
b.reset(b.r)
return n, err
}
// ReadAt will read up to len(p) bytes at the given file offset.
// This will result in the buffer being cleared.
func (b *BufferedReadSeeker) ReadAt(p []byte, off int64) (int, error) {
_, err := b.Seek(off, sdkio.SeekStart)
if err != nil {
return 0, err
}
return b.Read(p)
}
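// Example (an illustrative sketch, not part of the original file; assumes the
// bytes and fmt imports):
//
//     src := bytes.NewReader([]byte("hello world"))
//     brs := s3manager.NewBufferedReadSeeker(src, nil) // nil buffer defaults to 64 KiB
//
//     p := make([]byte, 5)
//     n, err := brs.Read(p)
//     fmt.Println(n, err, string(p[:n])) // 5 <nil> hello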


@ -1,7 +0,0 @@
// +build !windows
package s3manager
func defaultUploadBufferProvider() ReadSeekerWriteToProvider {
return nil
}


@ -1,5 +0,0 @@
package s3manager
func defaultUploadBufferProvider() ReadSeekerWriteToProvider {
return NewBufferedReadSeekerWriteToPool(1024 * 1024)
}


@ -1,7 +0,0 @@
// +build !windows
package s3manager
func defaultDownloadBufferProvider() WriterReadFromProvider {
return nil
}


@ -1,5 +0,0 @@
package s3manager
func defaultDownloadBufferProvider() WriterReadFromProvider {
return NewPooledBufferedWriterReadFromProvider(1024 * 1024)
}


@ -1,3 +0,0 @@
// Package s3manager provides utilities to upload and download objects from
// S3 concurrently. Helpful for when working with large objects.
package s3manager


@ -1,597 +0,0 @@
package s3manager
import (
"fmt"
"io"
"net/http"
"strconv"
"strings"
"sync"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3iface"
)
// DefaultDownloadPartSize is the default range of bytes to get at a time when
// using Download().
const DefaultDownloadPartSize = 1024 * 1024 * 5
// DefaultDownloadConcurrency is the default number of goroutines to spin up
// when using Download().
const DefaultDownloadConcurrency = 5
type errReadingBody struct {
err error
}
func (e *errReadingBody) Error() string {
return fmt.Sprintf("failed to read part body: %v", e.err)
}
func (e *errReadingBody) Unwrap() error {
return e.err
}
// The Downloader structure that calls Download(). It is safe to call Download()
// on this structure for multiple objects and across concurrent goroutines.
// Mutating the Downloader's properties is not safe to do concurrently.
type Downloader struct {
// The size (in bytes) to request from S3 for each part.
// The minimum allowed part size is 5MB, and if this value is set to zero,
// the DefaultDownloadPartSize value will be used.
//
// PartSize is ignored if the Range input parameter is provided.
PartSize int64
// The number of goroutines to spin up in parallel when sending parts.
// If this is set to zero, the DefaultDownloadConcurrency value will be used.
//
// Concurrency of 1 will download the parts sequentially.
//
// Concurrency is ignored if the Range input parameter is provided.
Concurrency int
// An S3 client to use when performing downloads.
S3 s3iface.S3API
// List of request options that will be passed down to individual API
// operation requests made by the downloader.
RequestOptions []request.Option
// Defines the buffer strategy used when downloading a part.
//
// If a WriterReadFromProvider is given the Download manager
// will pass the io.WriterAt of the Download request to the provider
// and will use the returned WriterReadFrom from the provider as the
// destination writer when copying from http response body.
BufferProvider WriterReadFromProvider
}
// WithDownloaderRequestOptions appends to the Downloader's API request options.
func WithDownloaderRequestOptions(opts ...request.Option) func(*Downloader) {
return func(d *Downloader) {
d.RequestOptions = append(d.RequestOptions, opts...)
}
}
// NewDownloader creates a new Downloader instance to download objects from
// S3 in concurrent chunks. Pass in additional functional options to customize
// the downloader behavior. Requires a client.ConfigProvider in order to create
// an S3 service client. The session.Session satisfies the client.ConfigProvider
// interface.
//
// Example:
// // The session the S3 Downloader will use
// sess := session.Must(session.NewSession())
//
// // Create a downloader with the session and default options
// downloader := s3manager.NewDownloader(sess)
//
// // Create a downloader with the session and custom options
// downloader := s3manager.NewDownloader(sess, func(d *s3manager.Downloader) {
// d.PartSize = 64 * 1024 * 1024 // 64MB per part
// })
func NewDownloader(c client.ConfigProvider, options ...func(*Downloader)) *Downloader {
return newDownloader(s3.New(c), options...)
}
func newDownloader(client s3iface.S3API, options ...func(*Downloader)) *Downloader {
d := &Downloader{
S3: client,
PartSize: DefaultDownloadPartSize,
Concurrency: DefaultDownloadConcurrency,
BufferProvider: defaultDownloadBufferProvider(),
}
for _, option := range options {
option(d)
}
return d
}
// NewDownloaderWithClient creates a new Downloader instance to download
// objects from S3 in concurrent chunks. Pass in additional functional
// options to customize the downloader behavior. Requires an S3 service client
// to make S3 API calls.
//
// Example:
// // The session the S3 Downloader will use
// sess := session.Must(session.NewSession())
//
// // The S3 client the S3 Downloader will use
// s3Svc := s3.New(sess)
//
// // Create a downloader with the s3 client and default options
// downloader := s3manager.NewDownloaderWithClient(s3Svc)
//
// // Create a downloader with the s3 client and custom options
// downloader := s3manager.NewDownloaderWithClient(s3Svc, func(d *s3manager.Downloader) {
// d.PartSize = 64 * 1024 * 1024 // 64MB per part
// })
func NewDownloaderWithClient(svc s3iface.S3API, options ...func(*Downloader)) *Downloader {
return newDownloader(svc, options...)
}
type maxRetrier interface {
MaxRetries() int
}
// Download downloads an object in S3 and writes the payload into w using
// concurrent GET requests. The n int64 returned is the size of the object downloaded
// in bytes.
//
// Additional functional options can be provided to configure the individual
// download. These options are copies of the Downloader instance Download is called from.
// Modifying the options will not impact the original Downloader instance.
//
// It is safe to call this method concurrently across goroutines.
//
// The w io.WriterAt can be satisfied by an os.File to do multipart concurrent
// downloads, or in memory []byte wrapper using aws.WriteAtBuffer.
//
// Specifying a Downloader.Concurrency of 1 will cause the Downloader to
// download the parts from S3 sequentially.
//
// If the GetObjectInput's Range value is provided, the downloader will
// perform a single GetObject request for that object's range. This will
// cause the part size and concurrency configurations to be ignored.
func (d Downloader) Download(w io.WriterAt, input *s3.GetObjectInput, options ...func(*Downloader)) (n int64, err error) {
return d.DownloadWithContext(aws.BackgroundContext(), w, input, options...)
}
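// Example (an illustrative sketch, not part of the original file; bucket and
// key are placeholders). Downloading into memory with aws.WriteAtBuffer, as
// described above:
//
//     buf := aws.NewWriteAtBuffer([]byte{})
//     n, err := downloader.Download(buf, &s3.GetObjectInput{
//         Bucket: aws.String("my-bucket"),
//         Key:    aws.String("my-key"),
//     })
//     if err != nil {
//         return err
//     }
//     fmt.Printf("downloaded %d bytes\n", n) // buf.Bytes() holds the object
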
// DownloadWithContext downloads an object in S3 and writes the payload into w
// using concurrent GET requests. The n int64 returned is the size of the object downloaded
// in bytes.
//
// DownloadWithContext is the same as Download with the additional support for
// Context input parameters. The Context must not be nil. A nil Context will
// cause a panic. Use the Context to add deadlining, timeouts, etc. The
// DownloadWithContext may create sub-contexts for individual underlying
// requests.
//
// Additional functional options can be provided to configure the individual
// download. These options are copies of the Downloader instance Download is
// called from. Modifying the options will not impact the original Downloader
// instance. Use the WithDownloaderRequestOptions helper function to pass in request
// options that will be applied to all API operations made with this downloader.
//
// The w io.WriterAt can be satisfied by an os.File to do multipart concurrent
// downloads, or in memory []byte wrapper using aws.WriteAtBuffer.
//
// Specifying a Downloader.Concurrency of 1 will cause the Downloader to
// download the parts from S3 sequentially.
//
// It is safe to call this method concurrently across goroutines.
//
// If the GetObjectInput's Range value is provided, the downloader will
// perform a single GetObject request for that object's range. This will
// cause the part size and concurrency configurations to be ignored.
func (d Downloader) DownloadWithContext(ctx aws.Context, w io.WriterAt, input *s3.GetObjectInput, options ...func(*Downloader)) (n int64, err error) {
impl := downloader{w: w, in: input, cfg: d, ctx: ctx}
for _, option := range options {
option(&impl.cfg)
}
impl.cfg.RequestOptions = append(impl.cfg.RequestOptions, request.WithAppendUserAgent("S3Manager"))
if s, ok := d.S3.(maxRetrier); ok {
impl.partBodyMaxRetries = s.MaxRetries()
}
impl.totalBytes = -1
if impl.cfg.Concurrency == 0 {
impl.cfg.Concurrency = DefaultDownloadConcurrency
}
if impl.cfg.PartSize == 0 {
impl.cfg.PartSize = DefaultDownloadPartSize
}
return impl.download()
}
// DownloadWithIterator will download a batched set of objects from S3 and
// write them to the io.WriterAt specified in the iterator.
//
// Example:
// svc := s3manager.NewDownloader(session)
//
// fooFile, err := os.Create("/tmp/foo.file")
// if err != nil {
// return err
// }
//
// barFile, err := os.Create("/tmp/bar.file")
// if err != nil {
// return err
// }
//
// objects := []s3manager.BatchDownloadObject {
// {
// Object: &s3.GetObjectInput {
// Bucket: aws.String("bucket"),
// Key: aws.String("foo"),
// },
// Writer: fooFile,
// },
// {
// Object: &s3.GetObjectInput {
// Bucket: aws.String("bucket"),
// Key: aws.String("bar"),
// },
// Writer: barFile,
// },
// }
//
// iter := &s3manager.DownloadObjectsIterator{Objects: objects}
// if err := svc.DownloadWithIterator(aws.BackgroundContext(), iter); err != nil {
// return err
// }
func (d Downloader) DownloadWithIterator(ctx aws.Context, iter BatchDownloadIterator, opts ...func(*Downloader)) error {
var errs []Error
for iter.Next() {
object := iter.DownloadObject()
if _, err := d.DownloadWithContext(ctx, object.Writer, object.Object, opts...); err != nil {
errs = append(errs, newError(err, object.Object.Bucket, object.Object.Key))
}
if object.After == nil {
continue
}
if err := object.After(); err != nil {
errs = append(errs, newError(err, object.Object.Bucket, object.Object.Key))
}
}
if len(errs) > 0 {
return NewBatchError("BatchedDownloadIncomplete", "some objects have failed to download.", errs)
}
return nil
}
// downloader is the implementation structure used internally by Downloader.
type downloader struct {
ctx aws.Context
cfg Downloader
in *s3.GetObjectInput
w io.WriterAt
wg sync.WaitGroup
m sync.Mutex
pos int64
totalBytes int64
written int64
err error
partBodyMaxRetries int
}
// download performs the implementation of the object download across ranged
// GETs.
func (d *downloader) download() (n int64, err error) {
// If a range is specified, fall back to a single download of that range.
// This enables ranged gets with the downloader, but at the cost of no
// multipart download.
if rng := aws.StringValue(d.in.Range); len(rng) > 0 {
d.downloadRange(rng)
return d.written, d.err
}
// Spin off first worker to check additional header information
d.getChunk()
if total := d.getTotalBytes(); total >= 0 {
// Spin up workers
ch := make(chan dlchunk, d.cfg.Concurrency)
for i := 0; i < d.cfg.Concurrency; i++ {
d.wg.Add(1)
go d.downloadPart(ch)
}
// Assign work
for d.getErr() == nil {
if d.pos >= total {
break // We're finished queuing chunks
}
// Queue the next range of bytes to read.
ch <- dlchunk{w: d.w, start: d.pos, size: d.cfg.PartSize}
d.pos += d.cfg.PartSize
}
// Wait for completion
close(ch)
d.wg.Wait()
} else {
// Checking if we read anything new
for d.err == nil {
d.getChunk()
}
// We expect a 416 error letting us know we are done downloading the
// total bytes. Since we do not know the content's length, this will
// keep grabbing chunks of data until the range of bytes specified in
// the request is out of range of the content. Once this happens, a
// 416 should occur.
e, ok := d.err.(awserr.RequestFailure)
if ok && e.StatusCode() == http.StatusRequestedRangeNotSatisfiable {
d.err = nil
}
}
// Return error
return d.written, d.err
}
// downloadPart is an individual goroutine worker reading from the ch channel
// and performing a GetObject request on the data with a given byte range.
//
// If this is the first worker, this operation also resolves the total number
// of bytes to be read so that the worker manager knows when it is finished.
func (d *downloader) downloadPart(ch chan dlchunk) {
defer d.wg.Done()
for {
chunk, ok := <-ch
if !ok {
break
}
if d.getErr() != nil {
// Drain the channel if there is an error, to prevent deadlocking
// of download producer.
continue
}
if err := d.downloadChunk(chunk); err != nil {
d.setErr(err)
}
}
}
// getChunk grabs a chunk of data from the body.
// Not thread safe. Should only be used when grabbing data on a single thread.
func (d *downloader) getChunk() {
if d.getErr() != nil {
return
}
chunk := dlchunk{w: d.w, start: d.pos, size: d.cfg.PartSize}
d.pos += d.cfg.PartSize
if err := d.downloadChunk(chunk); err != nil {
d.setErr(err)
}
}
// downloadRange downloads an Object given the passed in Byte-Range value.
// The chunk used to download the range will be configured for that range.
func (d *downloader) downloadRange(rng string) {
if d.getErr() != nil {
return
}
chunk := dlchunk{w: d.w, start: d.pos}
// Ranges specified will short circuit the multipart download
chunk.withRange = rng
if err := d.downloadChunk(chunk); err != nil {
d.setErr(err)
}
// Update the position based on the amount of data received.
d.pos = d.written
}
// downloadChunk downloads the chunk from s3
func (d *downloader) downloadChunk(chunk dlchunk) error {
in := &s3.GetObjectInput{}
awsutil.Copy(in, d.in)
// Get the next byte range of data
in.Range = aws.String(chunk.ByteRange())
var n int64
var err error
for retry := 0; retry <= d.partBodyMaxRetries; retry++ {
n, err = d.tryDownloadChunk(in, &chunk)
if err == nil {
break
}
// Check if the returned error is an errReadingBody.
// If err is errReadingBody this indicates that an error
// occurred while copying the http response body.
// If this occurs we unwrap the err to set the underlying error
// and attempt any remaining retries.
if bodyErr, ok := err.(*errReadingBody); ok {
err = bodyErr.Unwrap()
} else {
return err
}
chunk.cur = 0
logMessage(d.cfg.S3, aws.LogDebugWithRequestRetries,
fmt.Sprintf("DEBUG: object part body download interrupted %s, err, %v, retrying attempt %d",
aws.StringValue(in.Key), err, retry))
}
d.incrWritten(n)
return err
}
func (d *downloader) tryDownloadChunk(in *s3.GetObjectInput, w io.Writer) (int64, error) {
cleanup := func() {}
if d.cfg.BufferProvider != nil {
w, cleanup = d.cfg.BufferProvider.GetReadFrom(w)
}
defer cleanup()
resp, err := d.cfg.S3.GetObjectWithContext(d.ctx, in, d.cfg.RequestOptions...)
if err != nil {
return 0, err
}
d.setTotalBytes(resp) // Set total if not yet set.
n, err := io.Copy(w, resp.Body)
resp.Body.Close()
if err != nil {
return n, &errReadingBody{err: err}
}
return n, nil
}
func logMessage(svc s3iface.S3API, level aws.LogLevelType, msg string) {
s, ok := svc.(*s3.S3)
if !ok {
return
}
if s.Config.Logger == nil {
return
}
if s.Config.LogLevel.Matches(level) {
s.Config.Logger.Log(msg)
}
}
// getTotalBytes is a thread-safe getter for retrieving the total byte status.
func (d *downloader) getTotalBytes() int64 {
d.m.Lock()
defer d.m.Unlock()
return d.totalBytes
}
// setTotalBytes is a thread-safe setter for setting the total byte status.
// It extracts the object's total bytes from the Content-Range header if the
// file is chunked, or from Content-Length otherwise. Content-Length is used
// when the response does not include a Content-Range, meaning the object was
// not chunked; this occurs when the full file fits within the PartSize.
func (d *downloader) setTotalBytes(resp *s3.GetObjectOutput) {
d.m.Lock()
defer d.m.Unlock()
if d.totalBytes >= 0 {
return
}
if resp.ContentRange == nil {
// ContentRange is nil when the full file contents are provided, and
// is not chunked. Use ContentLength instead.
if resp.ContentLength != nil {
d.totalBytes = *resp.ContentLength
return
}
} else {
parts := strings.Split(*resp.ContentRange, "/")
total := int64(-1)
var err error
// Checking for whether or not a numbered total exists
// If one does not exist, we will assume the total to be -1, undefined,
// and sequentially download each chunk until hitting a 416 error
totalStr := parts[len(parts)-1]
if totalStr != "*" {
total, err = strconv.ParseInt(totalStr, 10, 64)
if err != nil {
d.err = err
return
}
}
d.totalBytes = total
}
}
func (d *downloader) incrWritten(n int64) {
d.m.Lock()
defer d.m.Unlock()
d.written += n
}
// getErr is a thread-safe getter for the error object
func (d *downloader) getErr() error {
d.m.Lock()
defer d.m.Unlock()
return d.err
}
// setErr is a thread-safe setter for the error object
func (d *downloader) setErr(e error) {
d.m.Lock()
defer d.m.Unlock()
d.err = e
}
// dlchunk represents a single chunk of data to write by the worker routine.
// This structure also implements an io.SectionReader style interface for
// io.WriterAt, effectively making it an io.SectionWriter (which does not
// exist).
type dlchunk struct {
w io.WriterAt
start int64
size int64
cur int64
// specifies the byte range the chunk should be downloaded with.
withRange string
}
// Write wraps io.WriterAt for the dlchunk, writing from the dlchunk's start
// position to its end (or EOF).
//
// If a range is specified on the dlchunk, the size will be ignored when
// writing, as the total size may not be known ahead of time.
func (c *dlchunk) Write(p []byte) (n int, err error) {
if c.cur >= c.size && len(c.withRange) == 0 {
return 0, io.EOF
}
n, err = c.w.WriteAt(p, c.start+c.cur)
c.cur += int64(n)
return
}
// ByteRange returns an HTTP Byte-Range header value that should be used by
// the client to request the chunk's range.
func (c *dlchunk) ByteRange() string {
if len(c.withRange) != 0 {
return c.withRange
}
return fmt.Sprintf("bytes=%d-%d", c.start, c.start+c.size-1)
}
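// As a quick check of the arithmetic above (illustrative, not part of the
// original file): the first chunk of a download with the default 5 MiB part
// size maps to
//
//     start, size := int64(0), int64(5*1024*1024)
//     fmt.Sprintf("bytes=%d-%d", start, start+size-1) // "bytes=0-5242879"
//
// i.e. the end offset is inclusive, so it is start+size-1 rather than
// start+size.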


@ -1,65 +0,0 @@
package s3manager
import (
"io"
"sync"
)
// ReadSeekerWriteTo defines an interface implementing io.WriteTo and io.ReadSeeker
type ReadSeekerWriteTo interface {
io.ReadSeeker
io.WriterTo
}
// BufferedReadSeekerWriteTo wraps a BufferedReadSeeker with an io.WriteAt
// implementation.
type BufferedReadSeekerWriteTo struct {
*BufferedReadSeeker
}
// WriteTo writes to the given io.Writer from BufferedReadSeeker until there's no more data to write or
// an error occurs. Returns the number of bytes written and any error encountered during the write.
func (b *BufferedReadSeekerWriteTo) WriteTo(writer io.Writer) (int64, error) {
return io.Copy(writer, b.BufferedReadSeeker)
}
// ReadSeekerWriteToProvider provides an implementation of io.WriteTo for an io.ReadSeeker
type ReadSeekerWriteToProvider interface {
GetWriteTo(seeker io.ReadSeeker) (r ReadSeekerWriteTo, cleanup func())
}
// BufferedReadSeekerWriteToPool uses a sync.Pool to create and reuse
// []byte slices for buffering parts in memory
type BufferedReadSeekerWriteToPool struct {
pool sync.Pool
}
// NewBufferedReadSeekerWriteToPool will return a new BufferedReadSeekerWriteToPool that will create
// a pool of reusable buffers. If size is less than 64 KiB then the buffer
// will default to 64 KiB. Reason: io.Copy from writers or readers that don't support io.WriteTo or io.ReadFrom
// respectively will default to copying 32 KiB.
func NewBufferedReadSeekerWriteToPool(size int) *BufferedReadSeekerWriteToPool {
if size < 65536 {
size = 65536
}
return &BufferedReadSeekerWriteToPool{
pool: sync.Pool{New: func() interface{} {
return make([]byte, size)
}},
}
}
// GetWriteTo will wrap the provided io.ReadSeeker with a BufferedReadSeekerWriteTo.
// The provided cleanup must be called after operations have been completed on the
// returned ReadSeekerWriteTo in order to signal the return of resources to the pool.
func (p *BufferedReadSeekerWriteToPool) GetWriteTo(seeker io.ReadSeeker) (r ReadSeekerWriteTo, cleanup func()) {
buffer := p.pool.Get().([]byte)
r = &BufferedReadSeekerWriteTo{BufferedReadSeeker: NewBufferedReadSeeker(seeker, buffer)}
cleanup = func() {
p.pool.Put(buffer)
}
return r, cleanup
}
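// Example (an illustrative sketch, not part of the original file; assumes the
// strings and os imports):
//
//     pool := s3manager.NewBufferedReadSeekerWriteToPool(1024 * 1024)
//
//     wt, cleanup := pool.GetWriteTo(strings.NewReader("part payload"))
//     defer cleanup() // returns the buffer to the pool
//
//     if _, err := wt.WriteTo(os.Stdout); err != nil {
//         return err
//     }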


@ -1,774 +0,0 @@
package s3manager
import (
"bytes"
"fmt"
"io"
"sort"
"sync"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3iface"
)
// MaxUploadParts is the maximum allowed number of parts in a multi-part upload
// on Amazon S3.
const MaxUploadParts = 10000
// MinUploadPartSize is the minimum allowed part size when uploading a part to
// Amazon S3.
const MinUploadPartSize int64 = 1024 * 1024 * 5
// DefaultUploadPartSize is the default part size to buffer chunks of a
// payload into.
const DefaultUploadPartSize = MinUploadPartSize
// DefaultUploadConcurrency is the default number of goroutines to spin up when
// using Upload().
const DefaultUploadConcurrency = 5
// A MultiUploadFailure wraps a failed S3 multipart upload. An error returned
// will satisfy this interface when a multipart upload failed to upload all
// chunks to S3. In the case of a failure the UploadID is needed to operate on
// the chunks, if any, which were uploaded.
//
// Example:
//
// u := s3manager.NewUploader(opts)
// output, err := u.upload(input)
// if err != nil {
// if multierr, ok := err.(s3manager.MultiUploadFailure); ok {
// // Process error and its associated uploadID
// fmt.Println("Error:", multierr.Code(), multierr.Message(), multierr.UploadID())
// } else {
// // Process error generically
// fmt.Println("Error:", err.Error())
// }
// }
//
type MultiUploadFailure interface {
awserr.Error
// Returns the upload id for the S3 multipart upload that failed.
UploadID() string
}
// So that the Error interface type can be included as an anonymous field
// in the multiUploadError struct and not conflict with the error.Error() method.
type awsError awserr.Error
// A multiUploadError wraps the upload ID of a failed S3 multipart upload.
// Composed of BaseError for code, message, and original error.
//
// Should be used for an error that occurred while failing an S3 multipart
// upload, when an upload ID is available. If an upload ID is not available, a
// more relevant error should be used instead.
type multiUploadError struct {
awsError
// ID for multipart upload which failed.
uploadID string
}
// Error returns the string representation of the error.
//
// See apierr.BaseError ErrorWithExtra for output format
//
// Satisfies the error interface.
func (m multiUploadError) Error() string {
extra := fmt.Sprintf("upload id: %s", m.uploadID)
return awserr.SprintError(m.Code(), m.Message(), extra, m.OrigErr())
}
// String returns the string representation of the error.
// Alias for Error to satisfy the stringer interface.
func (m multiUploadError) String() string {
return m.Error()
}
// UploadID returns the id of the S3 upload which failed.
func (m multiUploadError) UploadID() string {
return m.uploadID
}
// UploadOutput represents a response from the Upload() call.
type UploadOutput struct {
// The URL where the object was uploaded to.
Location string
// The version of the object that was uploaded. Will only be populated if
// the S3 Bucket is versioned. If the bucket is not versioned this field
// will not be set.
VersionID *string
// The ID for a multipart upload to S3. In the case of an error the error
// can be cast to the MultiUploadFailure interface to extract the upload ID.
UploadID string
}
// WithUploaderRequestOptions appends to the Uploader's API request options.
func WithUploaderRequestOptions(opts ...request.Option) func(*Uploader) {
return func(u *Uploader) {
u.RequestOptions = append(u.RequestOptions, opts...)
}
}
// The Uploader structure that calls Upload(). It is safe to call Upload()
// on this structure for multiple objects and across concurrent goroutines.
// Mutating the Uploader's properties is not safe to do concurrently.
type Uploader struct {
// The buffer size (in bytes) to use when buffering data into chunks and
// sending them as parts to S3. The minimum allowed part size is 5MB, and
// if this value is set to zero, the DefaultUploadPartSize value will be used.
PartSize int64
// The number of goroutines to spin up in parallel per call to Upload when
// sending parts. If this is set to zero, the DefaultUploadConcurrency value
// will be used.
//
// The concurrency pool is not shared between calls to Upload.
Concurrency int
// Setting this value to true will cause the SDK to avoid calling
// AbortMultipartUpload on a failure, leaving all successfully uploaded
// parts on S3 for manual recovery.
//
// Note that storing parts of an incomplete multipart upload counts towards
// space usage on S3 and will add additional costs if not cleaned up.
LeavePartsOnError bool
// MaxUploadParts is the max number of parts which will be uploaded to S3.
// Will be used to calculate the part size of the object to be uploaded.
// E.g: a 5GB file, with MaxUploadParts set to 100, will be uploaded as
// 100 parts of 50MB each, subject to the s3.MaxUploadParts limit (10,000 parts).
//
// MaxUploadParts must not be used to limit the total number of bytes uploaded.
// Use a type like io.LimitedReader (https://golang.org/pkg/io/#LimitedReader)
// instead. An io.LimitedReader is helpful when uploading an unbounded reader
// to S3, and you know its maximum size. Otherwise the reader's io.EOF returned
// error must be used to signal end of stream.
//
// Defaults to package const's MaxUploadParts value.
MaxUploadParts int
// The client to use when uploading to S3.
S3 s3iface.S3API
// List of request options that will be passed down to individual API
// operation requests made by the uploader.
RequestOptions []request.Option
// Defines the buffer strategy used when uploading a part
BufferProvider ReadSeekerWriteToProvider
// partPool allows for the re-usage of streaming payload part buffers between upload calls
partPool *partPool
}
// NewUploader creates a new Uploader instance to upload objects to S3. Pass in
// additional functional options to customize the uploader's behavior. Requires a
// client.ConfigProvider in order to create an S3 service client. The session.Session
// satisfies the client.ConfigProvider interface.
//
// Example:
// // The session the S3 Uploader will use
// sess := session.Must(session.NewSession())
//
// // Create an uploader with the session and default options
// uploader := s3manager.NewUploader(sess)
//
// // Create an uploader with the session and custom options
// uploader := s3manager.NewUploader(sess, func(u *s3manager.Uploader) {
// u.PartSize = 64 * 1024 * 1024 // 64MB per part
// })
func NewUploader(c client.ConfigProvider, options ...func(*Uploader)) *Uploader {
return newUploader(s3.New(c), options...)
}
func newUploader(client s3iface.S3API, options ...func(*Uploader)) *Uploader {
u := &Uploader{
S3: client,
PartSize: DefaultUploadPartSize,
Concurrency: DefaultUploadConcurrency,
LeavePartsOnError: false,
MaxUploadParts: MaxUploadParts,
BufferProvider: defaultUploadBufferProvider(),
}
for _, option := range options {
option(u)
}
u.partPool = newPartPool(u.PartSize)
return u
}
// NewUploaderWithClient creates a new Uploader instance to upload objects to S3. Pass in
// additional functional options to customize the uploader's behavior. Requires
// an S3 service client to make S3 API calls.
//
// Example:
// // The session the S3 Uploader will use
// sess := session.Must(session.NewSession())
//
// // S3 service client the Upload manager will use.
// s3Svc := s3.New(sess)
//
// // Create an uploader with S3 client and default options
// uploader := s3manager.NewUploaderWithClient(s3Svc)
//
// // Create an uploader with S3 client and custom options
// uploader := s3manager.NewUploaderWithClient(s3Svc, func(u *s3manager.Uploader) {
// u.PartSize = 64 * 1024 * 1024 // 64MB per part
// })
func NewUploaderWithClient(svc s3iface.S3API, options ...func(*Uploader)) *Uploader {
return newUploader(svc, options...)
}
// Upload uploads an object to S3, intelligently buffering large files into
// smaller chunks and sending them in parallel across multiple goroutines. You
// can configure the buffer size and concurrency through the Uploader's parameters.
//
// Additional functional options can be provided to configure the individual
// upload. These options are copies of the Uploader instance Upload is called from.
// Modifying the options will not impact the original Uploader instance.
//
// Use the WithUploaderRequestOptions helper function to pass in request
// options that will be applied to all API operations made with this uploader.
//
// It is safe to call this method concurrently across goroutines.
//
// Example:
// // Upload input parameters
// upParams := &s3manager.UploadInput{
// Bucket: &bucketName,
// Key: &keyName,
// Body: file,
// }
//
// // Perform an upload.
// result, err := uploader.Upload(upParams)
//
// // Perform upload with options different from those in the Uploader.
// result, err := uploader.Upload(upParams, func(u *s3manager.Uploader) {
// u.PartSize = 10 * 1024 * 1024 // 10MB part size
// u.LeavePartsOnError = true // Don't delete the parts if the upload fails.
// })
func (u Uploader) Upload(input *UploadInput, options ...func(*Uploader)) (*UploadOutput, error) {
return u.UploadWithContext(aws.BackgroundContext(), input, options...)
}
// UploadWithContext uploads an object to S3, intelligently buffering large
// files into smaller chunks and sending them in parallel across multiple
// goroutines. You can configure the buffer size and concurrency through the
// Uploader's parameters.
//
// UploadWithContext is the same as Upload with the additional support for
// Context input parameters. The Context must not be nil. A nil Context will
// cause a panic. Use the context to add deadlining, timeouts, etc. The
// UploadWithContext may create sub-contexts for individual underlying requests.
//
// Additional functional options can be provided to configure the individual
// upload. These options are copies of the Uploader instance Upload is called from.
// Modifying the options will not impact the original Uploader instance.
//
// Use the WithUploaderRequestOptions helper function to pass in request
// options that will be applied to all API operations made with this uploader.
//
// It is safe to call this method concurrently across goroutines.
func (u Uploader) UploadWithContext(ctx aws.Context, input *UploadInput, opts ...func(*Uploader)) (*UploadOutput, error) {
i := uploader{in: input, cfg: u, ctx: ctx}
for _, opt := range opts {
opt(&i.cfg)
}
i.cfg.RequestOptions = append(i.cfg.RequestOptions, request.WithAppendUserAgent("S3Manager"))
return i.upload()
}
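// Example (an illustrative sketch, not part of the original file; assumes the
// context and time imports, with bucket, key and file as placeholders; any
// context.Context satisfies aws.Context):
//
//     ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
//     defer cancel()
//
//     result, err := uploader.UploadWithContext(ctx, &s3manager.UploadInput{
//         Bucket: aws.String("my-bucket"),
//         Key:    aws.String("my-key"),
//         Body:   file,
//     })
//     if err != nil {
//         return err
//     }
//     fmt.Println("uploaded to", result.Location)
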
// UploadWithIterator will upload a batched amount of objects to S3. This operation uses
// the iterator pattern to know which object to upload next. Since this is an interface this
// allows for custom defined functionality.
//
// Example:
// svc := s3manager.NewUploader(sess)
//
// objects := []BatchUploadObject{
// {
// Object: &s3manager.UploadInput {
// Key: aws.String("key"),
// Bucket: aws.String("bucket"),
// },
// },
// }
//
// iter := &s3manager.UploadObjectsIterator{Objects: objects}
// if err := svc.UploadWithIterator(aws.BackgroundContext(), iter); err != nil {
// return err
// }
func (u Uploader) UploadWithIterator(ctx aws.Context, iter BatchUploadIterator, opts ...func(*Uploader)) error {
var errs []Error
for iter.Next() {
object := iter.UploadObject()
if _, err := u.UploadWithContext(ctx, object.Object, opts...); err != nil {
s3Err := Error{
OrigErr: err,
Bucket: object.Object.Bucket,
Key: object.Object.Key,
}
errs = append(errs, s3Err)
}
if object.After == nil {
continue
}
if err := object.After(); err != nil {
s3Err := Error{
OrigErr: err,
Bucket: object.Object.Bucket,
Key: object.Object.Key,
}
errs = append(errs, s3Err)
}
}
if len(errs) > 0 {
return NewBatchError("BatchedUploadIncomplete", "some objects have failed to upload.", errs)
}
return nil
}
// internal structure to manage an upload to S3.
type uploader struct {
ctx aws.Context
cfg Uploader
in *UploadInput
readerPos int64 // current reader position
totalSize int64 // set to -1 if the size is not known
}
// internal logic for deciding whether to upload a single part or use a
// multipart upload.
func (u *uploader) upload() (*UploadOutput, error) {
if err := u.init(); err != nil {
return nil, awserr.New("ReadRequestBody", "unable to initialize upload", err)
}
if u.cfg.PartSize < MinUploadPartSize {
msg := fmt.Sprintf("part size must be at least %d bytes", MinUploadPartSize)
return nil, awserr.New("ConfigError", msg, nil)
}
// Do one read to determine if we have more than one part
reader, _, cleanup, err := u.nextReader()
if err == io.EOF { // single part
return u.singlePart(reader, cleanup)
} else if err != nil {
cleanup()
return nil, awserr.New("ReadRequestBody", "read upload data failed", err)
}
mu := multiuploader{uploader: u}
return mu.upload(reader, cleanup)
}
// init will initialize all default options.
func (u *uploader) init() error {
if u.cfg.Concurrency == 0 {
u.cfg.Concurrency = DefaultUploadConcurrency
}
if u.cfg.PartSize == 0 {
u.cfg.PartSize = DefaultUploadPartSize
}
if u.cfg.MaxUploadParts == 0 {
u.cfg.MaxUploadParts = MaxUploadParts
}
// If PartSize was changed or partPool was never set up then we need to allocate
// a new pool so that we return []byte slices of the correct size.
if u.cfg.partPool == nil || u.cfg.partPool.partSize != u.cfg.PartSize {
u.cfg.partPool = newPartPool(u.cfg.PartSize)
}
// Try to get the total size for some optimizations
return u.initSize()
}
// initSize tries to detect the total stream size, setting u.totalSize. If
// the size is not known, totalSize is set to -1.
func (u *uploader) initSize() error {
u.totalSize = -1
switch r := u.in.Body.(type) {
case io.Seeker:
n, err := aws.SeekerLen(r)
if err != nil {
return err
}
u.totalSize = n
// Try to adjust partSize if it is too small and account for
// integer division truncation.
if u.totalSize/u.cfg.PartSize >= int64(u.cfg.MaxUploadParts) {
// Add one to the part size to account for remainders
// during the size calculation, e.g. an odd number of bytes.
u.cfg.PartSize = (u.totalSize / int64(u.cfg.MaxUploadParts)) + 1
}
}
return nil
}
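// Worked example of the adjustment above (illustrative numbers, not part of
// the original file): a 100 GiB body at the default 5 MiB part size would
// need 20480 parts, so PartSize is raised to fit within MaxUploadParts:
//
//     totalSize := int64(100 * 1024 * 1024 * 1024) // 107374182400
//     partSize := int64(5 * 1024 * 1024)
//     if totalSize/partSize >= 10000 {             // MaxUploadParts
//         partSize = totalSize/10000 + 1           // 10737419 (~10.24 MiB)
//     }
//
// The +1 absorbs the remainder of the integer division, keeping the part
// count at or under the limit.
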
// nextReader returns a seekable reader representing the next packet of data.
// This operation increases the shared u.readerPos counter, but note that it
// does not need to be wrapped in a mutex because nextReader is only called
// from the main thread.
func (u *uploader) nextReader() (io.ReadSeeker, int, func(), error) {
type readerAtSeeker interface {
io.ReaderAt
io.ReadSeeker
}
switch r := u.in.Body.(type) {
case readerAtSeeker:
var err error
n := u.cfg.PartSize
if u.totalSize >= 0 {
bytesLeft := u.totalSize - u.readerPos
if bytesLeft <= u.cfg.PartSize {
err = io.EOF
n = bytesLeft
}
}
var (
reader io.ReadSeeker
cleanup func()
)
reader = io.NewSectionReader(r, u.readerPos, n)
if u.cfg.BufferProvider != nil {
reader, cleanup = u.cfg.BufferProvider.GetWriteTo(reader)
} else {
cleanup = func() {}
}
u.readerPos += n
return reader, int(n), cleanup, err
default:
part := u.cfg.partPool.Get().([]byte)
n, err := readFillBuf(r, part)
u.readerPos += int64(n)
cleanup := func() {
u.cfg.partPool.Put(part)
}
return bytes.NewReader(part[0:n]), n, cleanup, err
}
}
func readFillBuf(r io.Reader, b []byte) (offset int, err error) {
for offset < len(b) && err == nil {
var n int
n, err = r.Read(b[offset:])
offset += n
}
return offset, err
}
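// The loop above behaves like io.ReadFull, except that a short source yields
// the plain io.EOF together with the partial count (a quick illustrative
// check, not part of the original file; assumes the strings import):
//
//     b := make([]byte, 5)
//     n, err := readFillBuf(strings.NewReader("abc"), b)
//     // n == 3, err == io.EOF
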
// singlePart contains upload logic for uploading a single chunk via
// a regular PutObject request. Multipart requests require at least two
// parts, or at least 5MB of data.
func (u *uploader) singlePart(r io.ReadSeeker, cleanup func()) (*UploadOutput, error) {
defer cleanup()
params := &s3.PutObjectInput{}
awsutil.Copy(params, u.in)
params.Body = r
// Need to use the request form because the URL generated in the request
// is used in the return value.
req, out := u.cfg.S3.PutObjectRequest(params)
req.SetContext(u.ctx)
req.ApplyOptions(u.cfg.RequestOptions...)
if err := req.Send(); err != nil {
return nil, err
}
url := req.HTTPRequest.URL.String()
return &UploadOutput{
Location: url,
VersionID: out.VersionId,
}, nil
}
// internal structure to manage a specific multipart upload to S3.
type multiuploader struct {
*uploader
wg sync.WaitGroup
m sync.Mutex
err error
uploadID string
parts completedParts
}
// keeps track of a single chunk of data being sent to S3.
type chunk struct {
buf io.ReadSeeker
num int64
cleanup func()
}
// completedParts is a wrapper to make parts sortable by their part number,
// since S3 requires this list to be sent in sorted order.
type completedParts []*s3.CompletedPart
func (a completedParts) Len() int { return len(a) }
func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a completedParts) Less(i, j int) bool { return *a[i].PartNumber < *a[j].PartNumber }
// upload will perform a multipart upload using the firstBuf buffer containing
// the first chunk of data.
func (u *multiuploader) upload(firstBuf io.ReadSeeker, cleanup func()) (*UploadOutput, error) {
params := &s3.CreateMultipartUploadInput{}
awsutil.Copy(params, u.in)
// Create the multipart
resp, err := u.cfg.S3.CreateMultipartUploadWithContext(u.ctx, params, u.cfg.RequestOptions...)
if err != nil {
return nil, err
}
u.uploadID = *resp.UploadId
// Create the workers
ch := make(chan chunk, u.cfg.Concurrency)
for i := 0; i < u.cfg.Concurrency; i++ {
u.wg.Add(1)
go u.readChunk(ch)
}
// Send part 1 to the workers
var num int64 = 1
ch <- chunk{buf: firstBuf, num: num, cleanup: cleanup}
// Read and queue the rest of the parts
for u.geterr() == nil && err == nil {
var (
reader io.ReadSeeker
nextChunkLen int
ok bool
)
reader, nextChunkLen, cleanup, err = u.nextReader()
ok, err = u.shouldContinue(num, nextChunkLen, err)
if !ok {
cleanup()
if err != nil {
u.seterr(err)
}
break
}
num++
ch <- chunk{buf: reader, num: num, cleanup: cleanup}
}
// Close the channel, wait for workers, and complete upload
close(ch)
u.wg.Wait()
complete := u.complete()
if err := u.geterr(); err != nil {
return nil, &multiUploadError{
awsError: awserr.New(
"MultipartUpload",
"upload multipart failed",
err),
uploadID: u.uploadID,
}
}
// Create a presigned URL of the S3 Get Object in order to have parity with
// single part upload.
getReq, _ := u.cfg.S3.GetObjectRequest(&s3.GetObjectInput{
Bucket: u.in.Bucket,
Key: u.in.Key,
})
getReq.Config.Credentials = credentials.AnonymousCredentials
uploadLocation, _, _ := getReq.PresignRequest(1)
return &UploadOutput{
Location: uploadLocation,
VersionID: complete.VersionId,
UploadID: u.uploadID,
}, nil
}
func (u *multiuploader) shouldContinue(part int64, nextChunkLen int, err error) (bool, error) {
if err != nil && err != io.EOF {
return false, awserr.New("ReadRequestBody", "read multipart upload data failed", err)
}
if nextChunkLen == 0 {
// No need to upload an empty part; if the file was empty to start
// with, an empty single part would have been created and the
// multipart upload never started.
return false, nil
}
part++
// This upload exceeded the maximum number of supported parts; error now.
if part > int64(u.cfg.MaxUploadParts) || part > int64(MaxUploadParts) {
var msg string
if part > int64(u.cfg.MaxUploadParts) {
msg = fmt.Sprintf("exceeded total allowed configured MaxUploadParts (%d). Adjust PartSize to fit in this limit",
u.cfg.MaxUploadParts)
} else {
msg = fmt.Sprintf("exceeded total allowed S3 limit MaxUploadParts (%d). Adjust PartSize to fit in this limit",
MaxUploadParts)
}
return false, awserr.New("TotalPartsExceeded", msg, nil)
}
return true, err
}
// readChunk runs in worker goroutines to pull chunks off of the ch channel
// and send() them as UploadPart requests.
func (u *multiuploader) readChunk(ch chan chunk) {
defer u.wg.Done()
for {
data, ok := <-ch
if !ok {
break
}
if u.geterr() == nil {
if err := u.send(data); err != nil {
u.seterr(err)
}
}
}
}
// send performs an UploadPart request and keeps track of the completed
// part information.
func (u *multiuploader) send(c chunk) error {
params := &s3.UploadPartInput{
Bucket: u.in.Bucket,
Key: u.in.Key,
Body: c.buf,
UploadId: &u.uploadID,
SSECustomerAlgorithm: u.in.SSECustomerAlgorithm,
SSECustomerKey: u.in.SSECustomerKey,
PartNumber: &c.num,
}
resp, err := u.cfg.S3.UploadPartWithContext(u.ctx, params, u.cfg.RequestOptions...)
c.cleanup()
if err != nil {
return err
}
n := c.num
completed := &s3.CompletedPart{ETag: resp.ETag, PartNumber: &n}
u.m.Lock()
u.parts = append(u.parts, completed)
u.m.Unlock()
return nil
}
// geterr is a thread-safe getter for the error object
func (u *multiuploader) geterr() error {
u.m.Lock()
defer u.m.Unlock()
return u.err
}
// seterr is a thread-safe setter for the error object
func (u *multiuploader) seterr(e error) {
u.m.Lock()
defer u.m.Unlock()
u.err = e
}
// fail will abort the multipart upload unless LeavePartsOnError is set to true.
func (u *multiuploader) fail() {
if u.cfg.LeavePartsOnError {
return
}
params := &s3.AbortMultipartUploadInput{
Bucket: u.in.Bucket,
Key: u.in.Key,
UploadId: &u.uploadID,
}
_, err := u.cfg.S3.AbortMultipartUploadWithContext(u.ctx, params, u.cfg.RequestOptions...)
if err != nil {
logMessage(u.cfg.S3, aws.LogDebug, fmt.Sprintf("failed to abort multipart upload, %v", err))
}
}
// complete successfully completes a multipart upload and returns the response.
func (u *multiuploader) complete() *s3.CompleteMultipartUploadOutput {
if u.geterr() != nil {
u.fail()
return nil
}
// Parts must be sorted in PartNumber order.
sort.Sort(u.parts)
params := &s3.CompleteMultipartUploadInput{
Bucket: u.in.Bucket,
Key: u.in.Key,
UploadId: &u.uploadID,
MultipartUpload: &s3.CompletedMultipartUpload{Parts: u.parts},
}
resp, err := u.cfg.S3.CompleteMultipartUploadWithContext(u.ctx, params, u.cfg.RequestOptions...)
if err != nil {
u.seterr(err)
u.fail()
}
return resp
}
type partPool struct {
partSize int64
sync.Pool
}
func newPartPool(partSize int64) *partPool {
p := &partPool{partSize: partSize}
p.New = func() interface{} {
return make([]byte, p.partSize)
}
return p
}
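// The same sized-buffer idea in miniature (an illustrative sketch, not part
// of the original file):
//
//     pool := newPartPool(5 * 1024 * 1024)
//     part := pool.Get().([]byte) // len(part) == 5242880
//     // ... fill and upload the part ...
//     pool.Put(part)              // recycle the buffer for later calls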


@ -1,129 +0,0 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package s3manager
import (
"io"
"time"
)
// UploadInput provides the input parameters for uploading a stream or buffer
// to an object in an Amazon S3 bucket. This type is similar to the s3
// package's PutObjectInput with the exception that the Body member is an
// io.Reader instead of an io.ReadSeeker.
type UploadInput struct {
_ struct{} `locationName:"PutObjectRequest" type:"structure" payload:"Body"`
// The canned ACL to apply to the object.
ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
// The readable body payload to send to S3.
Body io.Reader
// Name of the bucket to which the PUT operation was initiated.
//
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
// Specifies caching behavior along the request/reply chain.
CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
// Specifies presentational information for the object.
ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
// Specifies what content encodings have been applied to the object and thus
// what decoding mechanisms must be applied to obtain the media-type referenced
// by the Content-Type header field.
ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
// The language the content is in.
ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
// The base64-encoded 128-bit MD5 digest of the part data. This parameter is
// auto-populated when using the command from the CLI. This parameter is required
// if object lock parameters are specified.
ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"`
// A standard MIME type describing the format of the object data.
ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
// The date and time at which the object is no longer cacheable.
Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"`
// Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
// Allows grantee to read the object data and its metadata.
GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
// Allows grantee to read the object ACL.
GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
// Allows grantee to write the ACL for the applicable object.
GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
// Object key for which the PUT operation was initiated.
//
// Key is a required field
Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
// A map of metadata to store with the object in S3.
Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
// The Legal Hold status that you want to apply to the specified object.
ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
// The object lock mode that you want to apply to this object.
ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
// The date and time when you want this object's object lock to expire.
ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
// Confirms that the requester knows that she or he will be charged for the
// request. Bucket owners need not specify this parameter in their requests.
// Documentation on downloading objects from requester pays buckets can be found
// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
// Specifies the algorithm to use when encrypting the object (e.g., AES256).
SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
// data. This value is used to store the object and then it is discarded; Amazon
// does not store the encryption key. The key must be appropriate for use with
// the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
// header.
SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
// Amazon S3 uses this header for a message integrity check to ensure the encryption
// key was transmitted without error.
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
// Specifies the AWS KMS Encryption Context to use for object encryption. The
// value of this header is a base64-encoded UTF-8 string holding JSON with the
// encryption context key-value pairs.
SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`
// Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
// requests for an object protected by AWS KMS will fail if not made via SSL
// or using SigV4. Documentation on configuring any of the officially supported
// AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
// The Server-side encryption algorithm used when storing this object in S3
// (e.g., AES256, aws:kms).
ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
// The type of storage to use for the object. Defaults to 'STANDARD'.
StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
// The tag-set for the object. The tag-set must be encoded as URL Query parameters.
// (For example, "Key1=Value1")
Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`
// If the bucket is configured as a website, redirects requests for this object
// to another object in the same bucket or to an external URL. Amazon S3 stores
// the value of this header in the object metadata.
WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
}


@ -1,75 +0,0 @@
package s3manager
import (
"bufio"
"io"
"sync"
"github.com/aws/aws-sdk-go/internal/sdkio"
)
// WriterReadFrom defines an interface implementing io.Writer and io.ReaderFrom
type WriterReadFrom interface {
io.Writer
io.ReaderFrom
}
// WriterReadFromProvider provides an implementation of io.ReadFrom for the given io.Writer
type WriterReadFromProvider interface {
GetReadFrom(writer io.Writer) (w WriterReadFrom, cleanup func())
}
type bufferedWriter interface {
WriterReadFrom
Flush() error
Reset(io.Writer)
}
type bufferedReadFrom struct {
bufferedWriter
}
func (b *bufferedReadFrom) ReadFrom(r io.Reader) (int64, error) {
n, err := b.bufferedWriter.ReadFrom(r)
if flushErr := b.Flush(); flushErr != nil && err == nil {
err = flushErr
}
return n, err
}
// PooledBufferedReadFromProvider is a WriterReadFromProvider that uses a sync.Pool
// to manage allocation and reuse of *bufio.Writer structures.
type PooledBufferedReadFromProvider struct {
pool sync.Pool
}
// NewPooledBufferedWriterReadFromProvider returns a new PooledBufferedReadFromProvider
// Size is used to control the size of the underlying *bufio.Writer created for
// calls to GetReadFrom.
func NewPooledBufferedWriterReadFromProvider(size int) *PooledBufferedReadFromProvider {
if size < int(32*sdkio.KibiByte) {
size = int(64 * sdkio.KibiByte)
}
return &PooledBufferedReadFromProvider{
pool: sync.Pool{
New: func() interface{} {
return &bufferedReadFrom{bufferedWriter: bufio.NewWriterSize(nil, size)}
},
},
}
}
// GetReadFrom takes an io.Writer and wraps it with a type which satisfies the WriterReadFrom
// interface. Additionally a cleanup function is provided which must be called after usage of the WriterReadFrom
// has been completed in order to allow the reuse of the *bufio.Writer.
func (p *PooledBufferedReadFromProvider) GetReadFrom(writer io.Writer) (r WriterReadFrom, cleanup func()) {
buffer := p.pool.Get().(*bufferedReadFrom)
buffer.Reset(writer)
r = buffer
cleanup = func() {
buffer.Reset(nil) // Reset to nil writer to release reference
p.pool.Put(buffer)
}
return r, cleanup
}
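// Example (an illustrative sketch, not part of the original file; assumes the
// bytes and strings imports):
//
//     provider := s3manager.NewPooledBufferedWriterReadFromProvider(1024 * 1024)
//
//     var out bytes.Buffer
//     w, cleanup := provider.GetReadFrom(&out)
//     defer cleanup() // releases the *bufio.Writer back to the pool
//
//     if _, err := w.ReadFrom(strings.NewReader("response body")); err != nil {
//         return err
//     }
//     // out now holds "response body"; ReadFrom flushed before returning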


@ -704,11 +704,20 @@ func (m *Mega) addFSNode(itm FSNode) (*Node, error) {
switch {
case itm.T == FOLDER || itm.T == FILE:
args := strings.Split(itm.Key, ":")
if len(args) < 2 {
return nil, fmt.Errorf("not enough : in item.Key: %q", itm.Key)
}
itemUser, itemKey := args[0], args[1]
itemKeyParts := strings.Split(itemKey, "/")
if len(itemKeyParts) >= 2 {
itemKey = itemKeyParts[0]
// the other part is maybe a share key handle?
}
switch {
// File or folder owned by current user
case args[0] == itm.User:
buf, err := base64urldecode(args[1])
case itemUser == itm.User:
buf, err := base64urldecode(itemKey)
if err != nil {
return nil, err
}
@ -736,7 +745,7 @@ func (m *Mega) addFSNode(itm FSNode) (*Node, error) {
}
m.FS.skmap[itm.Hash] = itm.SKey
buf, err := base64urldecode(args[1])
buf, err := base64urldecode(itemKey)
if err != nil {
return nil, err
}
@ -750,7 +759,7 @@ func (m *Mega) addFSNode(itm FSNode) (*Node, error) {
}
// Shared file
default:
k := m.FS.skmap[args[0]]
k := m.FS.skmap[itemUser]
b, err := base64urldecode(k)
if err != nil {
return nil, err
@ -763,7 +772,7 @@ func (m *Mega) addFSNode(itm FSNode) (*Node, error) {
if err != nil {
return nil, err
}
buf, err := base64urldecode(args[1])
buf, err := base64urldecode(itemKey)
if err != nil {
return nil, err
}
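// To illustrate the parse this hunk introduces (an illustrative sketch, not
// part of the change; the key value is made up, and the "share key handle"
// reading of the suffix is only the code comment's own guess):
//
//     key := "AbCdEfGh:c29tZS1ub2RlLWtleQ/shareHandle" // hypothetical itm.Key
//     args := strings.Split(key, ":")
//     itemUser, itemKey := args[0], args[1]
//     if parts := strings.Split(itemKey, "/"); len(parts) >= 2 {
//         itemKey = parts[0] // now clean base64url: "c29tZS1ub2RlLWtleQ"
//     }
//
// so base64urldecode only ever sees the key portion before the "/".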


@ -1,308 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.11
// +build !gccgo,!appengine
#include "textflag.h"
#define NUM_ROUNDS 10
// func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32)
TEXT ·xorKeyStreamVX(SB), NOSPLIT, $0
MOVD dst+0(FP), R1
MOVD src+24(FP), R2
MOVD src_len+32(FP), R3
MOVD key+48(FP), R4
MOVD nonce+56(FP), R6
MOVD counter+64(FP), R7
MOVD $·constants(SB), R10
MOVD $·incRotMatrix(SB), R11
MOVW (R7), R20
AND $~255, R3, R13
ADD R2, R13, R12 // R12 for block end
AND $255, R3, R13
loop:
MOVD $NUM_ROUNDS, R21
VLD1 (R11), [V30.S4, V31.S4]
// load constants
// VLD4R (R10), [V0.S4, V1.S4, V2.S4, V3.S4]
WORD $0x4D60E940
// load keys
// VLD4R 16(R4), [V4.S4, V5.S4, V6.S4, V7.S4]
WORD $0x4DFFE884
// VLD4R 16(R4), [V8.S4, V9.S4, V10.S4, V11.S4]
WORD $0x4DFFE888
SUB $32, R4
// load counter + nonce
// VLD1R (R7), [V12.S4]
WORD $0x4D40C8EC
// VLD3R (R6), [V13.S4, V14.S4, V15.S4]
WORD $0x4D40E8CD
// update counter
VADD V30.S4, V12.S4, V12.S4
chacha:
// V0..V3 += V4..V7
// V12..V15 <<<= ((V12..V15 XOR V0..V3), 16)
VADD V0.S4, V4.S4, V0.S4
VADD V1.S4, V5.S4, V1.S4
VADD V2.S4, V6.S4, V2.S4
VADD V3.S4, V7.S4, V3.S4
VEOR V12.B16, V0.B16, V12.B16
VEOR V13.B16, V1.B16, V13.B16
VEOR V14.B16, V2.B16, V14.B16
VEOR V15.B16, V3.B16, V15.B16
VREV32 V12.H8, V12.H8
VREV32 V13.H8, V13.H8
VREV32 V14.H8, V14.H8
VREV32 V15.H8, V15.H8
// V8..V11 += V12..V15
// V4..V7 <<<= ((V4..V7 XOR V8..V11), 12)
VADD V8.S4, V12.S4, V8.S4
VADD V9.S4, V13.S4, V9.S4
VADD V10.S4, V14.S4, V10.S4
VADD V11.S4, V15.S4, V11.S4
VEOR V8.B16, V4.B16, V16.B16
VEOR V9.B16, V5.B16, V17.B16
VEOR V10.B16, V6.B16, V18.B16
VEOR V11.B16, V7.B16, V19.B16
VSHL $12, V16.S4, V4.S4
VSHL $12, V17.S4, V5.S4
VSHL $12, V18.S4, V6.S4
VSHL $12, V19.S4, V7.S4
VSRI $20, V16.S4, V4.S4
VSRI $20, V17.S4, V5.S4
VSRI $20, V18.S4, V6.S4
VSRI $20, V19.S4, V7.S4
// V0..V3 += V4..V7
// V12..V15 <<<= ((V12..V15 XOR V0..V3), 8)
VADD V0.S4, V4.S4, V0.S4
VADD V1.S4, V5.S4, V1.S4
VADD V2.S4, V6.S4, V2.S4
VADD V3.S4, V7.S4, V3.S4
VEOR V12.B16, V0.B16, V12.B16
VEOR V13.B16, V1.B16, V13.B16
VEOR V14.B16, V2.B16, V14.B16
VEOR V15.B16, V3.B16, V15.B16
VTBL V31.B16, [V12.B16], V12.B16
VTBL V31.B16, [V13.B16], V13.B16
VTBL V31.B16, [V14.B16], V14.B16
VTBL V31.B16, [V15.B16], V15.B16
// V8..V11 += V12..V15
// V4..V7 <<<= ((V4..V7 XOR V8..V11), 7)
VADD V12.S4, V8.S4, V8.S4
VADD V13.S4, V9.S4, V9.S4
VADD V14.S4, V10.S4, V10.S4
VADD V15.S4, V11.S4, V11.S4
VEOR V8.B16, V4.B16, V16.B16
VEOR V9.B16, V5.B16, V17.B16
VEOR V10.B16, V6.B16, V18.B16
VEOR V11.B16, V7.B16, V19.B16
VSHL $7, V16.S4, V4.S4
VSHL $7, V17.S4, V5.S4
VSHL $7, V18.S4, V6.S4
VSHL $7, V19.S4, V7.S4
VSRI $25, V16.S4, V4.S4
VSRI $25, V17.S4, V5.S4
VSRI $25, V18.S4, V6.S4
VSRI $25, V19.S4, V7.S4
// V0..V3 += V5..V7, V4
// V15,V12-V14 <<<= ((V15,V12-V14 XOR V0..V3), 16)
VADD V0.S4, V5.S4, V0.S4
VADD V1.S4, V6.S4, V1.S4
VADD V2.S4, V7.S4, V2.S4
VADD V3.S4, V4.S4, V3.S4
VEOR V15.B16, V0.B16, V15.B16
VEOR V12.B16, V1.B16, V12.B16
VEOR V13.B16, V2.B16, V13.B16
VEOR V14.B16, V3.B16, V14.B16
VREV32 V12.H8, V12.H8
VREV32 V13.H8, V13.H8
VREV32 V14.H8, V14.H8
VREV32 V15.H8, V15.H8
// V10 += V15; V5 <<<= ((V10 XOR V5), 12)
// ...
VADD V15.S4, V10.S4, V10.S4
VADD V12.S4, V11.S4, V11.S4
VADD V13.S4, V8.S4, V8.S4
VADD V14.S4, V9.S4, V9.S4
VEOR V10.B16, V5.B16, V16.B16
VEOR V11.B16, V6.B16, V17.B16
VEOR V8.B16, V7.B16, V18.B16
VEOR V9.B16, V4.B16, V19.B16
VSHL $12, V16.S4, V5.S4
VSHL $12, V17.S4, V6.S4
VSHL $12, V18.S4, V7.S4
VSHL $12, V19.S4, V4.S4
VSRI $20, V16.S4, V5.S4
VSRI $20, V17.S4, V6.S4
VSRI $20, V18.S4, V7.S4
VSRI $20, V19.S4, V4.S4
// V0 += V5; V15 <<<= ((V0 XOR V15), 8)
// ...
VADD V5.S4, V0.S4, V0.S4
VADD V6.S4, V1.S4, V1.S4
VADD V7.S4, V2.S4, V2.S4
VADD V4.S4, V3.S4, V3.S4
VEOR V0.B16, V15.B16, V15.B16
VEOR V1.B16, V12.B16, V12.B16
VEOR V2.B16, V13.B16, V13.B16
VEOR V3.B16, V14.B16, V14.B16
VTBL V31.B16, [V12.B16], V12.B16
VTBL V31.B16, [V13.B16], V13.B16
VTBL V31.B16, [V14.B16], V14.B16
VTBL V31.B16, [V15.B16], V15.B16
// V10 += V15; V5 <<<= ((V10 XOR V5), 7)
// ...
VADD V15.S4, V10.S4, V10.S4
VADD V12.S4, V11.S4, V11.S4
VADD V13.S4, V8.S4, V8.S4
VADD V14.S4, V9.S4, V9.S4
VEOR V10.B16, V5.B16, V16.B16
VEOR V11.B16, V6.B16, V17.B16
VEOR V8.B16, V7.B16, V18.B16
VEOR V9.B16, V4.B16, V19.B16
VSHL $7, V16.S4, V5.S4
VSHL $7, V17.S4, V6.S4
VSHL $7, V18.S4, V7.S4
VSHL $7, V19.S4, V4.S4
VSRI $25, V16.S4, V5.S4
VSRI $25, V17.S4, V6.S4
VSRI $25, V18.S4, V7.S4
VSRI $25, V19.S4, V4.S4
SUB $1, R21
CBNZ R21, chacha
// VLD4R (R10), [V16.S4, V17.S4, V18.S4, V19.S4]
WORD $0x4D60E950
// VLD4R 16(R4), [V20.S4, V21.S4, V22.S4, V23.S4]
WORD $0x4DFFE894
VADD V30.S4, V12.S4, V12.S4
VADD V16.S4, V0.S4, V0.S4
VADD V17.S4, V1.S4, V1.S4
VADD V18.S4, V2.S4, V2.S4
VADD V19.S4, V3.S4, V3.S4
// VLD4R 16(R4), [V24.S4, V25.S4, V26.S4, V27.S4]
WORD $0x4DFFE898
// restore R4
SUB $32, R4
// load counter + nonce
// VLD1R (R7), [V28.S4]
WORD $0x4D40C8FC
// VLD3R (R6), [V29.S4, V30.S4, V31.S4]
WORD $0x4D40E8DD
VADD V20.S4, V4.S4, V4.S4
VADD V21.S4, V5.S4, V5.S4
VADD V22.S4, V6.S4, V6.S4
VADD V23.S4, V7.S4, V7.S4
VADD V24.S4, V8.S4, V8.S4
VADD V25.S4, V9.S4, V9.S4
VADD V26.S4, V10.S4, V10.S4
VADD V27.S4, V11.S4, V11.S4
VADD V28.S4, V12.S4, V12.S4
VADD V29.S4, V13.S4, V13.S4
VADD V30.S4, V14.S4, V14.S4
VADD V31.S4, V15.S4, V15.S4
VZIP1 V1.S4, V0.S4, V16.S4
VZIP2 V1.S4, V0.S4, V17.S4
VZIP1 V3.S4, V2.S4, V18.S4
VZIP2 V3.S4, V2.S4, V19.S4
VZIP1 V5.S4, V4.S4, V20.S4
VZIP2 V5.S4, V4.S4, V21.S4
VZIP1 V7.S4, V6.S4, V22.S4
VZIP2 V7.S4, V6.S4, V23.S4
VZIP1 V9.S4, V8.S4, V24.S4
VZIP2 V9.S4, V8.S4, V25.S4
VZIP1 V11.S4, V10.S4, V26.S4
VZIP2 V11.S4, V10.S4, V27.S4
VZIP1 V13.S4, V12.S4, V28.S4
VZIP2 V13.S4, V12.S4, V29.S4
VZIP1 V15.S4, V14.S4, V30.S4
VZIP2 V15.S4, V14.S4, V31.S4
VZIP1 V18.D2, V16.D2, V0.D2
VZIP2 V18.D2, V16.D2, V4.D2
VZIP1 V19.D2, V17.D2, V8.D2
VZIP2 V19.D2, V17.D2, V12.D2
VLD1.P 64(R2), [V16.B16, V17.B16, V18.B16, V19.B16]
VZIP1 V22.D2, V20.D2, V1.D2
VZIP2 V22.D2, V20.D2, V5.D2
VZIP1 V23.D2, V21.D2, V9.D2
VZIP2 V23.D2, V21.D2, V13.D2
VLD1.P 64(R2), [V20.B16, V21.B16, V22.B16, V23.B16]
VZIP1 V26.D2, V24.D2, V2.D2
VZIP2 V26.D2, V24.D2, V6.D2
VZIP1 V27.D2, V25.D2, V10.D2
VZIP2 V27.D2, V25.D2, V14.D2
VLD1.P 64(R2), [V24.B16, V25.B16, V26.B16, V27.B16]
VZIP1 V30.D2, V28.D2, V3.D2
VZIP2 V30.D2, V28.D2, V7.D2
VZIP1 V31.D2, V29.D2, V11.D2
VZIP2 V31.D2, V29.D2, V15.D2
VLD1.P 64(R2), [V28.B16, V29.B16, V30.B16, V31.B16]
VEOR V0.B16, V16.B16, V16.B16
VEOR V1.B16, V17.B16, V17.B16
VEOR V2.B16, V18.B16, V18.B16
VEOR V3.B16, V19.B16, V19.B16
VST1.P [V16.B16, V17.B16, V18.B16, V19.B16], 64(R1)
VEOR V4.B16, V20.B16, V20.B16
VEOR V5.B16, V21.B16, V21.B16
VEOR V6.B16, V22.B16, V22.B16
VEOR V7.B16, V23.B16, V23.B16
VST1.P [V20.B16, V21.B16, V22.B16, V23.B16], 64(R1)
VEOR V8.B16, V24.B16, V24.B16
VEOR V9.B16, V25.B16, V25.B16
VEOR V10.B16, V26.B16, V26.B16
VEOR V11.B16, V27.B16, V27.B16
VST1.P [V24.B16, V25.B16, V26.B16, V27.B16], 64(R1)
VEOR V12.B16, V28.B16, V28.B16
VEOR V13.B16, V29.B16, V29.B16
VEOR V14.B16, V30.B16, V30.B16
VEOR V15.B16, V31.B16, V31.B16
VST1.P [V28.B16, V29.B16, V30.B16, V31.B16], 64(R1)
ADD $4, R20
MOVW R20, (R7) // update counter
CMP R2, R12
BGT loop
RET
DATA ·constants+0x00(SB)/4, $0x61707865
DATA ·constants+0x04(SB)/4, $0x3320646e
DATA ·constants+0x08(SB)/4, $0x79622d32
DATA ·constants+0x0c(SB)/4, $0x6b206574
GLOBL ·constants(SB), NOPTR|RODATA, $32
DATA ·incRotMatrix+0x00(SB)/4, $0x00000000
DATA ·incRotMatrix+0x04(SB)/4, $0x00000001
DATA ·incRotMatrix+0x08(SB)/4, $0x00000002
DATA ·incRotMatrix+0x0c(SB)/4, $0x00000003
DATA ·incRotMatrix+0x10(SB)/4, $0x02010003
DATA ·incRotMatrix+0x14(SB)/4, $0x06050407
DATA ·incRotMatrix+0x18(SB)/4, $0x0A09080B
DATA ·incRotMatrix+0x1c(SB)/4, $0x0E0D0C0F
GLOBL ·incRotMatrix(SB), NOPTR|RODATA, $32
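
A note on the rotations above: the arm64 kernel never issues an actual rotate instruction. Rotates by 16 are done with VREV32 on 16-bit lanes, rotates by 8 go through a VTBL byte shuffle using the mask kept in the second half of ·incRotMatrix (the first half is the [0,1,2,3] counter increment), and rotates by 12 and 7 are VSHL/VSRI pairs through the scratch registers V16..V19. A minimal scalar sketch of the three equivalences on one 32-bit lane (an editorial illustration, not part of the vendored source):

// rot_sketch.go — scalar equivalents (illustration only) of the three
// per-lane rotation strategies used in the assembly above.
package main

import (
	"fmt"
	"math/bits"
)

// shlSri mirrors a VSHL/VSRI pair: shift left by n, then
// shift-right-insert the remaining 32-n bits.
func shlSri(x uint32, n uint) uint32 { return x<<n | x>>(32-n) }

// rev32 mirrors VREV32 on .H8 lanes: swapping the two 16-bit halves of
// a word is exactly a rotation by 16.
func rev32(x uint32) uint32 { return x<<16 | x>>16 }

// tblRot8 mirrors the VTBL shuffle through the {3,0,1,2} byte mask in
// the second half of ·incRotMatrix: a rotation by 8.
func tblRot8(x uint32) uint32 {
	b := [4]byte{byte(x), byte(x >> 8), byte(x >> 16), byte(x >> 24)}
	return uint32(b[3]) | uint32(b[0])<<8 | uint32(b[1])<<16 | uint32(b[2])<<24
}

func main() {
	x := uint32(0x01234567)
	fmt.Println(shlSri(x, 12) == bits.RotateLeft32(x, 12)) // true
	fmt.Println(shlSri(x, 7) == bits.RotateLeft32(x, 7))   // true
	fmt.Println(rev32(x) == bits.RotateLeft32(x, 16))      // true
	fmt.Println(tblRot8(x) == bits.RotateLeft32(x, 8))     // true
}
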


@ -1,465 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Based on CRYPTOGAMS code with the following comment:
// # ====================================================================
// # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
// # project. The module is, however, dual licensed under OpenSSL and
// # CRYPTOGAMS licenses depending on where you obtain it. For further
// # details see http://www.openssl.org/~appro/cryptogams/.
// # ====================================================================
// Code for the perl script that generates the ppc64 assembler
// can be found in the cryptogams repository at the link below. It is based on
// the original from openssl.
// https://github.com/dot-asm/cryptogams/commit/a60f5b50ed908e91
// The differences in this and the original implementation are
// due to the calling conventions and initialization of constants.
// +build ppc64le,!gccgo,!appengine
#include "textflag.h"
#define OUT R3
#define INP R4
#define LEN R5
#define KEY R6
#define CNT R7
#define TMP R15
#define CONSTBASE R16
#define X0 R11
#define X1 R12
#define X2 R14
#define X3 R15
#define X4 R16
#define X5 R17
#define X6 R18
#define X7 R19
#define X8 R20
#define X9 R21
#define X10 R22
#define X11 R23
#define X12 R24
#define X13 R25
#define X14 R26
#define X15 R27
DATA consts<>+0x00(SB)/8, $0x3320646e61707865
DATA consts<>+0x08(SB)/8, $0x6b20657479622d32
DATA consts<>+0x10(SB)/8, $0x0000000000000001
DATA consts<>+0x18(SB)/8, $0x0000000000000000
DATA consts<>+0x20(SB)/8, $0x0000000000000004
DATA consts<>+0x28(SB)/8, $0x0000000000000000
DATA consts<>+0x30(SB)/8, $0x0a0b08090e0f0c0d
DATA consts<>+0x38(SB)/8, $0x0203000106070405
DATA consts<>+0x40(SB)/8, $0x090a0b080d0e0f0c
DATA consts<>+0x48(SB)/8, $0x0102030005060704
DATA consts<>+0x50(SB)/8, $0x6170786561707865
DATA consts<>+0x58(SB)/8, $0x6170786561707865
DATA consts<>+0x60(SB)/8, $0x3320646e3320646e
DATA consts<>+0x68(SB)/8, $0x3320646e3320646e
DATA consts<>+0x70(SB)/8, $0x79622d3279622d32
DATA consts<>+0x78(SB)/8, $0x79622d3279622d32
DATA consts<>+0x80(SB)/8, $0x6b2065746b206574
DATA consts<>+0x88(SB)/8, $0x6b2065746b206574
DATA consts<>+0x90(SB)/8, $0x0000000100000000
DATA consts<>+0x98(SB)/8, $0x0000000300000002
GLOBL consts<>(SB), RODATA, $0xa0
//func chaCha20_ctr32_vsx(out, inp *byte, len int, key *[8]uint32, counter *uint32)
TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40
MOVD out+0(FP), OUT
MOVD inp+8(FP), INP
MOVD len+16(FP), LEN
MOVD key+24(FP), KEY
MOVD counter+32(FP), CNT
// Addressing for constants
MOVD $consts<>+0x00(SB), CONSTBASE
MOVD $16, R8
MOVD $32, R9
MOVD $48, R10
MOVD $64, R11
// V16
LXVW4X (CONSTBASE)(R0), VS48
ADD $80,CONSTBASE
// Load key into V17,V18
LXVW4X (KEY)(R0), VS49
LXVW4X (KEY)(R8), VS50
// Load CNT, NONCE into V19
LXVW4X (CNT)(R0), VS51
// Clear V27
VXOR V27, V27, V27
// V28
LXVW4X (CONSTBASE)(R11), VS60
// splat slot from V19 -> V26
VSPLTW $0, V19, V26
VSLDOI $4, V19, V27, V19
VSLDOI $12, V27, V19, V19
VADDUWM V26, V28, V26
MOVD $10, R14
MOVD R14, CTR
loop_outer_vsx:
// V0, V1, V2, V3
LXVW4X (R0)(CONSTBASE), VS32
LXVW4X (R8)(CONSTBASE), VS33
LXVW4X (R9)(CONSTBASE), VS34
LXVW4X (R10)(CONSTBASE), VS35
// splat values from V17, V18 into V4-V11
VSPLTW $0, V17, V4
VSPLTW $1, V17, V5
VSPLTW $2, V17, V6
VSPLTW $3, V17, V7
VSPLTW $0, V18, V8
VSPLTW $1, V18, V9
VSPLTW $2, V18, V10
VSPLTW $3, V18, V11
// VOR
VOR V26, V26, V12
// splat values from V19 -> V13, V14, V15
VSPLTW $1, V19, V13
VSPLTW $2, V19, V14
VSPLTW $3, V19, V15
// splat const values
VSPLTISW $-16, V27
VSPLTISW $12, V28
VSPLTISW $8, V29
VSPLTISW $7, V30
loop_vsx:
VADDUWM V0, V4, V0
VADDUWM V1, V5, V1
VADDUWM V2, V6, V2
VADDUWM V3, V7, V3
VXOR V12, V0, V12
VXOR V13, V1, V13
VXOR V14, V2, V14
VXOR V15, V3, V15
VRLW V12, V27, V12
VRLW V13, V27, V13
VRLW V14, V27, V14
VRLW V15, V27, V15
VADDUWM V8, V12, V8
VADDUWM V9, V13, V9
VADDUWM V10, V14, V10
VADDUWM V11, V15, V11
VXOR V4, V8, V4
VXOR V5, V9, V5
VXOR V6, V10, V6
VXOR V7, V11, V7
VRLW V4, V28, V4
VRLW V5, V28, V5
VRLW V6, V28, V6
VRLW V7, V28, V7
VADDUWM V0, V4, V0
VADDUWM V1, V5, V1
VADDUWM V2, V6, V2
VADDUWM V3, V7, V3
VXOR V12, V0, V12
VXOR V13, V1, V13
VXOR V14, V2, V14
VXOR V15, V3, V15
VRLW V12, V29, V12
VRLW V13, V29, V13
VRLW V14, V29, V14
VRLW V15, V29, V15
VADDUWM V8, V12, V8
VADDUWM V9, V13, V9
VADDUWM V10, V14, V10
VADDUWM V11, V15, V11
VXOR V4, V8, V4
VXOR V5, V9, V5
VXOR V6, V10, V6
VXOR V7, V11, V7
VRLW V4, V30, V4
VRLW V5, V30, V5
VRLW V6, V30, V6
VRLW V7, V30, V7
VADDUWM V0, V5, V0
VADDUWM V1, V6, V1
VADDUWM V2, V7, V2
VADDUWM V3, V4, V3
VXOR V15, V0, V15
VXOR V12, V1, V12
VXOR V13, V2, V13
VXOR V14, V3, V14
VRLW V15, V27, V15
VRLW V12, V27, V12
VRLW V13, V27, V13
VRLW V14, V27, V14
VADDUWM V10, V15, V10
VADDUWM V11, V12, V11
VADDUWM V8, V13, V8
VADDUWM V9, V14, V9
VXOR V5, V10, V5
VXOR V6, V11, V6
VXOR V7, V8, V7
VXOR V4, V9, V4
VRLW V5, V28, V5
VRLW V6, V28, V6
VRLW V7, V28, V7
VRLW V4, V28, V4
VADDUWM V0, V5, V0
VADDUWM V1, V6, V1
VADDUWM V2, V7, V2
VADDUWM V3, V4, V3
VXOR V15, V0, V15
VXOR V12, V1, V12
VXOR V13, V2, V13
VXOR V14, V3, V14
VRLW V15, V29, V15
VRLW V12, V29, V12
VRLW V13, V29, V13
VRLW V14, V29, V14
VADDUWM V10, V15, V10
VADDUWM V11, V12, V11
VADDUWM V8, V13, V8
VADDUWM V9, V14, V9
VXOR V5, V10, V5
VXOR V6, V11, V6
VXOR V7, V8, V7
VXOR V4, V9, V4
VRLW V5, V30, V5
VRLW V6, V30, V6
VRLW V7, V30, V7
VRLW V4, V30, V4
BC 16, LT, loop_vsx
VADDUWM V12, V26, V12
WORD $0x13600F8C // VMRGEW V0, V1, V27
WORD $0x13821F8C // VMRGEW V2, V3, V28
WORD $0x10000E8C // VMRGOW V0, V1, V0
WORD $0x10421E8C // VMRGOW V2, V3, V2
WORD $0x13A42F8C // VMRGEW V4, V5, V29
WORD $0x13C63F8C // VMRGEW V6, V7, V30
XXPERMDI VS32, VS34, $0, VS33
XXPERMDI VS32, VS34, $3, VS35
XXPERMDI VS59, VS60, $0, VS32
XXPERMDI VS59, VS60, $3, VS34
WORD $0x10842E8C // VMRGOW V4, V5, V4
WORD $0x10C63E8C // VMRGOW V6, V7, V6
WORD $0x13684F8C // VMRGEW V8, V9, V27
WORD $0x138A5F8C // VMRGEW V10, V11, V28
XXPERMDI VS36, VS38, $0, VS37
XXPERMDI VS36, VS38, $3, VS39
XXPERMDI VS61, VS62, $0, VS36
XXPERMDI VS61, VS62, $3, VS38
WORD $0x11084E8C // VMRGOW V8, V9, V8
WORD $0x114A5E8C // VMRGOW V10, V11, V10
WORD $0x13AC6F8C // VMRGEW V12, V13, V29
WORD $0x13CE7F8C // VMRGEW V14, V15, V30
XXPERMDI VS40, VS42, $0, VS41
XXPERMDI VS40, VS42, $3, VS43
XXPERMDI VS59, VS60, $0, VS40
XXPERMDI VS59, VS60, $3, VS42
WORD $0x118C6E8C // VMRGOW V12, V13, V12
WORD $0x11CE7E8C // VMRGOW V14, V15, V14
VSPLTISW $4, V27
VADDUWM V26, V27, V26
XXPERMDI VS44, VS46, $0, VS45
XXPERMDI VS44, VS46, $3, VS47
XXPERMDI VS61, VS62, $0, VS44
XXPERMDI VS61, VS62, $3, VS46
VADDUWM V0, V16, V0
VADDUWM V4, V17, V4
VADDUWM V8, V18, V8
VADDUWM V12, V19, V12
CMPU LEN, $64
BLT tail_vsx
// Bottom of loop
LXVW4X (INP)(R0), VS59
LXVW4X (INP)(R8), VS60
LXVW4X (INP)(R9), VS61
LXVW4X (INP)(R10), VS62
VXOR V27, V0, V27
VXOR V28, V4, V28
VXOR V29, V8, V29
VXOR V30, V12, V30
STXVW4X VS59, (OUT)(R0)
STXVW4X VS60, (OUT)(R8)
ADD $64, INP
STXVW4X VS61, (OUT)(R9)
ADD $-64, LEN
STXVW4X VS62, (OUT)(R10)
ADD $64, OUT
BEQ done_vsx
VADDUWM V1, V16, V0
VADDUWM V5, V17, V4
VADDUWM V9, V18, V8
VADDUWM V13, V19, V12
CMPU LEN, $64
BLT tail_vsx
LXVW4X (INP)(R0), VS59
LXVW4X (INP)(R8), VS60
LXVW4X (INP)(R9), VS61
LXVW4X (INP)(R10), VS62
VXOR V27, V0, V27
VXOR V28, V4, V28
VXOR V29, V8, V29
VXOR V30, V12, V30
STXVW4X VS59, (OUT)(R0)
STXVW4X VS60, (OUT)(R8)
ADD $64, INP
STXVW4X VS61, (OUT)(R9)
ADD $-64, LEN
STXVW4X VS62, (OUT)(R10)
ADD $64, OUT
BEQ done_vsx
VADDUWM V2, V16, V0
VADDUWM V6, V17, V4
VADDUWM V10, V18, V8
VADDUWM V14, V19, V12
CMPU LEN, $64
BLT tail_vsx
LXVW4X (INP)(R0), VS59
LXVW4X (INP)(R8), VS60
LXVW4X (INP)(R9), VS61
LXVW4X (INP)(R10), VS62
VXOR V27, V0, V27
VXOR V28, V4, V28
VXOR V29, V8, V29
VXOR V30, V12, V30
STXVW4X VS59, (OUT)(R0)
STXVW4X VS60, (OUT)(R8)
ADD $64, INP
STXVW4X VS61, (OUT)(R9)
ADD $-64, LEN
STXVW4X VS62, (OUT)(R10)
ADD $64, OUT
BEQ done_vsx
VADDUWM V3, V16, V0
VADDUWM V7, V17, V4
VADDUWM V11, V18, V8
VADDUWM V15, V19, V12
CMPU LEN, $64
BLT tail_vsx
LXVW4X (INP)(R0), VS59
LXVW4X (INP)(R8), VS60
LXVW4X (INP)(R9), VS61
LXVW4X (INP)(R10), VS62
VXOR V27, V0, V27
VXOR V28, V4, V28
VXOR V29, V8, V29
VXOR V30, V12, V30
STXVW4X VS59, (OUT)(R0)
STXVW4X VS60, (OUT)(R8)
ADD $64, INP
STXVW4X VS61, (OUT)(R9)
ADD $-64, LEN
STXVW4X VS62, (OUT)(R10)
ADD $64, OUT
MOVD $10, R14
MOVD R14, CTR
BNE loop_outer_vsx
done_vsx:
// Increment counter by 4
MOVD (CNT), R14
ADD $4, R14
MOVD R14, (CNT)
RET
tail_vsx:
ADD $32, R1, R11
MOVD LEN, CTR
// Save values on stack to copy from
STXVW4X VS32, (R11)(R0)
STXVW4X VS36, (R11)(R8)
STXVW4X VS40, (R11)(R9)
STXVW4X VS44, (R11)(R10)
ADD $-1, R11, R12
ADD $-1, INP
ADD $-1, OUT
looptail_vsx:
// Copying the result to OUT
// in bytes.
MOVBZU 1(R12), KEY
MOVBZU 1(INP), TMP
XOR KEY, TMP, KEY
MOVBU KEY, 1(OUT)
BC 16, LT, looptail_vsx
// Clear the stack values
STXVW4X VS48, (R11)(R0)
STXVW4X VS48, (R11)(R8)
STXVW4X VS48, (R11)(R9)
STXVW4X VS48, (R11)(R10)
BR done_vsx
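
One detail in the loop setup above is easy to misread: the rotate amounts are splatted with VSPLTISW $12, $8, $7, and $-16. The VSPLTISW immediate is a sign-extended 5-bit field, so 16 itself is not encodable; but VRLW only consumes the low 5 bits of each rotate element, and -16 ≡ 16 (mod 32). A quick check of that equivalence (editorial sketch, not vendored code):

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	splat := int32(-16)       // what VSPLTISW $-16 places in each lane
	eff := uint32(splat) & 31 // the low 5 bits VRLW actually uses
	x := uint32(0xdeadbeef)
	fmt.Println(eff)                                           // 16
	fmt.Println(bits.RotateLeft32(x, int(eff)) == x<<16|x>>16) // true
}
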


@ -1,31 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.11
// +build !gccgo
package chacha20
const (
haveAsm = true
bufSize = 256
)
//go:noescape
func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32)
func (c *Cipher) xorKeyStreamAsm(dst, src []byte) {
if len(src) >= bufSize {
xorKeyStreamVX(dst, src, &c.key, &c.nonce, &c.counter)
}
if len(src)%bufSize != 0 {
i := len(src) - len(src)%bufSize
c.buf = [bufSize]byte{}
copy(c.buf[:], src[i:])
xorKeyStreamVX(c.buf[:], c.buf[:], &c.key, &c.nonce, &c.counter)
c.len = bufSize - copy(dst[i:], c.buf[:len(src)%bufSize])
}
}


@ -1,264 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package ChaCha20 implements the core ChaCha20 function as specified
// in https://tools.ietf.org/html/rfc7539#section-2.3.
package chacha20
import (
"crypto/cipher"
"encoding/binary"
"golang.org/x/crypto/internal/subtle"
)
// assert that *Cipher implements cipher.Stream
var _ cipher.Stream = (*Cipher)(nil)
// Cipher is a stateful instance of ChaCha20 using a particular key
// and nonce. A *Cipher implements the cipher.Stream interface.
type Cipher struct {
key [8]uint32
counter uint32 // incremented after each block
nonce [3]uint32
buf [bufSize]byte // buffer for unused keystream bytes
len int // number of unused keystream bytes at end of buf
}
// New creates a new ChaCha20 stream cipher with the given key and nonce.
// The initial counter value is set to 0.
func New(key [8]uint32, nonce [3]uint32) *Cipher {
return &Cipher{key: key, nonce: nonce}
}
// ChaCha20 constants spelling "expand 32-byte k"
const (
j0 uint32 = 0x61707865
j1 uint32 = 0x3320646e
j2 uint32 = 0x79622d32
j3 uint32 = 0x6b206574
)
func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) {
a += b
d ^= a
d = (d << 16) | (d >> 16)
c += d
b ^= c
b = (b << 12) | (b >> 20)
a += b
d ^= a
d = (d << 8) | (d >> 24)
c += d
b ^= c
b = (b << 7) | (b >> 25)
return a, b, c, d
}
// XORKeyStream XORs each byte in the given slice with a byte from the
// cipher's key stream. Dst and src must overlap entirely or not at all.
//
// If len(dst) < len(src), XORKeyStream will panic. It is acceptable
// to pass a dst bigger than src, and in that case, XORKeyStream will
// only update dst[:len(src)] and will not touch the rest of dst.
//
// Multiple calls to XORKeyStream behave as if the concatenation of
// the src buffers was passed in a single run. That is, Cipher
// maintains state and does not reset at each XORKeyStream call.
func (s *Cipher) XORKeyStream(dst, src []byte) {
if len(dst) < len(src) {
panic("chacha20: output smaller than input")
}
if subtle.InexactOverlap(dst[:len(src)], src) {
panic("chacha20: invalid buffer overlap")
}
// xor src with buffered keystream first
if s.len != 0 {
buf := s.buf[len(s.buf)-s.len:]
if len(src) < len(buf) {
buf = buf[:len(src)]
}
td, ts := dst[:len(buf)], src[:len(buf)] // BCE hint
for i, b := range buf {
td[i] = ts[i] ^ b
}
s.len -= len(buf)
if s.len != 0 {
return
}
s.buf = [len(s.buf)]byte{} // zero the empty buffer
src = src[len(buf):]
dst = dst[len(buf):]
}
if len(src) == 0 {
return
}
if haveAsm {
if uint64(len(src))+uint64(s.counter)*64 > (1<<38)-64 {
panic("chacha20: counter overflow")
}
s.xorKeyStreamAsm(dst, src)
return
}
// set up a 64-byte buffer to pad out the final block if needed
// (hoisted out of the main loop to avoid spills)
rem := len(src) % 64 // length of final block
fin := len(src) - rem // index of final block
if rem > 0 {
copy(s.buf[len(s.buf)-64:], src[fin:])
}
// pre-calculate most of the first round
s1, s5, s9, s13 := quarterRound(j1, s.key[1], s.key[5], s.nonce[0])
s2, s6, s10, s14 := quarterRound(j2, s.key[2], s.key[6], s.nonce[1])
s3, s7, s11, s15 := quarterRound(j3, s.key[3], s.key[7], s.nonce[2])
n := len(src)
src, dst = src[:n:n], dst[:n:n] // BCE hint
for i := 0; i < n; i += 64 {
// calculate the remainder of the first round
s0, s4, s8, s12 := quarterRound(j0, s.key[0], s.key[4], s.counter)
// execute the second round
x0, x5, x10, x15 := quarterRound(s0, s5, s10, s15)
x1, x6, x11, x12 := quarterRound(s1, s6, s11, s12)
x2, x7, x8, x13 := quarterRound(s2, s7, s8, s13)
x3, x4, x9, x14 := quarterRound(s3, s4, s9, s14)
// execute the remaining 18 rounds
for i := 0; i < 9; i++ {
x0, x4, x8, x12 = quarterRound(x0, x4, x8, x12)
x1, x5, x9, x13 = quarterRound(x1, x5, x9, x13)
x2, x6, x10, x14 = quarterRound(x2, x6, x10, x14)
x3, x7, x11, x15 = quarterRound(x3, x7, x11, x15)
x0, x5, x10, x15 = quarterRound(x0, x5, x10, x15)
x1, x6, x11, x12 = quarterRound(x1, x6, x11, x12)
x2, x7, x8, x13 = quarterRound(x2, x7, x8, x13)
x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14)
}
x0 += j0
x1 += j1
x2 += j2
x3 += j3
x4 += s.key[0]
x5 += s.key[1]
x6 += s.key[2]
x7 += s.key[3]
x8 += s.key[4]
x9 += s.key[5]
x10 += s.key[6]
x11 += s.key[7]
x12 += s.counter
x13 += s.nonce[0]
x14 += s.nonce[1]
x15 += s.nonce[2]
// increment the counter
s.counter += 1
if s.counter == 0 {
panic("chacha20: counter overflow")
}
// pad to 64 bytes if needed
in, out := src[i:], dst[i:]
if i == fin {
// src[fin:] has already been copied into s.buf before
// the main loop
in, out = s.buf[len(s.buf)-64:], s.buf[len(s.buf)-64:]
}
in, out = in[:64], out[:64] // BCE hint
// XOR the key stream with the source and write out the result
xor(out[0:], in[0:], x0)
xor(out[4:], in[4:], x1)
xor(out[8:], in[8:], x2)
xor(out[12:], in[12:], x3)
xor(out[16:], in[16:], x4)
xor(out[20:], in[20:], x5)
xor(out[24:], in[24:], x6)
xor(out[28:], in[28:], x7)
xor(out[32:], in[32:], x8)
xor(out[36:], in[36:], x9)
xor(out[40:], in[40:], x10)
xor(out[44:], in[44:], x11)
xor(out[48:], in[48:], x12)
xor(out[52:], in[52:], x13)
xor(out[56:], in[56:], x14)
xor(out[60:], in[60:], x15)
}
// copy any trailing bytes out of the buffer and into dst
if rem != 0 {
s.len = 64 - rem
copy(dst[fin:], s.buf[len(s.buf)-64:])
}
}
// Advance discards bytes in the key stream until the next 64 byte block
// boundary is reached and updates the counter accordingly. If the key
// stream is already at a block boundary no bytes will be discarded and
// the counter will be unchanged.
func (s *Cipher) Advance() {
s.len -= s.len % 64
if s.len == 0 {
s.buf = [len(s.buf)]byte{}
}
}
// XORKeyStream crypts bytes from in to out using the given key and counters.
// In and out must overlap entirely or not at all. Counter contains the raw
// ChaCha20 counter bytes (i.e. block counter followed by nonce).
func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) {
s := Cipher{
key: [8]uint32{
binary.LittleEndian.Uint32(key[0:4]),
binary.LittleEndian.Uint32(key[4:8]),
binary.LittleEndian.Uint32(key[8:12]),
binary.LittleEndian.Uint32(key[12:16]),
binary.LittleEndian.Uint32(key[16:20]),
binary.LittleEndian.Uint32(key[20:24]),
binary.LittleEndian.Uint32(key[24:28]),
binary.LittleEndian.Uint32(key[28:32]),
},
nonce: [3]uint32{
binary.LittleEndian.Uint32(counter[4:8]),
binary.LittleEndian.Uint32(counter[8:12]),
binary.LittleEndian.Uint32(counter[12:16]),
},
counter: binary.LittleEndian.Uint32(counter[0:4]),
}
s.XORKeyStream(out, in)
}
// HChaCha20 uses the ChaCha20 core to generate a derived key from a key and a
// nonce. It should only be used as part of the XChaCha20 construction.
func HChaCha20(key *[8]uint32, nonce *[4]uint32) [8]uint32 {
x0, x1, x2, x3 := j0, j1, j2, j3
x4, x5, x6, x7 := key[0], key[1], key[2], key[3]
x8, x9, x10, x11 := key[4], key[5], key[6], key[7]
x12, x13, x14, x15 := nonce[0], nonce[1], nonce[2], nonce[3]
for i := 0; i < 10; i++ {
x0, x4, x8, x12 = quarterRound(x0, x4, x8, x12)
x1, x5, x9, x13 = quarterRound(x1, x5, x9, x13)
x2, x6, x10, x14 = quarterRound(x2, x6, x10, x14)
x3, x7, x11, x15 = quarterRound(x3, x7, x11, x15)
x0, x5, x10, x15 = quarterRound(x0, x5, x10, x15)
x1, x6, x11, x12 = quarterRound(x1, x6, x11, x12)
x2, x7, x8, x13 = quarterRound(x2, x7, x8, x13)
x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14)
}
var out [8]uint32
out[0], out[1], out[2], out[3] = x0, x1, x2, x3
out[4], out[5], out[6], out[7] = x12, x13, x14, x15
return out
}
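
The XORKeyStream contract documented above (multiple calls behave like one call over the concatenated input) is what lets callers encrypt in arbitrary chunks. A hedged usage sketch follows; note that this package lives under golang.org/x/crypto/internal, so outside code cannot actually import it and this is illustration only:

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/crypto/internal/chacha20" // internal package; illustration only
)

func main() {
	var key [8]uint32
	var nonce [3]uint32
	msg := make([]byte, 100) // spans a 64-byte block boundary

	oneShot := make([]byte, len(msg))
	chacha20.New(key, nonce).XORKeyStream(oneShot, msg)

	chunked := make([]byte, len(msg))
	c := chacha20.New(key, nonce)
	c.XORKeyStream(chunked[:30], msg[:30]) // partial block is buffered...
	c.XORKeyStream(chunked[30:], msg[30:]) // ...and the stream continues

	fmt.Println(bytes.Equal(oneShot, chunked)) // true
}
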


@ -1,16 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !arm64,!s390x,!ppc64le arm64,!go1.11 gccgo appengine
package chacha20
const (
bufSize = 64
haveAsm = false
)
func (*Cipher) xorKeyStreamAsm(dst, src []byte) {
panic("not implemented")
}


@ -1,54 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ppc64le,!gccgo,!appengine
package chacha20
import (
"encoding/binary"
)
const (
bufSize = 256
haveAsm = true
)
//go:noescape
func chaCha20_ctr32_vsx(out, inp *byte, len int, key *[8]uint32, counter *uint32)
func (c *Cipher) xorKeyStreamAsm(dst, src []byte) {
// This implementation can handle buffers that aren't multiples of
// 256.
if len(src) >= bufSize {
chaCha20_ctr32_vsx(&dst[0], &src[0], len(src), &c.key, &c.counter)
} else if len(src)%bufSize != 0 {
chaCha20_ctr32_vsx(&c.buf[0], &c.buf[0], bufSize, &c.key, &c.counter)
start := len(src) - len(src)%bufSize
ts, td, tb := src[start:], dst[start:], c.buf[:]
// Unroll loop to XOR 32 bytes per iteration.
for i := 0; i < len(ts)-32; i += 32 {
td, tb = td[:len(ts)], tb[:len(ts)] // bounds check elimination
s0 := binary.LittleEndian.Uint64(ts[0:8])
s1 := binary.LittleEndian.Uint64(ts[8:16])
s2 := binary.LittleEndian.Uint64(ts[16:24])
s3 := binary.LittleEndian.Uint64(ts[24:32])
b0 := binary.LittleEndian.Uint64(tb[0:8])
b1 := binary.LittleEndian.Uint64(tb[8:16])
b2 := binary.LittleEndian.Uint64(tb[16:24])
b3 := binary.LittleEndian.Uint64(tb[24:32])
binary.LittleEndian.PutUint64(td[0:8], s0^b0)
binary.LittleEndian.PutUint64(td[8:16], s1^b1)
binary.LittleEndian.PutUint64(td[16:24], s2^b2)
binary.LittleEndian.PutUint64(td[24:32], s3^b3)
ts, td, tb = ts[32:], td[32:], tb[32:]
}
td, tb = td[:len(ts)], tb[:len(ts)] // bounds check elimination
for i, v := range ts {
td[i] = tb[i] ^ v
}
c.len = bufSize - (len(src) % bufSize)
}
}


@ -1,29 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build s390x,!gccgo,!appengine
package chacha20
import (
"golang.org/x/sys/cpu"
)
var haveAsm = cpu.S390X.HasVX
const bufSize = 256
// xorKeyStreamVX is an assembly implementation of XORKeyStream. It must only
// be called when the vector facility is available.
// Implementation in asm_s390x.s.
//go:noescape
func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32, buf *[256]byte, len *int)
func (c *Cipher) xorKeyStreamAsm(dst, src []byte) {
xorKeyStreamVX(dst, src, &c.key, &c.nonce, &c.counter, &c.buf, &c.len)
}
// EXRL targets, DO NOT CALL!
func mvcSrcToBuf()
func mvcBufToDst()


@ -1,260 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build s390x,!gccgo,!appengine
#include "go_asm.h"
#include "textflag.h"
// This is an implementation of the ChaCha20 encryption algorithm as
// specified in RFC 7539. It uses vector instructions to compute
// 4 keystream blocks in parallel (256 bytes) which are then XORed
// with the bytes in the input slice.
GLOBL ·constants<>(SB), RODATA|NOPTR, $32
// BSWAP: swap bytes in each 4-byte element
DATA ·constants<>+0x00(SB)/4, $0x03020100
DATA ·constants<>+0x04(SB)/4, $0x07060504
DATA ·constants<>+0x08(SB)/4, $0x0b0a0908
DATA ·constants<>+0x0c(SB)/4, $0x0f0e0d0c
// J0: [j0, j1, j2, j3]
DATA ·constants<>+0x10(SB)/4, $0x61707865
DATA ·constants<>+0x14(SB)/4, $0x3320646e
DATA ·constants<>+0x18(SB)/4, $0x79622d32
DATA ·constants<>+0x1c(SB)/4, $0x6b206574
// EXRL targets:
TEXT ·mvcSrcToBuf(SB), NOFRAME|NOSPLIT, $0
MVC $1, (R1), (R8)
RET
TEXT ·mvcBufToDst(SB), NOFRAME|NOSPLIT, $0
MVC $1, (R8), (R9)
RET
#define BSWAP V5
#define J0 V6
#define KEY0 V7
#define KEY1 V8
#define NONCE V9
#define CTR V10
#define M0 V11
#define M1 V12
#define M2 V13
#define M3 V14
#define INC V15
#define X0 V16
#define X1 V17
#define X2 V18
#define X3 V19
#define X4 V20
#define X5 V21
#define X6 V22
#define X7 V23
#define X8 V24
#define X9 V25
#define X10 V26
#define X11 V27
#define X12 V28
#define X13 V29
#define X14 V30
#define X15 V31
#define NUM_ROUNDS 20
#define ROUND4(a0, a1, a2, a3, b0, b1, b2, b3, c0, c1, c2, c3, d0, d1, d2, d3) \
VAF a1, a0, a0 \
VAF b1, b0, b0 \
VAF c1, c0, c0 \
VAF d1, d0, d0 \
VX a0, a2, a2 \
VX b0, b2, b2 \
VX c0, c2, c2 \
VX d0, d2, d2 \
VERLLF $16, a2, a2 \
VERLLF $16, b2, b2 \
VERLLF $16, c2, c2 \
VERLLF $16, d2, d2 \
VAF a2, a3, a3 \
VAF b2, b3, b3 \
VAF c2, c3, c3 \
VAF d2, d3, d3 \
VX a3, a1, a1 \
VX b3, b1, b1 \
VX c3, c1, c1 \
VX d3, d1, d1 \
VERLLF $12, a1, a1 \
VERLLF $12, b1, b1 \
VERLLF $12, c1, c1 \
VERLLF $12, d1, d1 \
VAF a1, a0, a0 \
VAF b1, b0, b0 \
VAF c1, c0, c0 \
VAF d1, d0, d0 \
VX a0, a2, a2 \
VX b0, b2, b2 \
VX c0, c2, c2 \
VX d0, d2, d2 \
VERLLF $8, a2, a2 \
VERLLF $8, b2, b2 \
VERLLF $8, c2, c2 \
VERLLF $8, d2, d2 \
VAF a2, a3, a3 \
VAF b2, b3, b3 \
VAF c2, c3, c3 \
VAF d2, d3, d3 \
VX a3, a1, a1 \
VX b3, b1, b1 \
VX c3, c1, c1 \
VX d3, d1, d1 \
VERLLF $7, a1, a1 \
VERLLF $7, b1, b1 \
VERLLF $7, c1, c1 \
VERLLF $7, d1, d1
#define PERMUTE(mask, v0, v1, v2, v3) \
VPERM v0, v0, mask, v0 \
VPERM v1, v1, mask, v1 \
VPERM v2, v2, mask, v2 \
VPERM v3, v3, mask, v3
#define ADDV(x, v0, v1, v2, v3) \
VAF x, v0, v0 \
VAF x, v1, v1 \
VAF x, v2, v2 \
VAF x, v3, v3
#define XORV(off, dst, src, v0, v1, v2, v3) \
VLM off(src), M0, M3 \
PERMUTE(BSWAP, v0, v1, v2, v3) \
VX v0, M0, M0 \
VX v1, M1, M1 \
VX v2, M2, M2 \
VX v3, M3, M3 \
VSTM M0, M3, off(dst)
#define SHUFFLE(a, b, c, d, t, u, v, w) \
VMRHF a, c, t \ // t = {a[0], c[0], a[1], c[1]}
VMRHF b, d, u \ // u = {b[0], d[0], b[1], d[1]}
VMRLF a, c, v \ // v = {a[2], c[2], a[3], c[3]}
VMRLF b, d, w \ // w = {b[2], d[2], b[3], d[3]}
VMRHF t, u, a \ // a = {a[0], b[0], c[0], d[0]}
VMRLF t, u, b \ // b = {a[1], b[1], c[1], d[1]}
VMRHF v, w, c \ // c = {a[2], b[2], c[2], d[2]}
VMRLF v, w, d // d = {a[3], b[3], c[3], d[3]}
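
SHUFFLE is a 4×4 transpose of 32-bit lanes: after the rounds, each vector holds one state word across four blocks, and two layers of merge-high/merge-low put the four keystream blocks back into contiguous order. A scalar model of the macro (editorial sketch, not vendored code):

package main

import "fmt"

func mrhf(x, y [4]uint32) [4]uint32 { return [4]uint32{x[0], y[0], x[1], y[1]} } // VMRHF
func mrlf(x, y [4]uint32) [4]uint32 { return [4]uint32{x[2], y[2], x[3], y[3]} } // VMRLF

func shuffle(a, b, c, d [4]uint32) (x, y, z, w [4]uint32) {
	t := mrhf(a, c) // {a0, c0, a1, c1}
	u := mrhf(b, d) // {b0, d0, b1, d1}
	v := mrlf(a, c) // {a2, c2, a3, c3}
	q := mrlf(b, d) // {b2, d2, b3, d3}
	return mrhf(t, u), mrlf(t, u), mrhf(v, q), mrlf(v, q)
}

func main() {
	a := [4]uint32{0, 1, 2, 3}
	b := [4]uint32{10, 11, 12, 13}
	c := [4]uint32{20, 21, 22, 23}
	d := [4]uint32{30, 31, 32, 33}
	// [0 10 20 30] [1 11 21 31] [2 12 22 32] [3 13 23 33]
	fmt.Println(shuffle(a, b, c, d))
}
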
// func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32, buf *[256]byte, len *int)
TEXT ·xorKeyStreamVX(SB), NOSPLIT, $0
MOVD $·constants<>(SB), R1
MOVD dst+0(FP), R2 // R2=&dst[0]
LMG src+24(FP), R3, R4 // R3=&src[0] R4=len(src)
MOVD key+48(FP), R5 // R5=key
MOVD nonce+56(FP), R6 // R6=nonce
MOVD counter+64(FP), R7 // R7=counter
MOVD buf+72(FP), R8 // R8=buf
MOVD len+80(FP), R9 // R9=len
// load BSWAP and J0
VLM (R1), BSWAP, J0
// set up tail buffer
ADD $-1, R4, R12
MOVBZ R12, R12
CMPUBEQ R12, $255, aligned
MOVD R4, R1
AND $~255, R1
MOVD $(R3)(R1*1), R1
EXRL $·mvcSrcToBuf(SB), R12
MOVD $255, R0
SUB R12, R0
MOVD R0, (R9) // update len
aligned:
// setup
MOVD $95, R0
VLM (R5), KEY0, KEY1
VLL R0, (R6), NONCE
VZERO M0
VLEIB $7, $32, M0
VSRLB M0, NONCE, NONCE
// initialize counter values
VLREPF (R7), CTR
VZERO INC
VLEIF $1, $1, INC
VLEIF $2, $2, INC
VLEIF $3, $3, INC
VAF INC, CTR, CTR
VREPIF $4, INC
chacha:
VREPF $0, J0, X0
VREPF $1, J0, X1
VREPF $2, J0, X2
VREPF $3, J0, X3
VREPF $0, KEY0, X4
VREPF $1, KEY0, X5
VREPF $2, KEY0, X6
VREPF $3, KEY0, X7
VREPF $0, KEY1, X8
VREPF $1, KEY1, X9
VREPF $2, KEY1, X10
VREPF $3, KEY1, X11
VLR CTR, X12
VREPF $1, NONCE, X13
VREPF $2, NONCE, X14
VREPF $3, NONCE, X15
MOVD $(NUM_ROUNDS/2), R1
loop:
ROUND4(X0, X4, X12, X8, X1, X5, X13, X9, X2, X6, X14, X10, X3, X7, X15, X11)
ROUND4(X0, X5, X15, X10, X1, X6, X12, X11, X2, X7, X13, X8, X3, X4, X14, X9)
ADD $-1, R1
BNE loop
// decrement length
ADD $-256, R4
BLT tail
continue:
// rearrange vectors
SHUFFLE(X0, X1, X2, X3, M0, M1, M2, M3)
ADDV(J0, X0, X1, X2, X3)
SHUFFLE(X4, X5, X6, X7, M0, M1, M2, M3)
ADDV(KEY0, X4, X5, X6, X7)
SHUFFLE(X8, X9, X10, X11, M0, M1, M2, M3)
ADDV(KEY1, X8, X9, X10, X11)
VAF CTR, X12, X12
SHUFFLE(X12, X13, X14, X15, M0, M1, M2, M3)
ADDV(NONCE, X12, X13, X14, X15)
// increment counters
VAF INC, CTR, CTR
// xor keystream with plaintext
XORV(0*64, R2, R3, X0, X4, X8, X12)
XORV(1*64, R2, R3, X1, X5, X9, X13)
XORV(2*64, R2, R3, X2, X6, X10, X14)
XORV(3*64, R2, R3, X3, X7, X11, X15)
// increment pointers
MOVD $256(R2), R2
MOVD $256(R3), R3
CMPBNE R4, $0, chacha
CMPUBEQ R12, $255, return
EXRL $·mvcBufToDst(SB), R12 // len was updated during setup
return:
VSTEF $0, CTR, (R7)
RET
tail:
MOVD R2, R9
MOVD R8, R2
MOVD R8, R3
MOVD $0, R4
JMP continue


@ -1,43 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package chacha20
import (
"runtime"
)
// Platforms that have fast unaligned 32-bit little endian accesses.
const unaligned = runtime.GOARCH == "386" ||
runtime.GOARCH == "amd64" ||
runtime.GOARCH == "arm64" ||
runtime.GOARCH == "ppc64le" ||
runtime.GOARCH == "s390x"
// xor reads a little endian uint32 from src, XORs it with u and
// places the result in little endian byte order in dst.
func xor(dst, src []byte, u uint32) {
_, _ = src[3], dst[3] // eliminate bounds checks
if unaligned {
// The compiler should optimize this code into
// 32-bit unaligned little endian loads and stores.
// TODO: delete once the compiler does a reliably
// good job with the generic code below.
// See issue #25111 for more details.
v := uint32(src[0])
v |= uint32(src[1]) << 8
v |= uint32(src[2]) << 16
v |= uint32(src[3]) << 24
v ^= u
dst[0] = byte(v)
dst[1] = byte(v >> 8)
dst[2] = byte(v >> 16)
dst[3] = byte(v >> 24)
} else {
dst[0] = src[0] ^ byte(u)
dst[1] = src[1] ^ byte(u>>8)
dst[2] = src[2] ^ byte(u>>16)
dst[3] = src[3] ^ byte(u>>24)
}
}


@ -22,8 +22,14 @@ import "crypto/subtle"
// TagSize is the size, in bytes, of a poly1305 authenticator.
const TagSize = 16
// Verify returns true if mac is a valid authenticator for m with the given
// key.
// Sum generates an authenticator for msg using a one-time key and puts the
// 16-byte result into out. Authenticating two different messages with the same
// key allows an attacker to forge messages at will.
func Sum(out *[16]byte, m []byte, key *[32]byte) {
sum(out, m, key)
}
// Verify returns true if mac is a valid authenticator for m with the given key.
func Verify(mac *[16]byte, m []byte, key *[32]byte) bool {
var tmp [16]byte
Sum(&tmp, m, key)


@ -7,62 +7,52 @@
package poly1305
//go:noescape
func initialize(state *[7]uint64, key *[32]byte)
func update(state *macState, msg []byte)
//go:noescape
func update(state *[7]uint64, msg []byte)
//go:noescape
func finalize(tag *[TagSize]byte, state *[7]uint64)
// Sum generates an authenticator for m using a one-time key and puts the
// 16-byte result into out. Authenticating two different messages with the same
// key allows an attacker to forge messages at will.
func Sum(out *[16]byte, m []byte, key *[32]byte) {
func sum(out *[16]byte, m []byte, key *[32]byte) {
h := newMAC(key)
h.Write(m)
h.Sum(out)
}
func newMAC(key *[32]byte) (h mac) {
initialize(&h.state, key)
initialize(key, &h.r, &h.s)
return
}
type mac struct {
state [7]uint64 // := uint64{ h0, h1, h2, r0, r1, pad0, pad1 }
// mac is a wrapper for macGeneric that redirects calls that would have gone to
// updateGeneric to update.
//
// Its Write and Sum methods are otherwise identical to the macGeneric ones, but
// using function pointers would carry a major performance cost.
type mac struct{ macGeneric }
buffer [TagSize]byte
offset int
}
func (h *mac) Write(p []byte) (n int, err error) {
n = len(p)
func (h *mac) Write(p []byte) (int, error) {
nn := len(p)
if h.offset > 0 {
remaining := TagSize - h.offset
if n < remaining {
h.offset += copy(h.buffer[h.offset:], p)
return n, nil
n := copy(h.buffer[h.offset:], p)
if h.offset+n < TagSize {
h.offset += n
return nn, nil
}
copy(h.buffer[h.offset:], p[:remaining])
p = p[remaining:]
p = p[n:]
h.offset = 0
update(&h.state, h.buffer[:])
update(&h.macState, h.buffer[:])
}
if nn := len(p) - (len(p) % TagSize); nn > 0 {
update(&h.state, p[:nn])
p = p[nn:]
if n := len(p) - (len(p) % TagSize); n > 0 {
update(&h.macState, p[:n])
p = p[n:]
}
if len(p) > 0 {
h.offset += copy(h.buffer[h.offset:], p)
}
return n, nil
return nn, nil
}
func (h *mac) Sum(out *[16]byte) {
state := h.state
state := h.macState
if h.offset > 0 {
update(&state, h.buffer[:h.offset])
}
finalize(out, &state)
finalize(out, &state.h, &state.s)
}


@ -54,10 +54,6 @@
ADCQ t3, h1; \
ADCQ $0, h2
DATA ·poly1305Mask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF
DATA ·poly1305Mask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC
GLOBL ·poly1305Mask<>(SB), RODATA, $16
// func update(state *[7]uint64, msg []byte)
TEXT ·update(SB), $0-32
MOVQ state+0(FP), DI
@ -110,39 +106,3 @@ done:
MOVQ R9, 8(DI)
MOVQ R10, 16(DI)
RET
// func initialize(state *[7]uint64, key *[32]byte)
TEXT ·initialize(SB), $0-16
MOVQ state+0(FP), DI
MOVQ key+8(FP), SI
// state[0...7] is initialized with zero
MOVOU 0(SI), X0
MOVOU 16(SI), X1
MOVOU ·poly1305Mask<>(SB), X2
PAND X2, X0
MOVOU X0, 24(DI)
MOVOU X1, 40(DI)
RET
// func finalize(tag *[TagSize]byte, state *[7]uint64)
TEXT ·finalize(SB), $0-16
MOVQ tag+0(FP), DI
MOVQ state+8(FP), SI
MOVQ 0(SI), AX
MOVQ 8(SI), BX
MOVQ 16(SI), CX
MOVQ AX, R8
MOVQ BX, R9
SUBQ $0xFFFFFFFFFFFFFFFB, AX
SBBQ $0xFFFFFFFFFFFFFFFF, BX
SBBQ $3, CX
CMOVQCS R8, AX
CMOVQCS R9, BX
ADDQ 40(SI), AX
ADCQ 48(SI), BX
MOVQ AX, 0(DI)
MOVQ BX, 8(DI)
RET


@ -1,22 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build arm,!gccgo,!appengine,!nacl
package poly1305
// This function is implemented in sum_arm.s
//go:noescape
func poly1305_auth_armv6(out *[16]byte, m *byte, mlen uint32, key *[32]byte)
// Sum generates an authenticator for m using a one-time key and puts the
// 16-byte result into out. Authenticating two different messages with the same
// key allows an attacker to forge messages at will.
func Sum(out *[16]byte, m []byte, key *[32]byte) {
var mPtr *byte
if len(m) > 0 {
mPtr = &m[0]
}
poly1305_auth_armv6(out, mPtr, uint32(len(m)), key)
}


@ -1,427 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build arm,!gccgo,!appengine,!nacl
#include "textflag.h"
// This code was translated into a form compatible with 5a from the public
// domain source by Andrew Moon: github.com/floodyberry/poly1305-opt/blob/master/app/extensions/poly1305.
DATA ·poly1305_init_constants_armv6<>+0x00(SB)/4, $0x3ffffff
DATA ·poly1305_init_constants_armv6<>+0x04(SB)/4, $0x3ffff03
DATA ·poly1305_init_constants_armv6<>+0x08(SB)/4, $0x3ffc0ff
DATA ·poly1305_init_constants_armv6<>+0x0c(SB)/4, $0x3f03fff
DATA ·poly1305_init_constants_armv6<>+0x10(SB)/4, $0x00fffff
GLOBL ·poly1305_init_constants_armv6<>(SB), 8, $20
// Warning: the linker may use R11 to synthesize certain instructions. Please
// take care and verify that no synthetic instructions use it.
TEXT poly1305_init_ext_armv6<>(SB), NOSPLIT, $0
// Needs 16 bytes of stack and 64 bytes of space pointed to by R0. (It
// might look like it's only 60 bytes of space but the final four bytes
// will be written by another function.) We need to skip over four
// bytes of stack because that's saving the value of 'g'.
ADD $4, R13, R8
MOVM.IB [R4-R7], (R8)
MOVM.IA.W (R1), [R2-R5]
MOVW $·poly1305_init_constants_armv6<>(SB), R7
MOVW R2, R8
MOVW R2>>26, R9
MOVW R3>>20, g
MOVW R4>>14, R11
MOVW R5>>8, R12
ORR R3<<6, R9, R9
ORR R4<<12, g, g
ORR R5<<18, R11, R11
MOVM.IA (R7), [R2-R6]
AND R8, R2, R2
AND R9, R3, R3
AND g, R4, R4
AND R11, R5, R5
AND R12, R6, R6
MOVM.IA.W [R2-R6], (R0)
EOR R2, R2, R2
EOR R3, R3, R3
EOR R4, R4, R4
EOR R5, R5, R5
EOR R6, R6, R6
MOVM.IA.W [R2-R6], (R0)
MOVM.IA.W (R1), [R2-R5]
MOVM.IA [R2-R6], (R0)
ADD $20, R13, R0
MOVM.DA (R0), [R4-R7]
RET
#define MOVW_UNALIGNED(Rsrc, Rdst, Rtmp, offset) \
MOVBU (offset+0)(Rsrc), Rtmp; \
MOVBU Rtmp, (offset+0)(Rdst); \
MOVBU (offset+1)(Rsrc), Rtmp; \
MOVBU Rtmp, (offset+1)(Rdst); \
MOVBU (offset+2)(Rsrc), Rtmp; \
MOVBU Rtmp, (offset+2)(Rdst); \
MOVBU (offset+3)(Rsrc), Rtmp; \
MOVBU Rtmp, (offset+3)(Rdst)
TEXT poly1305_blocks_armv6<>(SB), NOSPLIT, $0
// Needs 24 bytes of stack for saved registers and then 88 bytes of
// scratch space after that. We assume that 24 bytes at (R13) have
// already been used: four bytes for the link register saved in the
// prelude of poly1305_auth_armv6, four bytes for saving the value of g
// in that function and 16 bytes of scratch space used around
// poly1305_finish_ext_armv6_skip1.
ADD $24, R13, R12
MOVM.IB [R4-R8, R14], (R12)
MOVW R0, 88(R13)
MOVW R1, 92(R13)
MOVW R2, 96(R13)
MOVW R1, R14
MOVW R2, R12
MOVW 56(R0), R8
WORD $0xe1180008 // TST R8, R8 not working see issue 5921
EOR R6, R6, R6
MOVW.EQ $(1<<24), R6
MOVW R6, 84(R13)
ADD $116, R13, g
MOVM.IA (R0), [R0-R9]
MOVM.IA [R0-R4], (g)
CMP $16, R12
BLO poly1305_blocks_armv6_done
poly1305_blocks_armv6_mainloop:
WORD $0xe31e0003 // TST R14, #3 not working see issue 5921
BEQ poly1305_blocks_armv6_mainloop_aligned
ADD $100, R13, g
MOVW_UNALIGNED(R14, g, R0, 0)
MOVW_UNALIGNED(R14, g, R0, 4)
MOVW_UNALIGNED(R14, g, R0, 8)
MOVW_UNALIGNED(R14, g, R0, 12)
MOVM.IA (g), [R0-R3]
ADD $16, R14
B poly1305_blocks_armv6_mainloop_loaded
poly1305_blocks_armv6_mainloop_aligned:
MOVM.IA.W (R14), [R0-R3]
poly1305_blocks_armv6_mainloop_loaded:
MOVW R0>>26, g
MOVW R1>>20, R11
MOVW R2>>14, R12
MOVW R14, 92(R13)
MOVW R3>>8, R4
ORR R1<<6, g, g
ORR R2<<12, R11, R11
ORR R3<<18, R12, R12
BIC $0xfc000000, R0, R0
BIC $0xfc000000, g, g
MOVW 84(R13), R3
BIC $0xfc000000, R11, R11
BIC $0xfc000000, R12, R12
ADD R0, R5, R5
ADD g, R6, R6
ORR R3, R4, R4
ADD R11, R7, R7
ADD $116, R13, R14
ADD R12, R8, R8
ADD R4, R9, R9
MOVM.IA (R14), [R0-R4]
MULLU R4, R5, (R11, g)
MULLU R3, R5, (R14, R12)
MULALU R3, R6, (R11, g)
MULALU R2, R6, (R14, R12)
MULALU R2, R7, (R11, g)
MULALU R1, R7, (R14, R12)
ADD R4<<2, R4, R4
ADD R3<<2, R3, R3
MULALU R1, R8, (R11, g)
MULALU R0, R8, (R14, R12)
MULALU R0, R9, (R11, g)
MULALU R4, R9, (R14, R12)
MOVW g, 76(R13)
MOVW R11, 80(R13)
MOVW R12, 68(R13)
MOVW R14, 72(R13)
MULLU R2, R5, (R11, g)
MULLU R1, R5, (R14, R12)
MULALU R1, R6, (R11, g)
MULALU R0, R6, (R14, R12)
MULALU R0, R7, (R11, g)
MULALU R4, R7, (R14, R12)
ADD R2<<2, R2, R2
ADD R1<<2, R1, R1
MULALU R4, R8, (R11, g)
MULALU R3, R8, (R14, R12)
MULALU R3, R9, (R11, g)
MULALU R2, R9, (R14, R12)
MOVW g, 60(R13)
MOVW R11, 64(R13)
MOVW R12, 52(R13)
MOVW R14, 56(R13)
MULLU R0, R5, (R11, g)
MULALU R4, R6, (R11, g)
MULALU R3, R7, (R11, g)
MULALU R2, R8, (R11, g)
MULALU R1, R9, (R11, g)
ADD $52, R13, R0
MOVM.IA (R0), [R0-R7]
MOVW g>>26, R12
MOVW R4>>26, R14
ORR R11<<6, R12, R12
ORR R5<<6, R14, R14
BIC $0xfc000000, g, g
BIC $0xfc000000, R4, R4
ADD.S R12, R0, R0
ADC $0, R1, R1
ADD.S R14, R6, R6
ADC $0, R7, R7
MOVW R0>>26, R12
MOVW R6>>26, R14
ORR R1<<6, R12, R12
ORR R7<<6, R14, R14
BIC $0xfc000000, R0, R0
BIC $0xfc000000, R6, R6
ADD R14<<2, R14, R14
ADD.S R12, R2, R2
ADC $0, R3, R3
ADD R14, g, g
MOVW R2>>26, R12
MOVW g>>26, R14
ORR R3<<6, R12, R12
BIC $0xfc000000, g, R5
BIC $0xfc000000, R2, R7
ADD R12, R4, R4
ADD R14, R0, R0
MOVW R4>>26, R12
BIC $0xfc000000, R4, R8
ADD R12, R6, R9
MOVW 96(R13), R12
MOVW 92(R13), R14
MOVW R0, R6
CMP $32, R12
SUB $16, R12, R12
MOVW R12, 96(R13)
BHS poly1305_blocks_armv6_mainloop
poly1305_blocks_armv6_done:
MOVW 88(R13), R12
MOVW R5, 20(R12)
MOVW R6, 24(R12)
MOVW R7, 28(R12)
MOVW R8, 32(R12)
MOVW R9, 36(R12)
ADD $48, R13, R0
MOVM.DA (R0), [R4-R8, R14]
RET
#define MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp) \
MOVBU.P 1(Rsrc), Rtmp; \
MOVBU.P Rtmp, 1(Rdst); \
MOVBU.P 1(Rsrc), Rtmp; \
MOVBU.P Rtmp, 1(Rdst)
#define MOVWP_UNALIGNED(Rsrc, Rdst, Rtmp) \
MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp); \
MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp)
// func poly1305_auth_armv6(out *[16]byte, m *byte, mlen uint32, key *[32]byte)
TEXT ·poly1305_auth_armv6(SB), $196-16
// The value 196, just above, is the sum of 64 (the size of the context
// structure) and 132 (the amount of stack needed).
//
// At this point, the stack pointer (R13) has been moved down. It
// points to the saved link register and there's 196 bytes of free
// space above it.
//
// The stack for this function looks like:
//
// +---------------------
// |
// | 64 bytes of context structure
// |
// +---------------------
// |
// | 112 bytes for poly1305_blocks_armv6
// |
// +---------------------
// | 16 bytes of final block, constructed at
// | poly1305_finish_ext_armv6_skip8
// +---------------------
// | four bytes of saved 'g'
// +---------------------
// | lr, saved by prelude <- R13 points here
// +---------------------
MOVW g, 4(R13)
MOVW out+0(FP), R4
MOVW m+4(FP), R5
MOVW mlen+8(FP), R6
MOVW key+12(FP), R7
ADD $136, R13, R0 // 136 = 4 + 4 + 16 + 112
MOVW R7, R1
// poly1305_init_ext_armv6 will write to the stack from R13+4, but
// that's ok because none of the other values have been written yet.
BL poly1305_init_ext_armv6<>(SB)
BIC.S $15, R6, R2
BEQ poly1305_auth_armv6_noblocks
ADD $136, R13, R0
MOVW R5, R1
ADD R2, R5, R5
SUB R2, R6, R6
BL poly1305_blocks_armv6<>(SB)
poly1305_auth_armv6_noblocks:
ADD $136, R13, R0
MOVW R5, R1
MOVW R6, R2
MOVW R4, R3
MOVW R0, R5
MOVW R1, R6
MOVW R2, R7
MOVW R3, R8
AND.S R2, R2, R2
BEQ poly1305_finish_ext_armv6_noremaining
EOR R0, R0
ADD $8, R13, R9 // 8 = offset to 16 byte scratch space
MOVW R0, (R9)
MOVW R0, 4(R9)
MOVW R0, 8(R9)
MOVW R0, 12(R9)
WORD $0xe3110003 // TST R1, #3 not working see issue 5921
BEQ poly1305_finish_ext_armv6_aligned
WORD $0xe3120008 // TST R2, #8 not working see issue 5921
BEQ poly1305_finish_ext_armv6_skip8
MOVWP_UNALIGNED(R1, R9, g)
MOVWP_UNALIGNED(R1, R9, g)
poly1305_finish_ext_armv6_skip8:
WORD $0xe3120004 // TST $4, R2 not working see issue 5921
BEQ poly1305_finish_ext_armv6_skip4
MOVWP_UNALIGNED(R1, R9, g)
poly1305_finish_ext_armv6_skip4:
WORD $0xe3120002 // TST $2, R2 not working see issue 5921
BEQ poly1305_finish_ext_armv6_skip2
MOVHUP_UNALIGNED(R1, R9, g)
B poly1305_finish_ext_armv6_skip2
poly1305_finish_ext_armv6_aligned:
WORD $0xe3120008 // TST R2, #8 not working see issue 5921
BEQ poly1305_finish_ext_armv6_skip8_aligned
MOVM.IA.W (R1), [g-R11]
MOVM.IA.W [g-R11], (R9)
poly1305_finish_ext_armv6_skip8_aligned:
WORD $0xe3120004 // TST $4, R2 not working see issue 5921
BEQ poly1305_finish_ext_armv6_skip4_aligned
MOVW.P 4(R1), g
MOVW.P g, 4(R9)
poly1305_finish_ext_armv6_skip4_aligned:
WORD $0xe3120002 // TST $2, R2 not working see issue 5921
BEQ poly1305_finish_ext_armv6_skip2
MOVHU.P 2(R1), g
MOVH.P g, 2(R9)
poly1305_finish_ext_armv6_skip2:
WORD $0xe3120001 // TST $1, R2 not working see issue 5921
BEQ poly1305_finish_ext_armv6_skip1
MOVBU.P 1(R1), g
MOVBU.P g, 1(R9)
poly1305_finish_ext_armv6_skip1:
MOVW $1, R11
MOVBU R11, 0(R9)
MOVW R11, 56(R5)
MOVW R5, R0
ADD $8, R13, R1
MOVW $16, R2
BL poly1305_blocks_armv6<>(SB)
poly1305_finish_ext_armv6_noremaining:
MOVW 20(R5), R0
MOVW 24(R5), R1
MOVW 28(R5), R2
MOVW 32(R5), R3
MOVW 36(R5), R4
MOVW R4>>26, R12
BIC $0xfc000000, R4, R4
ADD R12<<2, R12, R12
ADD R12, R0, R0
MOVW R0>>26, R12
BIC $0xfc000000, R0, R0
ADD R12, R1, R1
MOVW R1>>26, R12
BIC $0xfc000000, R1, R1
ADD R12, R2, R2
MOVW R2>>26, R12
BIC $0xfc000000, R2, R2
ADD R12, R3, R3
MOVW R3>>26, R12
BIC $0xfc000000, R3, R3
ADD R12, R4, R4
ADD $5, R0, R6
MOVW R6>>26, R12
BIC $0xfc000000, R6, R6
ADD R12, R1, R7
MOVW R7>>26, R12
BIC $0xfc000000, R7, R7
ADD R12, R2, g
MOVW g>>26, R12
BIC $0xfc000000, g, g
ADD R12, R3, R11
MOVW $-(1<<26), R12
ADD R11>>26, R12, R12
BIC $0xfc000000, R11, R11
ADD R12, R4, R9
MOVW R9>>31, R12
SUB $1, R12
AND R12, R6, R6
AND R12, R7, R7
AND R12, g, g
AND R12, R11, R11
AND R12, R9, R9
MVN R12, R12
AND R12, R0, R0
AND R12, R1, R1
AND R12, R2, R2
AND R12, R3, R3
AND R12, R4, R4
ORR R6, R0, R0
ORR R7, R1, R1
ORR g, R2, R2
ORR R11, R3, R3
ORR R9, R4, R4
ORR R1<<26, R0, R0
MOVW R1>>6, R1
ORR R2<<20, R1, R1
MOVW R2>>12, R2
ORR R3<<14, R2, R2
MOVW R3>>18, R3
ORR R4<<8, R3, R3
MOVW 40(R5), R6
MOVW 44(R5), R7
MOVW 48(R5), g
MOVW 52(R5), R11
ADD.S R6, R0, R0
ADC.S R7, R1, R1
ADC.S g, R2, R2
ADC.S R11, R3, R3
MOVM.IA [R0-R3], (R8)
MOVW R5, R12
EOR R0, R0, R0
EOR R1, R1, R1
EOR R2, R2, R2
EOR R3, R3, R3
EOR R4, R4, R4
EOR R5, R5, R5
EOR R6, R6, R6
EOR R7, R7, R7
MOVM.IA.W [R0-R7], (R12)
MOVM.IA [R0-R7], (R12)
MOVW 4(R13), g
RET


@ -2,18 +2,29 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file provides the generic implementation of Sum and MAC. Other files
// might provide optimized assembly implementations of some of this code.
package poly1305
import "encoding/binary"
const (
msgBlock = uint32(1 << 24)
finalBlock = uint32(0)
)
// Poly1305 [RFC 7539] is a relatively simple algorithm: the authentication tag
// for a 64-byte message is approximately
//
// s + m[0:16] * r⁴ + m[16:32] * r³ + m[32:48] * r² + m[48:64] * r mod 2¹³⁰ - 5
//
// for some secret r and s. It can be computed sequentially like
//
// for len(msg) > 0:
// h += read(msg, 16)
// h *= r
// h %= 2¹³⁰ - 5
// return h + s
//
// All the complexity is about doing performant constant-time math on numbers
// larger than any available numeric type.
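
A direct transcription of that sequential definition, using math/big, can make the limb code below easier to follow. This is an editorial sketch: variable-time and allocation-heavy, for checking understanding only, never for production. The clamping matches the rMask0/rMask1 constants defined further down:

// naive_poly1305.go — an editorial, variable-time transcription of the
// comment above using math/big.
package main

import (
	"fmt"
	"math/big"
)

// leToInt interprets b as a little-endian unsigned integer.
func leToInt(b []byte) *big.Int {
	be := make([]byte, len(b))
	for i := range b {
		be[len(b)-1-i] = b[i]
	}
	return new(big.Int).SetBytes(be)
}

func naiveSum(msg []byte, key *[32]byte) (out [16]byte) {
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 130), big.NewInt(5)) // 2¹³⁰ - 5

	// Clamp r exactly as rMask0/rMask1 below do; s is used as-is.
	rb := make([]byte, 16)
	copy(rb, key[:16])
	for _, i := range []int{3, 7, 11, 15} {
		rb[i] &= 15
	}
	for _, i := range []int{4, 8, 12} {
		rb[i] &= 252
	}
	r, s := leToInt(rb), leToInt(key[16:])

	h := new(big.Int)
	for len(msg) > 0 {
		n := 16
		if len(msg) < n {
			n = len(msg)
		}
		block := make([]byte, n+1)
		copy(block, msg[:n])
		block[n] = 1 // the bit set just above the message bytes
		h.Add(h, leToInt(block))
		h.Mul(h, r)
		h.Mod(h, p) // h = (h + m) * r mod 2¹³⁰ - 5
		msg = msg[n:]
	}
	h.Add(h, s)
	h.Mod(h, new(big.Int).Lsh(big.NewInt(1), 128)) // tag = h + s mod 2¹²⁸

	hb := h.Bytes() // big-endian; at most 16 bytes after the mod
	for i, v := range hb {
		out[len(hb)-1-i] = v
	}
	return out
}

func main() {
	var key [32]byte
	key[0] = 1 // r = 1 (after clamping), s = 0
	fmt.Printf("%x\n", naiveSum([]byte("hello"), &key))
}
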
// sumGeneric generates an authenticator for msg using a one-time key and
// puts the 16-byte result into out. This is the generic implementation of
// Sum and should be called if no assembly implementation is available.
func sumGeneric(out *[TagSize]byte, msg []byte, key *[32]byte) {
h := newMACGeneric(key)
h.Write(msg)
@ -21,152 +32,276 @@ func sumGeneric(out *[TagSize]byte, msg []byte, key *[32]byte) {
}
func newMACGeneric(key *[32]byte) (h macGeneric) {
h.r[0] = binary.LittleEndian.Uint32(key[0:]) & 0x3ffffff
h.r[1] = (binary.LittleEndian.Uint32(key[3:]) >> 2) & 0x3ffff03
h.r[2] = (binary.LittleEndian.Uint32(key[6:]) >> 4) & 0x3ffc0ff
h.r[3] = (binary.LittleEndian.Uint32(key[9:]) >> 6) & 0x3f03fff
h.r[4] = (binary.LittleEndian.Uint32(key[12:]) >> 8) & 0x00fffff
h.s[0] = binary.LittleEndian.Uint32(key[16:])
h.s[1] = binary.LittleEndian.Uint32(key[20:])
h.s[2] = binary.LittleEndian.Uint32(key[24:])
h.s[3] = binary.LittleEndian.Uint32(key[28:])
initialize(key, &h.r, &h.s)
return
}
// macState holds numbers in saturated 64-bit little-endian limbs. That is,
// the value of [x0, x1, x2] is x[0] + x[1] * 2⁶⁴ + x[2] * 2¹²⁸.
type macState struct {
// h is the main accumulator. It is to be interpreted modulo 2¹³⁰ - 5, but
// can grow larger during and after rounds.
h [3]uint64
// r and s are the private key components.
r [2]uint64
s [2]uint64
}
type macGeneric struct {
h, r [5]uint32
s [4]uint32
macState
buffer [TagSize]byte
offset int
}
func (h *macGeneric) Write(p []byte) (n int, err error) {
n = len(p)
// Write splits the incoming message into TagSize chunks, and passes them to
// update. It buffers incomplete chunks.
func (h *macGeneric) Write(p []byte) (int, error) {
nn := len(p)
if h.offset > 0 {
remaining := TagSize - h.offset
if n < remaining {
h.offset += copy(h.buffer[h.offset:], p)
return n, nil
n := copy(h.buffer[h.offset:], p)
if h.offset+n < TagSize {
h.offset += n
return nn, nil
}
copy(h.buffer[h.offset:], p[:remaining])
p = p[remaining:]
p = p[n:]
h.offset = 0
updateGeneric(h.buffer[:], msgBlock, &(h.h), &(h.r))
updateGeneric(&h.macState, h.buffer[:])
}
if nn := len(p) - (len(p) % TagSize); nn > 0 {
updateGeneric(p, msgBlock, &(h.h), &(h.r))
p = p[nn:]
if n := len(p) - (len(p) % TagSize); n > 0 {
updateGeneric(&h.macState, p[:n])
p = p[n:]
}
if len(p) > 0 {
h.offset += copy(h.buffer[h.offset:], p)
}
return n, nil
return nn, nil
}
func (h *macGeneric) Sum(out *[16]byte) {
H, R := h.h, h.r
// Sum flushes the last incomplete chunk from the buffer, if any, and generates
// the MAC output. It does not modify its state, in order to allow for multiple
// calls to Sum, even if no Write is allowed after Sum.
func (h *macGeneric) Sum(out *[TagSize]byte) {
state := h.macState
if h.offset > 0 {
var buffer [TagSize]byte
copy(buffer[:], h.buffer[:h.offset])
buffer[h.offset] = 1 // invariant: h.offset < TagSize
updateGeneric(buffer[:], finalBlock, &H, &R)
updateGeneric(&state, h.buffer[:h.offset])
}
finalizeGeneric(out, &H, &(h.s))
finalize(out, &state.h, &state.s)
}
func updateGeneric(msg []byte, flag uint32, h, r *[5]uint32) {
h0, h1, h2, h3, h4 := h[0], h[1], h[2], h[3], h[4]
r0, r1, r2, r3, r4 := uint64(r[0]), uint64(r[1]), uint64(r[2]), uint64(r[3]), uint64(r[4])
R1, R2, R3, R4 := r1*5, r2*5, r3*5, r4*5
// [rMask0, rMask1] is the specified Poly1305 clamping mask in little-endian. It
// clears some bits of the secret coefficient to make it possible to implement
// multiplication more efficiently.
const (
rMask0 = 0x0FFFFFFC0FFFFFFF
rMask1 = 0x0FFFFFFC0FFFFFFC
)
for len(msg) >= TagSize {
// h += msg
h0 += binary.LittleEndian.Uint32(msg[0:]) & 0x3ffffff
h1 += (binary.LittleEndian.Uint32(msg[3:]) >> 2) & 0x3ffffff
h2 += (binary.LittleEndian.Uint32(msg[6:]) >> 4) & 0x3ffffff
h3 += (binary.LittleEndian.Uint32(msg[9:]) >> 6) & 0x3ffffff
h4 += (binary.LittleEndian.Uint32(msg[12:]) >> 8) | flag
func initialize(key *[32]byte, r, s *[2]uint64) {
r[0] = binary.LittleEndian.Uint64(key[0:8]) & rMask0
r[1] = binary.LittleEndian.Uint64(key[8:16]) & rMask1
s[0] = binary.LittleEndian.Uint64(key[16:24])
s[1] = binary.LittleEndian.Uint64(key[24:32])
}
// h *= r
d0 := (uint64(h0) * r0) + (uint64(h1) * R4) + (uint64(h2) * R3) + (uint64(h3) * R2) + (uint64(h4) * R1)
d1 := (d0 >> 26) + (uint64(h0) * r1) + (uint64(h1) * r0) + (uint64(h2) * R4) + (uint64(h3) * R3) + (uint64(h4) * R2)
d2 := (d1 >> 26) + (uint64(h0) * r2) + (uint64(h1) * r1) + (uint64(h2) * r0) + (uint64(h3) * R4) + (uint64(h4) * R3)
d3 := (d2 >> 26) + (uint64(h0) * r3) + (uint64(h1) * r2) + (uint64(h2) * r1) + (uint64(h3) * r0) + (uint64(h4) * R4)
d4 := (d3 >> 26) + (uint64(h0) * r4) + (uint64(h1) * r3) + (uint64(h2) * r2) + (uint64(h3) * r1) + (uint64(h4) * r0)
// uint128 holds a 128-bit number as two 64-bit limbs, for use with the
// bits.Mul64 and bits.Add64 intrinsics.
type uint128 struct {
lo, hi uint64
}
// h %= p
h0 = uint32(d0) & 0x3ffffff
h1 = uint32(d1) & 0x3ffffff
h2 = uint32(d2) & 0x3ffffff
h3 = uint32(d3) & 0x3ffffff
h4 = uint32(d4) & 0x3ffffff
func mul64(a, b uint64) uint128 {
hi, lo := bitsMul64(a, b)
return uint128{lo, hi}
}
h0 += uint32(d4>>26) * 5
h1 += h0 >> 26
h0 = h0 & 0x3ffffff
func add128(a, b uint128) uint128 {
lo, c := bitsAdd64(a.lo, b.lo, 0)
hi, c := bitsAdd64(a.hi, b.hi, c)
if c != 0 {
panic("poly1305: unexpected overflow")
}
return uint128{lo, hi}
}
msg = msg[TagSize:]
func shiftRightBy2(a uint128) uint128 {
a.lo = a.lo>>2 | (a.hi&3)<<62
a.hi = a.hi >> 2
return a
}
// updateGeneric absorbs msg into the state.h accumulator. For each chunk m of
// 128 bits of message, it computes
//
// h₊ = (h + m) * r mod 2¹³⁰ - 5
//
// If the msg length is not a multiple of TagSize, it assumes the last
// incomplete chunk is the final one.
func updateGeneric(state *macState, msg []byte) {
h0, h1, h2 := state.h[0], state.h[1], state.h[2]
r0, r1 := state.r[0], state.r[1]
for len(msg) > 0 {
var c uint64
// For the first step, h + m, we use a chain of bits.Add64 intrinsics.
// The resulting value of h might exceed 2¹³⁰ - 5, but will be partially
// reduced at the end of the multiplication below.
//
// The spec requires us to set a bit just above the message size, not to
// hide leading zeroes. For full chunks, that's 1 << 128, so we can just
// add 1 to the most significant (2¹²⁸) limb, h2.
if len(msg) >= TagSize {
h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(msg[0:8]), 0)
h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(msg[8:16]), c)
h2 += c + 1
msg = msg[TagSize:]
} else {
var buf [TagSize]byte
copy(buf[:], msg)
buf[len(msg)] = 1
h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(buf[0:8]), 0)
h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(buf[8:16]), c)
h2 += c
msg = nil
}
// Multiplication of big number limbs is similar to elementary school
// columnar multiplication. Instead of digits, there are 64-bit limbs.
//
// We are multiplying a 3 limbs number, h, by a 2 limbs number, r.
//
// h2 h1 h0 x
// r1 r0 =
// ----------------
// h2r0 h1r0 h0r0 <-- individual 128-bit products
// + h2r1 h1r1 h0r1
// ------------------------
// m3 m2 m1 m0 <-- result in 128-bit overlapping limbs
// ------------------------
// m3.hi m2.hi m1.hi m0.hi <-- carry propagation
// + m3.lo m2.lo m1.lo m0.lo
// -------------------------------
// t4 t3 t2 t1 t0 <-- final result in 64-bit limbs
//
// The main difference from pen-and-paper multiplication is that we do
// carry propagation in a separate step, as if we wrote two digit sums
// at first (the 128-bit limbs), and then carried the tens all at once.
h0r0 := mul64(h0, r0)
h1r0 := mul64(h1, r0)
h2r0 := mul64(h2, r0)
h0r1 := mul64(h0, r1)
h1r1 := mul64(h1, r1)
h2r1 := mul64(h2, r1)
// Since h2 is known to be at most 7 (5 + 1 + 1), and r0 and r1 have their
// top 4 bits cleared by rMask{0,1}, we know that their product is not going
// to overflow 64 bits, so we can ignore the high part of the products.
//
// This also means that the product doesn't have a fifth limb (t4).
if h2r0.hi != 0 {
panic("poly1305: unexpected overflow")
}
if h2r1.hi != 0 {
panic("poly1305: unexpected overflow")
}
m0 := h0r0
m1 := add128(h1r0, h0r1) // These two additions don't overflow thanks again
m2 := add128(h2r0, h1r1) // to the 4 masked bits at the top of r0 and r1.
m3 := h2r1
t0 := m0.lo
t1, c := bitsAdd64(m1.lo, m0.hi, 0)
t2, c := bitsAdd64(m2.lo, m1.hi, c)
t3, _ := bitsAdd64(m3.lo, m2.hi, c)
// Now we have the result as 4 64-bit limbs, and we need to reduce it
// modulo 2¹³⁰ - 5. The special shape of this Crandall prime lets us do
// a cheap partial reduction according to the reduction identity
//
// c * 2¹³⁰ + n = c * 5 + n mod 2¹³⁰ - 5
//
// because 2¹³⁰ = 5 mod 2¹³⁰ - 5. Partial reduction since the result is
// likely to be larger than 2¹³⁰ - 5, but still small enough to fit the
// assumptions we make about h in the rest of the code.
//
// See also https://speakerdeck.com/gtank/engineering-prime-numbers?slide=23
// We split the final result at the 2¹³⁰ mark into h and cc, the carry.
// Note that the carry bits are effectively shifted left by 2, in other
// words, cc = c * 4 for the c in the reduction identity.
h0, h1, h2 = t0, t1, t2&maskLow2Bits
cc := uint128{t2 & maskNotLow2Bits, t3}
// To add c * 5 to h, we first add cc = c * 4, and then add (cc >> 2) = c.
h0, c = bitsAdd64(h0, cc.lo, 0)
h1, c = bitsAdd64(h1, cc.hi, c)
h2 += c
cc = shiftRightBy2(cc)
h0, c = bitsAdd64(h0, cc.lo, 0)
h1, c = bitsAdd64(h1, cc.hi, c)
h2 += c
// h2 is at most 3 + 1 + 1 = 5, making the whole of h at most
//
// 5 * 2¹²⁸ + (2¹²⁸ - 1) = 6 * 2¹²⁸ - 1
}
h[0], h[1], h[2], h[3], h[4] = h0, h1, h2, h3, h4
state.h[0], state.h[1], state.h[2] = h0, h1, h2
}
func finalizeGeneric(out *[TagSize]byte, h *[5]uint32, s *[4]uint32) {
	h0, h1, h2, h3, h4 := h[0], h[1], h[2], h[3], h[4]
	// h %= p reduction
	h2 += h1 >> 26
	h1 &= 0x3ffffff
	h3 += h2 >> 26
	h2 &= 0x3ffffff
	h4 += h3 >> 26
	h3 &= 0x3ffffff
	h0 += 5 * (h4 >> 26)
	h4 &= 0x3ffffff
	h1 += h0 >> 26
	h0 &= 0x3ffffff
	// h - p
	t0 := h0 + 5
	t1 := h1 + (t0 >> 26)
	t2 := h2 + (t1 >> 26)
	t3 := h3 + (t2 >> 26)
	t4 := h4 + (t3 >> 26) - (1 << 26)
	t0 &= 0x3ffffff
	t1 &= 0x3ffffff
	t2 &= 0x3ffffff
	t3 &= 0x3ffffff
	// select h if h < p else h - p
	t_mask := (t4 >> 31) - 1
	h_mask := ^t_mask
	h0 = (h0 & h_mask) | (t0 & t_mask)
	h1 = (h1 & h_mask) | (t1 & t_mask)
	h2 = (h2 & h_mask) | (t2 & t_mask)
	h3 = (h3 & h_mask) | (t3 & t_mask)
	h4 = (h4 & h_mask) | (t4 & t_mask)
	// h %= 2^128
	h0 |= h1 << 26
	h1 = ((h1 >> 6) | (h2 << 20))
	h2 = ((h2 >> 12) | (h3 << 14))
	h3 = ((h3 >> 18) | (h4 << 8))
	// s: the s part of the key
	// tag = (h + s) % (2^128)
	t := uint64(h0) + uint64(s[0])
	h0 = uint32(t)
	t = uint64(h1) + uint64(s[1]) + (t >> 32)
	h1 = uint32(t)
	t = uint64(h2) + uint64(s[2]) + (t >> 32)
	h2 = uint32(t)
	t = uint64(h3) + uint64(s[3]) + (t >> 32)
	h3 = uint32(t)
	binary.LittleEndian.PutUint32(out[0:], h0)
	binary.LittleEndian.PutUint32(out[4:], h1)
	binary.LittleEndian.PutUint32(out[8:], h2)
	binary.LittleEndian.PutUint32(out[12:], h3)
}
const (
	maskLow2Bits    uint64 = 0x0000000000000003
	maskNotLow2Bits uint64 = ^maskLow2Bits
)
// select64 returns x if v == 1 and y if v == 0, in constant time.
func select64(v, x, y uint64) uint64 { return ^(v-1)&x | (v-1)&y }
// [p0, p1, p2] is 2¹³⁰ - 5 in little endian order.
const (
	p0 = 0xFFFFFFFFFFFFFFFB
	p1 = 0xFFFFFFFFFFFFFFFF
	p2 = 0x0000000000000003
)
// finalize completes the modular reduction of h and computes
//
//	out = h + s mod 2¹²⁸
//
func finalize(out *[TagSize]byte, h *[3]uint64, s *[2]uint64) {
	h0, h1, h2 := h[0], h[1], h[2]
	// After the partial reduction in updateGeneric, h might be more than
	// 2¹³⁰ - 5, but will be less than 2 * (2¹³⁰ - 5). To complete the reduction
	// in constant time, we compute t = h - (2¹³⁰ - 5), and select h as the
	// result if the subtraction underflows, and t otherwise.
	hMinusP0, b := bitsSub64(h0, p0, 0)
	hMinusP1, b := bitsSub64(h1, p1, b)
	_, b = bitsSub64(h2, p2, b)
	// h = h if h < p else h - p
	h0 = select64(b, h0, hMinusP0)
	h1 = select64(b, h1, hMinusP1)
	// Finally, we compute the last Poly1305 step
	//
	//	tag = h + s mod 2¹²⁸
	//
	// by just doing a wide addition with the 128 low bits of h and discarding
	// the overflow.
	h0, c := bitsAdd64(h0, s[0], 0)
	h1, _ = bitsAdd64(h1, s[1], c)
	binary.LittleEndian.PutUint64(out[0:8], h0)
	binary.LittleEndian.PutUint64(out[8:16], h1)
}
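A minimal sketch (not part of the commit) of the borrow-driven constant-time selection finalize performs; bits.Sub64 from the standard library stands in for the bitsSub64 helper:

package main

import (
	"fmt"
	"math/bits"
)

// select64 returns x if v == 1 and y if v == 0, in constant time.
func select64(v, x, y uint64) uint64 { return ^(v-1)&x | (v-1)&y }

func main() {
	a, b := uint64(3), uint64(10)
	// The borrow is 1 exactly when a < b, i.e. when a-b underflows.
	diff, borrow := bits.Sub64(a, b, 0)
	// Keep a when the subtraction underflowed, otherwise keep a-b,
	// without branching on the (potentially secret) values.
	fmt.Println(select64(borrow, a, diff)) // 3
}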


@ -2,14 +2,11 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build s390x,!go1.11 !arm,!amd64,!s390x,!ppc64le gccgo appengine nacl
// +build s390x,!go1.11 !amd64,!s390x,!ppc64le gccgo appengine nacl
package poly1305
// Sum generates an authenticator for msg using a one-time key and puts the
// 16-byte result into out. Authenticating two different messages with the same
// key allows an attacker to forge messages at will.
func Sum(out *[TagSize]byte, msg []byte, key *[32]byte) {
func sum(out *[TagSize]byte, msg []byte, key *[32]byte) {
h := newMAC(key)
h.Write(msg)
h.Sum(out)


@ -7,62 +7,52 @@
package poly1305
//go:noescape
func initialize(state *[7]uint64, key *[32]byte)
//go:noescape
func update(state *[7]uint64, msg []byte)
func update(state *macState, msg []byte)
//go:noescape
func finalize(tag *[TagSize]byte, state *[7]uint64)
// Sum generates an authenticator for m using a one-time key and puts the
// 16-byte result into out. Authenticating two different messages with the same
// key allows an attacker to forge messages at will.
func Sum(out *[16]byte, m []byte, key *[32]byte) {
func sum(out *[16]byte, m []byte, key *[32]byte) {
h := newMAC(key)
h.Write(m)
h.Sum(out)
}
func newMAC(key *[32]byte) (h mac) {
initialize(&h.state, key)
initialize(key, &h.r, &h.s)
return
}
type mac struct {
	state [7]uint64 // := uint64{ h0, h1, h2, r0, r1, pad0, pad1 }
	buffer [TagSize]byte
	offset int
}
// mac is a wrapper for macGeneric that redirects calls that would have gone to
// updateGeneric to update.
//
// Its Write and Sum methods are otherwise identical to the macGeneric ones, but
// using function pointers would carry a major performance cost.
type mac struct{ macGeneric }
func (h *mac) Write(p []byte) (n int, err error) {
n = len(p)
func (h *mac) Write(p []byte) (int, error) {
nn := len(p)
if h.offset > 0 {
remaining := TagSize - h.offset
if n < remaining {
h.offset += copy(h.buffer[h.offset:], p)
return n, nil
n := copy(h.buffer[h.offset:], p)
if h.offset+n < TagSize {
h.offset += n
return nn, nil
}
copy(h.buffer[h.offset:], p[:remaining])
p = p[remaining:]
p = p[n:]
h.offset = 0
update(&h.state, h.buffer[:])
update(&h.macState, h.buffer[:])
}
if nn := len(p) - (len(p) % TagSize); nn > 0 {
update(&h.state, p[:nn])
p = p[nn:]
if n := len(p) - (len(p) % TagSize); n > 0 {
update(&h.macState, p[:n])
p = p[n:]
}
if len(p) > 0 {
h.offset += copy(h.buffer[h.offset:], p)
}
return n, nil
return nn, nil
}
func (h *mac) Sum(out *[16]byte) {
state := h.state
state := h.macState
if h.offset > 0 {
update(&state, h.buffer[:h.offset])
}
finalize(out, &state)
finalize(out, &state.h, &state.s)
}
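For context, a usage sketch of the package's public API, which ultimately dispatches into the update/finalize implementations above; the key bytes here are a placeholder, not a real one-time key:

package main

import (
	"fmt"

	"golang.org/x/crypto/poly1305"
)

func main() {
	var key [32]byte
	copy(key[:], "placeholder one-time poly key!") // never reuse a real key

	msg := []byte("hello poly1305")

	var tag [16]byte
	poly1305.Sum(&tag, msg, &key) // computes the 16-byte authenticator
	fmt.Printf("tag: %x\n", tag)

	fmt.Println(poly1305.Verify(&tag, msg, &key)) // true, checked in constant time
}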


@ -58,7 +58,6 @@ DATA ·poly1305Mask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC
GLOBL ·poly1305Mask<>(SB), RODATA, $16
// func update(state *[7]uint64, msg []byte)
TEXT ·update(SB), $0-32
MOVD state+0(FP), R3
MOVD msg_base+8(FP), R4
@ -180,68 +179,3 @@ done:
MOVD R9, 8(R3)
MOVD R10, 16(R3)
RET
// func initialize(state *[7]uint64, key *[32]byte)
TEXT ·initialize(SB), $0-16
MOVD state+0(FP), R3
MOVD key+8(FP), R4
// state[0...7] is initialized with zero
// Load key
MOVD 0(R4), R5
MOVD 8(R4), R6
MOVD 16(R4), R7
MOVD 24(R4), R8
// Address of key mask
MOVD $·poly1305Mask<>(SB), R9
// Save original key in state
MOVD R7, 40(R3)
MOVD R8, 48(R3)
// Get mask
MOVD (R9), R7
MOVD 8(R9), R8
// And with key
AND R5, R7, R5
AND R6, R8, R6
// Save masked key in state
MOVD R5, 24(R3)
MOVD R6, 32(R3)
RET
// func finalize(tag *[TagSize]byte, state *[7]uint64)
TEXT ·finalize(SB), $0-16
MOVD tag+0(FP), R3
MOVD state+8(FP), R4
// Get h0, h1, h2 from state
MOVD 0(R4), R5
MOVD 8(R4), R6
MOVD 16(R4), R7
// Save h0, h1
MOVD R5, R8
MOVD R6, R9
MOVD $3, R20
MOVD $-1, R21
SUBC $-5, R5
SUBE R21, R6
SUBE R20, R7
MOVD $0, R21
SUBZE R21
// Check for carry
CMP $0, R21
ISEL $2, R5, R8, R5
ISEL $2, R6, R9, R6
MOVD 40(R4), R8
MOVD 48(R4), R9
ADDC R8, R5
ADDE R9, R6
MOVD R5, 0(R3)
MOVD R6, 8(R3)
RET


@ -22,10 +22,7 @@ func poly1305vx(out *[16]byte, m *byte, mlen uint64, key *[32]byte)
//go:noescape
func poly1305vmsl(out *[16]byte, m *byte, mlen uint64, key *[32]byte)
// Sum generates an authenticator for m using a one-time key and puts the
// 16-byte result into out. Authenticating two different messages with the same
// key allows an attacker to forge messages at will.
func Sum(out *[16]byte, m []byte, key *[32]byte) {
func sum(out *[16]byte, m []byte, key *[32]byte) {
if cpu.S390X.HasVX {
var mPtr *byte
if len(m) > 0 {


@ -17,12 +17,14 @@ import (
// These constants from [PROTOCOL.certkeys] represent the algorithm names
// for certificate types supported by this package.
const (
CertAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com"
CertAlgoDSAv01 = "ssh-dss-cert-v01@openssh.com"
CertAlgoECDSA256v01 = "ecdsa-sha2-nistp256-cert-v01@openssh.com"
CertAlgoECDSA384v01 = "ecdsa-sha2-nistp384-cert-v01@openssh.com"
CertAlgoECDSA521v01 = "ecdsa-sha2-nistp521-cert-v01@openssh.com"
CertAlgoED25519v01 = "ssh-ed25519-cert-v01@openssh.com"
CertAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com"
CertAlgoDSAv01 = "ssh-dss-cert-v01@openssh.com"
CertAlgoECDSA256v01 = "ecdsa-sha2-nistp256-cert-v01@openssh.com"
CertAlgoECDSA384v01 = "ecdsa-sha2-nistp384-cert-v01@openssh.com"
CertAlgoECDSA521v01 = "ecdsa-sha2-nistp521-cert-v01@openssh.com"
CertAlgoSKECDSA256v01 = "sk-ecdsa-sha2-nistp256-cert-v01@openssh.com"
CertAlgoED25519v01 = "ssh-ed25519-cert-v01@openssh.com"
CertAlgoSKED25519v01 = "sk-ssh-ed25519-cert-v01@openssh.com"
)
// Certificate types distinguish between host and user
@ -37,6 +39,7 @@ const (
type Signature struct {
Format string
Blob []byte
Rest []byte `ssh:"rest"`
}
// CertTimeInfinity can be used for OpenSSHCertV01.ValidBefore to indicate that
@ -429,12 +432,14 @@ func (c *Certificate) SignCert(rand io.Reader, authority Signer) error {
}
var certAlgoNames = map[string]string{
KeyAlgoRSA: CertAlgoRSAv01,
KeyAlgoDSA: CertAlgoDSAv01,
KeyAlgoECDSA256: CertAlgoECDSA256v01,
KeyAlgoECDSA384: CertAlgoECDSA384v01,
KeyAlgoECDSA521: CertAlgoECDSA521v01,
KeyAlgoED25519: CertAlgoED25519v01,
KeyAlgoRSA: CertAlgoRSAv01,
KeyAlgoDSA: CertAlgoDSAv01,
KeyAlgoECDSA256: CertAlgoECDSA256v01,
KeyAlgoECDSA384: CertAlgoECDSA384v01,
KeyAlgoECDSA521: CertAlgoECDSA521v01,
KeyAlgoSKECDSA256: CertAlgoSKECDSA256v01,
KeyAlgoED25519: CertAlgoED25519v01,
KeyAlgoSKED25519: CertAlgoSKED25519v01,
}
// certToPrivAlgo returns the underlying algorithm for a certificate algorithm.
@ -518,6 +523,12 @@ func parseSignatureBody(in []byte) (out *Signature, rest []byte, ok bool) {
return
}
switch out.Format {
case KeyAlgoSKECDSA256, CertAlgoSKECDSA256v01, KeyAlgoSKED25519, CertAlgoSKED25519v01:
out.Rest = in
return out, nil, ok
}
return out, in, ok
}
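A sketch (toy struct, not from the commit) of the `ssh:"rest"` tag semantics the new Signature.Rest field relies on: Marshal emits such a field raw, with no length prefix, and Unmarshal hands it all remaining bytes:

package main

import (
	"fmt"

	"golang.org/x/crypto/ssh"
)

func main() {
	// Toy message: a length-prefixed string followed by trailing bytes.
	type toy struct {
		Format string
		Rest   []byte `ssh:"rest"`
	}
	in := ssh.Marshal(toy{Format: "foo", Rest: []byte{0xde, 0xad}})

	var out toy
	if err := ssh.Unmarshal(in, &out); err != nil {
		panic(err)
	}
	fmt.Printf("%s %x\n", out.Format, out.Rest) // foo dead
}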


@ -16,9 +16,8 @@ import (
"hash"
"io"
"io/ioutil"
"math/bits"
"golang.org/x/crypto/internal/chacha20"
"golang.org/x/crypto/chacha20"
"golang.org/x/crypto/poly1305"
)
@ -642,8 +641,8 @@ const chacha20Poly1305ID = "chacha20-poly1305@openssh.com"
// the methods here also implement padding, which RFC4253 Section 6
// also requires of stream ciphers.
type chacha20Poly1305Cipher struct {
lengthKey [8]uint32
contentKey [8]uint32
lengthKey [32]byte
contentKey [32]byte
buf []byte
}
@ -656,21 +655,21 @@ func newChaCha20Cipher(key, unusedIV, unusedMACKey []byte, unusedAlgs directionA
buf: make([]byte, 256),
}
for i := range c.contentKey {
c.contentKey[i] = binary.LittleEndian.Uint32(key[i*4 : (i+1)*4])
}
for i := range c.lengthKey {
c.lengthKey[i] = binary.LittleEndian.Uint32(key[(i+8)*4 : (i+9)*4])
}
copy(c.contentKey[:], key[:32])
copy(c.lengthKey[:], key[32:])
return c, nil
}
func (c *chacha20Poly1305Cipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) {
nonce := [3]uint32{0, 0, bits.ReverseBytes32(seqNum)}
s := chacha20.New(c.contentKey, nonce)
var polyKey [32]byte
nonce := make([]byte, 12)
binary.BigEndian.PutUint32(nonce[8:], seqNum)
s, err := chacha20.NewUnauthenticatedCipher(c.contentKey[:], nonce)
if err != nil {
return nil, err
}
var polyKey, discardBuf [32]byte
s.XORKeyStream(polyKey[:], polyKey[:])
s.Advance() // skip next 32 bytes
s.XORKeyStream(discardBuf[:], discardBuf[:]) // skip the next 32 bytes
encryptedLength := c.buf[:4]
if _, err := io.ReadFull(r, encryptedLength); err != nil {
@ -678,7 +677,11 @@ func (c *chacha20Poly1305Cipher) readCipherPacket(seqNum uint32, r io.Reader) ([
}
var lenBytes [4]byte
chacha20.New(c.lengthKey, nonce).XORKeyStream(lenBytes[:], encryptedLength)
ls, err := chacha20.NewUnauthenticatedCipher(c.lengthKey[:], nonce)
if err != nil {
return nil, err
}
ls.XORKeyStream(lenBytes[:], encryptedLength)
length := binary.BigEndian.Uint32(lenBytes[:])
if length > maxPacket {
@ -724,11 +727,15 @@ func (c *chacha20Poly1305Cipher) readCipherPacket(seqNum uint32, r io.Reader) ([
}
func (c *chacha20Poly1305Cipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, payload []byte) error {
nonce := [3]uint32{0, 0, bits.ReverseBytes32(seqNum)}
s := chacha20.New(c.contentKey, nonce)
var polyKey [32]byte
nonce := make([]byte, 12)
binary.BigEndian.PutUint32(nonce[8:], seqNum)
s, err := chacha20.NewUnauthenticatedCipher(c.contentKey[:], nonce)
if err != nil {
return err
}
var polyKey, discardBuf [32]byte
s.XORKeyStream(polyKey[:], polyKey[:])
s.Advance() // skip next 32 bytes
s.XORKeyStream(discardBuf[:], discardBuf[:]) // skip the next 32 bytes
// There is no blocksize, so fall back to multiple of 8 byte
// padding, as described in RFC 4253, Sec 6.
@ -748,7 +755,11 @@ func (c *chacha20Poly1305Cipher) writeCipherPacket(seqNum uint32, w io.Writer, r
}
binary.BigEndian.PutUint32(c.buf, uint32(1+len(payload)+padding))
chacha20.New(c.lengthKey, nonce).XORKeyStream(c.buf, c.buf[:4])
ls, err := chacha20.NewUnauthenticatedCipher(c.lengthKey[:], nonce)
if err != nil {
return err
}
ls.XORKeyStream(c.buf, c.buf[:4])
c.buf[4] = byte(padding)
copy(c.buf[5:], payload)
packetEnd := 5 + len(payload) + padding
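A sketch (placeholder key and sequence number, not from the commit) of the per-packet setup both read and write paths above now share: the sequence number fills the last 4 bytes of a 12-byte nonce, the first 32 keystream bytes become the Poly1305 key, and the next 32 are discarded:

package main

import (
	"encoding/binary"
	"fmt"

	"golang.org/x/crypto/chacha20"
)

func main() {
	var contentKey [32]byte // in SSH this comes from key derivation
	seqNum := uint32(7)     // per-packet sequence number

	nonce := make([]byte, 12)
	binary.BigEndian.PutUint32(nonce[8:], seqNum)

	s, err := chacha20.NewUnauthenticatedCipher(contentKey[:], nonce)
	if err != nil {
		panic(err)
	}

	var polyKey, discard [32]byte
	s.XORKeyStream(polyKey[:], polyKey[:]) // first 32 keystream bytes → Poly1305 key
	s.XORKeyStream(discard[:], discard[:]) // next 32 bytes are skipped

	fmt.Printf("poly key: %x...\n", polyKey[:8])
}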


@ -30,12 +30,14 @@ import (
// These constants represent the algorithm names for key types supported by this
// package.
const (
KeyAlgoRSA = "ssh-rsa"
KeyAlgoDSA = "ssh-dss"
KeyAlgoECDSA256 = "ecdsa-sha2-nistp256"
KeyAlgoECDSA384 = "ecdsa-sha2-nistp384"
KeyAlgoECDSA521 = "ecdsa-sha2-nistp521"
KeyAlgoED25519 = "ssh-ed25519"
KeyAlgoRSA = "ssh-rsa"
KeyAlgoDSA = "ssh-dss"
KeyAlgoECDSA256 = "ecdsa-sha2-nistp256"
KeyAlgoSKECDSA256 = "sk-ecdsa-sha2-nistp256@openssh.com"
KeyAlgoECDSA384 = "ecdsa-sha2-nistp384"
KeyAlgoECDSA521 = "ecdsa-sha2-nistp521"
KeyAlgoED25519 = "ssh-ed25519"
KeyAlgoSKED25519 = "sk-ssh-ed25519@openssh.com"
)
// These constants represent non-default signature algorithms that are supported
@ -58,9 +60,13 @@ func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err err
return parseDSA(in)
case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521:
return parseECDSA(in)
case KeyAlgoSKECDSA256:
return parseSKECDSA(in)
case KeyAlgoED25519:
return parseED25519(in)
case CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01:
case KeyAlgoSKED25519:
return parseSKEd25519(in)
case CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoSKECDSA256v01, CertAlgoED25519v01, CertAlgoSKED25519v01:
cert, err := parseCert(in, certToPrivAlgo(algo))
if err != nil {
return nil, nil, err
@ -685,6 +691,218 @@ func (k *ecdsaPublicKey) CryptoPublicKey() crypto.PublicKey {
return (*ecdsa.PublicKey)(k)
}
// skFields holds the additional fields present in U2F/FIDO2 signatures.
// See openssh/PROTOCOL.u2f 'SSH U2F Signatures' for details.
type skFields struct {
// Flags contains U2F/FIDO2 flags such as 'user present'
Flags byte
// Counter is a monotonic signature counter which can be
// used to detect concurrent use of a private key, should
// it be extracted from hardware.
Counter uint32
}
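Decoded by hand (hypothetical bytes), the sig.Rest payload those fields come from is one flags byte followed by a big-endian uint32 counter, which is what the ssh.Unmarshal into skFields recovers:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Hypothetical trailing signature bytes: user-present flag, counter 42.
	rest := []byte{0x01, 0x00, 0x00, 0x00, 0x2a}

	flags := rest[0]
	counter := binary.BigEndian.Uint32(rest[1:5])
	fmt.Printf("flags=%#x counter=%d\n", flags, counter) // flags=0x1 counter=42
}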
type skECDSAPublicKey struct {
// application is a URL-like string, typically "ssh:" for SSH.
// see openssh/PROTOCOL.u2f for details.
application string
ecdsa.PublicKey
}
func (k *skECDSAPublicKey) Type() string {
return KeyAlgoSKECDSA256
}
func (k *skECDSAPublicKey) nistID() string {
return "nistp256"
}
func parseSKECDSA(in []byte) (out PublicKey, rest []byte, err error) {
var w struct {
Curve string
KeyBytes []byte
Application string
Rest []byte `ssh:"rest"`
}
if err := Unmarshal(in, &w); err != nil {
return nil, nil, err
}
key := new(skECDSAPublicKey)
key.application = w.Application
if w.Curve != "nistp256" {
return nil, nil, errors.New("ssh: unsupported curve")
}
key.Curve = elliptic.P256()
key.X, key.Y = elliptic.Unmarshal(key.Curve, w.KeyBytes)
if key.X == nil || key.Y == nil {
return nil, nil, errors.New("ssh: invalid curve point")
}
return key, w.Rest, nil
}
func (k *skECDSAPublicKey) Marshal() []byte {
// See RFC 5656, section 3.1.
keyBytes := elliptic.Marshal(k.Curve, k.X, k.Y)
w := struct {
Name string
ID string
Key []byte
Application string
}{
k.Type(),
k.nistID(),
keyBytes,
k.application,
}
return Marshal(&w)
}
func (k *skECDSAPublicKey) Verify(data []byte, sig *Signature) error {
if sig.Format != k.Type() {
return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type())
}
h := ecHash(k.Curve).New()
h.Write([]byte(k.application))
appDigest := h.Sum(nil)
h.Reset()
h.Write(data)
dataDigest := h.Sum(nil)
var ecSig struct {
R *big.Int
S *big.Int
}
if err := Unmarshal(sig.Blob, &ecSig); err != nil {
return err
}
var skf skFields
if err := Unmarshal(sig.Rest, &skf); err != nil {
return err
}
blob := struct {
ApplicationDigest []byte `ssh:"rest"`
Flags byte
Counter uint32
MessageDigest []byte `ssh:"rest"`
}{
appDigest,
skf.Flags,
skf.Counter,
dataDigest,
}
original := Marshal(blob)
h.Reset()
h.Write(original)
digest := h.Sum(nil)
if ecdsa.Verify((*ecdsa.PublicKey)(&k.PublicKey), digest, ecSig.R, ecSig.S) {
return nil
}
return errors.New("ssh: signature did not verify")
}
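A sketch (hypothetical flags and counter) of the byte string the blob struct above marshals, i.e. what the authenticator actually signed per openssh/PROTOCOL.u2f: the two `ssh:"rest"` digests are emitted raw, the flags byte takes one byte, and the counter four big-endian bytes:

package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

func main() {
	appDigest := sha256.Sum256([]byte("ssh:"))         // application, per PROTOCOL.u2f
	msgDigest := sha256.Sum256([]byte("data to sign")) // the SSH signed data

	flags := byte(0x01)   // hypothetical: user-present bit set
	counter := uint32(42) // hypothetical signature counter

	signed := make([]byte, 0, 32+1+4+32)
	signed = append(signed, appDigest[:]...)
	signed = append(signed, flags)
	var ctr [4]byte
	binary.BigEndian.PutUint32(ctr[:], counter)
	signed = append(signed, ctr[:]...)
	signed = append(signed, msgDigest[:]...)

	fmt.Println(len(signed)) // 69: the message the authenticator signed
}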
type skEd25519PublicKey struct {
// application is a URL-like string, typically "ssh:" for SSH.
// see openssh/PROTOCOL.u2f for details.
application string
ed25519.PublicKey
}
func (k *skEd25519PublicKey) Type() string {
return KeyAlgoSKED25519
}
func parseSKEd25519(in []byte) (out PublicKey, rest []byte, err error) {
var w struct {
KeyBytes []byte
Application string
Rest []byte `ssh:"rest"`
}
if err := Unmarshal(in, &w); err != nil {
return nil, nil, err
}
key := new(skEd25519PublicKey)
key.application = w.Application
key.PublicKey = ed25519.PublicKey(w.KeyBytes)
return key, w.Rest, nil
}
func (k *skEd25519PublicKey) Marshal() []byte {
w := struct {
Name string
KeyBytes []byte
Application string
}{
KeyAlgoSKED25519,
[]byte(k.PublicKey),
k.application,
}
return Marshal(&w)
}
func (k *skEd25519PublicKey) Verify(data []byte, sig *Signature) error {
if sig.Format != k.Type() {
return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type())
}
h := sha256.New()
h.Write([]byte(k.application))
appDigest := h.Sum(nil)
h.Reset()
h.Write(data)
dataDigest := h.Sum(nil)
var edSig struct {
Signature []byte `ssh:"rest"`
}
if err := Unmarshal(sig.Blob, &edSig); err != nil {
return err
}
var skf skFields
if err := Unmarshal(sig.Rest, &skf); err != nil {
return err
}
blob := struct {
ApplicationDigest []byte `ssh:"rest"`
Flags byte
Counter uint32
MessageDigest []byte `ssh:"rest"`
}{
appDigest,
skf.Flags,
skf.Counter,
dataDigest,
}
original := Marshal(blob)
edKey := (ed25519.PublicKey)(k.PublicKey)
if ok := ed25519.Verify(edKey, original, edSig.Signature); !ok {
return errors.New("ssh: signature did not verify")
}
return nil
}
// NewSignerFromKey takes an *rsa.PrivateKey, *dsa.PrivateKey,
// *ecdsa.PrivateKey or any other crypto.Signer and returns a
// corresponding Signer instance. ECDSA keys must use P-256, P-384 or
@ -837,7 +1055,8 @@ func NewPublicKey(key interface{}) (PublicKey, error) {
}
// ParsePrivateKey returns a Signer from a PEM encoded private key. It supports
// the same keys as ParseRawPrivateKey.
// the same keys as ParseRawPrivateKey. If the private key is encrypted, it
// will return a PassphraseMissingError.
func ParsePrivateKey(pemBytes []byte) (Signer, error) {
key, err := ParseRawPrivateKey(pemBytes)
if err != nil {
@ -850,8 +1069,8 @@ func ParsePrivateKey(pemBytes []byte) (Signer, error) {
// ParsePrivateKeyWithPassphrase returns a Signer from a PEM encoded private
// key and passphrase. It supports the same keys as
// ParseRawPrivateKeyWithPassphrase.
func ParsePrivateKeyWithPassphrase(pemBytes, passPhrase []byte) (Signer, error) {
key, err := ParseRawPrivateKeyWithPassphrase(pemBytes, passPhrase)
func ParsePrivateKeyWithPassphrase(pemBytes, passphrase []byte) (Signer, error) {
key, err := ParseRawPrivateKeyWithPassphrase(pemBytes, passphrase)
if err != nil {
return nil, err
}
@ -867,8 +1086,21 @@ func encryptedBlock(block *pem.Block) bool {
return strings.Contains(block.Headers["Proc-Type"], "ENCRYPTED")
}
// A PassphraseMissingError indicates that parsing this private key requires a
// passphrase. Use ParsePrivateKeyWithPassphrase.
type PassphraseMissingError struct {
// PublicKey will be set if the private key format includes an unencrypted
// public key along with the encrypted private key.
PublicKey PublicKey
}
func (*PassphraseMissingError) Error() string {
return "ssh: this private key is passphrase protected"
}
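A usage sketch (hypothetical key path and passphrase) of how callers are expected to handle the new error type:

package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	pemBytes, err := ioutil.ReadFile("id_rsa") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	signer, err := ssh.ParsePrivateKey(pemBytes)
	if _, ok := err.(*ssh.PassphraseMissingError); ok {
		// The key is encrypted; retry with a passphrase.
		signer, err = ssh.ParsePrivateKeyWithPassphrase(pemBytes, []byte("secret"))
	}
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(signer.PublicKey().Type())
}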
// ParseRawPrivateKey returns a private key from a PEM encoded private key. It
// supports RSA (PKCS#1), PKCS#8, DSA (OpenSSL), and ECDSA private keys.
// supports RSA (PKCS#1), PKCS#8, DSA (OpenSSL), and ECDSA private keys. If the
// private key is encrypted, it will return a PassphraseMissingError.
func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) {
block, _ := pem.Decode(pemBytes)
if block == nil {
@ -876,7 +1108,7 @@ func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) {
}
if encryptedBlock(block) {
return nil, errors.New("ssh: cannot decode encrypted private keys")
return nil, &PassphraseMissingError{}
}
switch block.Type {
@ -899,24 +1131,22 @@ func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) {
// ParseRawPrivateKeyWithPassphrase returns a private key decrypted with
// passphrase from a PEM encoded private key. If the passphrase is wrong, it
// returns x509.IncorrectPasswordError.
func ParseRawPrivateKeyWithPassphrase(pemBytes, passPhrase []byte) (interface{}, error) {
func ParseRawPrivateKeyWithPassphrase(pemBytes, passphrase []byte) (interface{}, error) {
block, _ := pem.Decode(pemBytes)
if block == nil {
return nil, errors.New("ssh: no key found")
}
buf := block.Bytes
if encryptedBlock(block) {
if x509.IsEncryptedPEMBlock(block) {
var err error
buf, err = x509.DecryptPEMBlock(block, passPhrase)
if err != nil {
if err == x509.IncorrectPasswordError {
return nil, err
}
return nil, fmt.Errorf("ssh: cannot decode encrypted private keys: %v", err)
}
if !encryptedBlock(block) || !x509.IsEncryptedPEMBlock(block) {
return nil, errors.New("ssh: not an encrypted key")
}
buf, err := x509.DecryptPEMBlock(block, passphrase)
if err != nil {
if err == x509.IncorrectPasswordError {
return nil, err
}
return nil, fmt.Errorf("ssh: cannot decode encrypted private keys: %v", err)
}
switch block.Type {
@ -926,8 +1156,6 @@ func ParseRawPrivateKeyWithPassphrase(pemBytes, passPhrase []byte) (interface{},
return x509.ParseECPrivateKey(buf)
case "DSA PRIVATE KEY":
return ParseDSAPrivateKey(buf)
case "OPENSSH PRIVATE KEY":
return parseOpenSSHPrivateKey(buf)
default:
return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type)
}


@ -284,8 +284,8 @@ func (s *connection) serverHandshake(config *ServerConfig) (*Permissions, error)
func isAcceptableAlgo(algo string) bool {
switch algo {
case KeyAlgoRSA, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoED25519,
CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01:
case KeyAlgoRSA, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoSKECDSA256, KeyAlgoED25519, KeyAlgoSKED25519,
CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoSKECDSA256v01, CertAlgoED25519v01, CertAlgoSKED25519v01:
return true
}
return false

vendor/modules.txt vendored

@ -58,8 +58,6 @@ github.com/aws/aws-sdk-go/private/protocol/rest
github.com/aws/aws-sdk-go/private/protocol/restxml
github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil
github.com/aws/aws-sdk-go/service/s3
github.com/aws/aws-sdk-go/service/s3/s3iface
github.com/aws/aws-sdk-go/service/s3/s3manager
github.com/aws/aws-sdk-go/service/sts
github.com/aws/aws-sdk-go/service/sts/stsiface
# github.com/billziss-gh/cgofuse v1.2.0
@ -168,7 +166,7 @@ github.com/spf13/pflag
# github.com/stretchr/testify v1.4.0
github.com/stretchr/testify/assert
github.com/stretchr/testify/require
# github.com/t3rm1n4l/go-mega v0.0.0-20191014094753-e8695d78299a
# github.com/t3rm1n4l/go-mega v0.0.0-20200111163430-ad0abe77ec81
github.com/t3rm1n4l/go-mega
# github.com/xanzy/ssh-agent v0.2.1
github.com/xanzy/ssh-agent
@ -205,13 +203,13 @@ go.opencensus.io/trace/propagation
go.opencensus.io/trace/tracestate
# goftp.io/server v0.0.0-20190812052725-72a57b186803
goftp.io/server
# golang.org/x/crypto v0.0.0-20191108234033-bd318be0434a
# golang.org/x/crypto v0.0.0-20200109152110-61a87790db17
golang.org/x/crypto/bcrypt
golang.org/x/crypto/blowfish
golang.org/x/crypto/chacha20
golang.org/x/crypto/curve25519
golang.org/x/crypto/ed25519
golang.org/x/crypto/ed25519/internal/edwards25519
golang.org/x/crypto/internal/chacha20
golang.org/x/crypto/internal/subtle
golang.org/x/crypto/nacl/secretbox
golang.org/x/crypto/pbkdf2