New backend for Citrix Sharefile - Fixes #1543

Many thanks to Bob Droog for organizing a test account and extensive
testing.
Nick Craig-Wood 2019-08-27 22:50:07 +01:00
parent 1e7144eb63
commit 4627ac5709
18 changed files with 2537 additions and 1 deletion

View File

@@ -29,6 +29,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
* Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
* Box [:page_facing_up:](https://rclone.org/box/)
* Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
* Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
* DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
* Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)

View File

@@ -31,6 +31,7 @@ import (
_ "github.com/rclone/rclone/backend/qingstor"
_ "github.com/rclone/rclone/backend/s3"
_ "github.com/rclone/rclone/backend/sftp"
_ "github.com/rclone/rclone/backend/sharefile"
_ "github.com/rclone/rclone/backend/swift"
_ "github.com/rclone/rclone/backend/union"
_ "github.com/rclone/rclone/backend/webdav"

View File

@@ -0,0 +1,152 @@
// Package api contains definitions for using the Citrix ShareFile API
package api
import (
"fmt"
"time"
"github.com/pkg/errors"
)
// ListRequestSelect should be used in $select for Items/Children
const ListRequestSelect = "odata.count,FileCount,Name,FileName,CreationDate,IsHidden,FileSizeBytes,odata.type,Id,Hash,ClientModifiedDate"
// ListResponse is returned from the Items/Children call
type ListResponse struct {
OdataCount int `json:"odata.count"`
Value []Item `json:"value"`
}
// Item Types
const (
ItemTypeFolder = "ShareFile.Api.Models.Folder"
ItemTypeFile = "ShareFile.Api.Models.File"
)
// Item refers to a file or folder
type Item struct {
FileCount int32 `json:"FileCount,omitempty"`
Name string `json:"Name,omitempty"`
FileName string `json:"FileName,omitempty"`
CreatedAt time.Time `json:"CreationDate,omitempty"`
ModifiedAt time.Time `json:"ClientModifiedDate,omitempty"`
IsHidden bool `json:"IsHidden,omitempty"`
Size int64 `json:"FileSizeBytes,omitempty"`
Type string `json:"odata.type,omitempty"`
ID string `json:"Id,omitempty"`
Hash string `json:"Hash,omitempty"`
}
// Error is an odata error return
type Error struct {
Code string `json:"code"`
Message struct {
Lang string `json:"lang"`
Value string `json:"value"`
} `json:"message"`
Reason string `json:"reason"`
}
// Satisfy error interface
func (e *Error) Error() string {
return fmt.Sprintf("%s: %s: %s", e.Message.Value, e.Code, e.Reason)
}
// Check Error satisfies error interface
var _ error = &Error{}
// DownloadSpecification is the response to /Items/Download
type DownloadSpecification struct {
Token string `json:"DownloadToken"`
URL string `json:"DownloadUrl"`
Metadata string `json:"odata.metadata"`
Type string `json:"odata.type"`
}
// UploadRequest is set to /Items/Upload2 to receive an UploadSpecification
type UploadRequest struct {
Method string `json:"method"` // Upload method: one of: standard, streamed or threaded
Raw bool `json:"raw"` // Raw post if true or MIME upload if false
Filename string `json:"fileName"` // Uploaded item file name.
Filesize *int64 `json:"fileSize,omitempty"` // Uploaded item file size.
Overwrite bool `json:"overwrite"` // Indicates whether items with the same name will be overwritten or not.
CreatedDate time.Time `json:"ClientCreatedDate"` // Created Date of this Item.
ModifiedDate time.Time `json:"ClientModifiedDate"` // Modified Date of this Item.
BatchID string `json:"batchId,omitempty"` // Indicates part of a batch. Batched uploads do not send notification until the whole batch is completed.
BatchLast *bool `json:"batchLast,omitempty"` // Indicates this is the last in a batch. Upload notifications for the whole batch are sent after this upload.
CanResume *bool `json:"canResume,omitempty"` // Indicates uploader supports resume.
StartOver *bool `json:"startOver,omitempty"` // Indicates uploader wants to restart the file - i.e., ignore previous failed upload attempts.
Tool string `json:"tool,omitempty"` // Identifies the uploader tool.
Title string `json:"title,omitempty"` // Item Title
Details string `json:"details,omitempty"` // Item description
IsSend *bool `json:"isSend,omitempty"` // Indicates that this upload is part of a Send operation
SendGUID string `json:"sendGuid,omitempty"` // Used if IsSend is true. Specifies which Send operation this upload is part of.
OpID string `json:"opid,omitempty"` // Used for Asynchronous copy/move operations - called by Zones to push files to other Zones
ThreadCount *int `json:"threadCount,omitempty"` // Specifies the number of threads the threaded uploader will use. Only used if method is threaded, ignored otherwise
Notify *bool `json:"notify,omitempty"` // Indicates whether users will be notified of this upload - based on folder preferences
ExpirationDays *int `json:"expirationDays,omitempty"` // File expiration days
BaseFileID string `json:"baseFileId,omitempty"` // Used to check conflict in file during File Upload.
}
// UploadSpecification is returned from /Items/Upload
type UploadSpecification struct {
Method string `json:"Method"` // The Upload method that must be used for this upload
PrepareURI string `json:"PrepareUri"` // If provided, clients must issue a request to this Uri before uploading any data.
ChunkURI string `json:"ChunkUri"` // Specifies the URI the client must send the file data to
FinishURI string `json:"FinishUri"` // If provided, specifies the final call the client must perform to finish the upload process
ProgressData string `json:"ProgressData"` // Allows the client to check progress of standard uploads
IsResume bool `json:"IsResume"` // Specifies a Resumable upload is supported.
ResumeIndex int64 `json:"ResumeIndex"` // Specifies the initial index for resuming, if IsResume is true.
ResumeOffset int64 `json:"ResumeOffset"` // Specifies the initial file offset by bytes, if IsResume is true
ResumeFileHash string `json:"ResumeFileHash"` // Specifies the MD5 hash of the first ResumeOffset bytes of the partial file found at the server
MaxNumberOfThreads int `json:"MaxNumberOfThreads"` // Specifies the max number of chunks that can be sent simultaneously for threaded uploads
}
// UploadFinishResponse is returned from calling UploadSpecification.FinishURI
type UploadFinishResponse struct {
Error bool `json:"error"`
ErrorMessage string `json:"errorMessage"`
ErrorCode int `json:"errorCode"`
Value []struct {
UploadID string `json:"uploadid"`
ParentID string `json:"parentid"`
ID string `json:"id"`
StreamID string `json:"streamid"`
FileName string `json:"filename"`
DisplayName string `json:"displayname"`
Size int `json:"size"`
Md5 string `json:"md5"`
} `json:"value"`
}
// ID returns the ID of the first response if available
func (finish *UploadFinishResponse) ID() (string, error) {
if finish.Error {
return "", errors.Errorf("upload failed: %s (%d)", finish.ErrorMessage, finish.ErrorCode)
}
if len(finish.Value) == 0 {
return "", errors.New("upload failed: no results returned")
}
return finish.Value[0].ID, nil
}
// Parent is the ID of the parent folder
type Parent struct {
ID string `json:"Id,omitempty"`
}
// Zone is where the data is stored
type Zone struct {
ID string `json:"Id,omitempty"`
}
// UpdateItemRequest is sent to PATCH /v3/Items(id)
type UpdateItemRequest struct {
Name string `json:"Name,omitempty"`
FileName string `json:"FileName,omitempty"`
Description string `json:"Description,omitempty"`
ExpirationDate *time.Time `json:"ExpirationDate,omitempty"`
Parent *Parent `json:"Parent,omitempty"`
Zone *Zone `json:"Zone,omitempty"`
ModifiedAt *time.Time `json:"ClientModifiedDate,omitempty"`
}
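
As a sketch of how these types fit together, the snippet below decodes an invented Items/Children payload using the `ListResponse` and `Item` types above, and tells files from folders with the `ItemType` constants (the JSON body and IDs are made up for illustration):

```
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/rclone/rclone/backend/sharefile/api"
)

func main() {
	// Illustrative payload in the shape of an Items/Children response.
	body := []byte(`{"odata.count":2,"value":[
		{"Name":"docs","odata.type":"ShareFile.Api.Models.Folder","Id":"fo1"},
		{"FileName":"report.txt","odata.type":"ShareFile.Api.Models.File","Id":"fi1","FileSizeBytes":42}]}`)
	var list api.ListResponse
	if err := json.Unmarshal(body, &list); err != nil {
		log.Fatal(err)
	}
	for _, item := range list.Value {
		if item.Type == api.ItemTypeFolder {
			fmt.Printf("dir  %s (id %s)\n", item.Name, item.ID)
		} else {
			fmt.Printf("file %s (%d bytes, id %s)\n", item.FileName, item.Size, item.ID)
		}
	}
}
```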

View File

@@ -0,0 +1,22 @@
// +build ignore
package main
import (
"log"
"net/http"
"github.com/shurcooL/vfsgen"
)
func main() {
var AssetDir http.FileSystem = http.Dir("./tzdata")
err := vfsgen.Generate(AssetDir, vfsgen.Options{
PackageName: "sharefile",
BuildTags: "!dev",
VariableName: "tzdata",
})
if err != nil {
log.Fatalln(err)
}
}

View File

@@ -0,0 +1,76 @@
/*
Translate file names for sharefile
*/
package sharefile
import (
"regexp"
"strings"
)
// charMap holds replacements for characters
//
// Sharefile has a restricted set of characters compared to other
cloud storage systems, so we map these to the FULLWIDTH unicode
// equivalents
//
// http://unicode-search.net/unicode-namesearch.pl?term=SOLIDUS
var (
charMap = map[rune]rune{
'\\': '＼', // FULLWIDTH REVERSE SOLIDUS
'*': '＊', // FULLWIDTH ASTERISK
'<': '＜', // FULLWIDTH LESS-THAN SIGN
'>': '＞', // FULLWIDTH GREATER-THAN SIGN
'?': '？', // FULLWIDTH QUESTION MARK
':': '：', // FULLWIDTH COLON
'|': '｜', // FULLWIDTH VERTICAL LINE
'"': '＂', // FULLWIDTH QUOTATION MARK
'.': '．', // FULLWIDTH FULL STOP
' ': '␠', // SYMBOL FOR SPACE
}
invCharMap map[rune]rune
fixStartingWithPeriod = regexp.MustCompile(`(/|^)\.`)
fixEndingWithPeriod = regexp.MustCompile(`\.(/|$)`)
fixStartingWithSpace = regexp.MustCompile(`(/|^) `)
fixEndingWithSpace = regexp.MustCompile(` (/|$)`)
)
func init() {
// Create inverse charMap
invCharMap = make(map[rune]rune, len(charMap))
for k, v := range charMap {
invCharMap[v] = k
}
}
// replaceReservedChars takes a path and substitutes any reserved
// characters in it
func replaceReservedChars(in string) string {
// Names can't start with a period '.'
in = fixStartingWithPeriod.ReplaceAllString(in, "$1"+string(charMap['.']))
// Names can't end with a period '.'
in = fixEndingWithPeriod.ReplaceAllString(in, string(charMap['.'])+"$1")
// Names can't start with space
in = fixStartingWithSpace.ReplaceAllString(in, "$1"+string(charMap[' ']))
// Names can't end with space
in = fixEndingWithSpace.ReplaceAllString(in, string(charMap[' '])+"$1")
// Replace reserved characters
return strings.Map(func(c rune) rune {
if replacement, ok := charMap[c]; ok && c != '.' && c != '~' && c != ' ' {
return replacement
}
return c
}, in)
}
// restoreReservedChars takes a path and undoes any substitutions
// made by replaceReservedChars
func restoreReservedChars(in string) string {
return strings.Map(func(c rune) rune {
if replacement, ok := invCharMap[c]; ok {
return replacement
}
return c
}, in)
}

View File

@@ -0,0 +1,31 @@
package sharefile
import "testing"
func TestReplace(t *testing.T) {
for _, test := range []struct {
in string
out string
}{
{"", ""},
{"abc 123", "abc 123"},
{`\*<>?:|#%".~`, `＼＊＜＞？：｜#%＂.~`},
{`\*<>?:|#%".~/\*<>?:|#%".~`, `＼＊＜＞？：｜#%＂.~/＼＊＜＞？：｜#%＂.~`},
{" leading space", "␠leading space"},
{"trailing space ", "trailing space␠"},
{".leading dot", "leading dot"},
{"trailing dot.", "trailing dot"},
{" leading space/ leading space/ leading space", "␠leading space/␠leading space/␠leading space"},
{"trailing dot./trailing dot./trailing dot.", "trailing dot/trailing dot/trailing dot"},
{".leading dot/..leading dot/.leading dot", "leading dot/.leading dot/leading dot"},
} {
got := replaceReservedChars(test.in)
if got != test.out {
t.Errorf("replaceReservedChars(%q) want %q got %q", test.in, test.out, got)
}
got2 := restoreReservedChars(got)
if got2 != test.in {
t.Errorf("restoreReservedChars(%q) want %q got %q", got, test.in, got2)
}
}
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,34 @@
// Test filesystem interface
package sharefile
import (
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestSharefile:",
NilObject: (*Object)(nil),
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: minChunkSize,
CeilChunkSize: fstests.NextPowerOfTwo,
},
})
}
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}
func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadCutoff(cs)
}
var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
_ fstests.SetUploadCutoffer = (*Fs)(nil)
)

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,18 @@
#!/bin/bash
set -e
# Extract just the America/New_York timezone from the Go distribution's zoneinfo.zip
tzinfo=$(go env GOROOT)/lib/time/zoneinfo.zip
rm -rf tzdata
mkdir tzdata
cd tzdata
unzip ${tzinfo} America/New_York
cd ..
# Make the embedded assets
go run generate_tzdata.go
# tidy up
rm -rf tzdata

backend/sharefile/upload.go Normal file
View File

@@ -0,0 +1,261 @@
// Upload large files for sharefile
//
// Docs - https://api.sharefile.com/rest/docs/resource.aspx?name=Items#Upload_File
package sharefile
import (
"bytes"
"context"
"crypto/md5"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"strings"
"sync"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/sharefile/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/rest"
)
// largeUpload is used to control the upload of large files which need chunking
type largeUpload struct {
ctx context.Context
f *Fs // parent Fs
o *Object // object being uploaded
in io.Reader // read the data from here
wrap accounting.WrapFn // account parts being transferred
size int64 // total size
parts int64 // calculated number of parts, if known
info *api.UploadSpecification // where to post chunks etc
threads int // number of threads to use in upload
streamed bool // set if using streamed upload
}
// newLargeUpload starts an upload of object o from in with metadata in src
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, info *api.UploadSpecification) (up *largeUpload, err error) {
size := src.Size()
parts := int64(-1)
if size >= 0 {
parts = size / int64(o.fs.opt.ChunkSize)
if size%int64(o.fs.opt.ChunkSize) != 0 {
parts++
}
}
var streamed bool
switch strings.ToLower(info.Method) {
case "streamed":
streamed = true
case "threaded":
streamed = false
default:
return nil, errors.Errorf("can't use method %q with newLargeUpload", info.Method)
}
threads := fs.Config.Transfers
if threads > info.MaxNumberOfThreads {
threads = info.MaxNumberOfThreads
}
// unwrap the accounting from the input, we use wrap to put it
// back on after the buffering
in, wrap := accounting.UnWrap(in)
up = &largeUpload{
ctx: ctx,
f: f,
o: o,
in: in,
wrap: wrap,
size: size,
threads: threads,
info: info,
parts: parts,
streamed: streamed,
}
return up, nil
}
// parse the api.UploadFinishResponse in respBody
func (up *largeUpload) parseUploadFinishResponse(respBody []byte) (err error) {
var finish api.UploadFinishResponse
err = json.Unmarshal(respBody, &finish)
if err != nil {
// Sometimes the unmarshal fails in which case return the body
return errors.Errorf("upload: bad response: %q", bytes.TrimSpace(respBody))
}
return up.o.checkUploadResponse(up.ctx, &finish)
}
// Transfer a chunk
func (up *largeUpload) transferChunk(ctx context.Context, part int64, offset int64, body []byte, fileHash string) error {
md5sumRaw := md5.Sum(body)
md5sum := hex.EncodeToString(md5sumRaw[:])
size := int64(len(body))
// Add some more parameters to the ChunkURI
u := up.info.ChunkURI
u += fmt.Sprintf("&index=%d&byteOffset=%d&hash=%s&fmt=json",
part, offset, md5sum,
)
if fileHash != "" {
u += fmt.Sprintf("&finish=true&fileSize=%d&fileHash=%s",
offset+int64(len(body)),
fileHash,
)
}
opts := rest.Opts{
Method: "POST",
RootURL: u,
ContentLength: &size,
}
var respBody []byte
err := up.f.pacer.Call(func() (bool, error) {
fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body))
opts.Body = up.wrap(bytes.NewReader(body))
resp, err := up.f.srv.Call(ctx, &opts)
if err != nil {
fs.Debugf(up.o, "Error sending chunk %d: %v", part, err)
} else {
respBody, err = rest.ReadBody(resp)
}
// retry all errors now that the multipart upload has started
return err != nil, err
})
if err != nil {
fs.Debugf(up.o, "Error sending chunk %d: %v", part, err)
return err
}
// If last chunk and using "streamed" transfer, get the response back now
if up.streamed && fileHash != "" {
return up.parseUploadFinishResponse(respBody)
}
fs.Debugf(up.o, "Done sending chunk %d", part)
return nil
}
// finish closes off the large upload and reads the metadata
func (up *largeUpload) finish(ctx context.Context) error {
fs.Debugf(up.o, "Finishing large file upload")
// For a streamed transfer we will already have read the info
if up.streamed {
return nil
}
opts := rest.Opts{
Method: "POST",
RootURL: up.info.FinishURI,
}
var respBody []byte
err := up.f.pacer.Call(func() (bool, error) {
resp, err := up.f.srv.Call(ctx, &opts)
if err != nil {
return shouldRetry(resp, err)
}
respBody, err = rest.ReadBody(resp)
// retry all errors now that the multipart upload has started
return err != nil, err
})
if err != nil {
return err
}
return up.parseUploadFinishResponse(respBody)
}
// Upload uploads the chunks from the input
func (up *largeUpload) Upload(ctx context.Context) error {
if up.parts >= 0 {
fs.Debugf(up.o, "Starting upload of large file in %d chunks", up.parts)
} else {
fs.Debugf(up.o, "Starting streaming upload of large file")
}
var (
offset int64
errs = make(chan error, 1)
wg sync.WaitGroup
err error
wholeFileHash = md5.New()
eof = false
)
outer:
for part := int64(0); !eof; part++ {
// Check any errors
select {
case err = <-errs:
break outer
default:
}
// Get a block of memory
buf := up.f.getUploadBlock()
// Read the chunk
var n int
n, err = readers.ReadFill(up.in, buf)
if err == io.EOF {
eof = true
buf = buf[:n]
err = nil
} else if err != nil {
up.f.putUploadBlock(buf)
break outer
}
// Hash it
_, _ = io.Copy(wholeFileHash, bytes.NewBuffer(buf))
// Get file hash if was last chunk
fileHash := ""
if eof {
fileHash = hex.EncodeToString(wholeFileHash.Sum(nil))
}
// Transfer the chunk
wg.Add(1)
transferChunk := func(part, offset int64, buf []byte, fileHash string) {
defer wg.Done()
defer up.f.putUploadBlock(buf)
err := up.transferChunk(ctx, part, offset, buf, fileHash)
if err != nil {
select {
case errs <- err:
default:
}
}
}
if up.streamed {
transferChunk(part, offset, buf, fileHash) // streamed
} else {
go transferChunk(part, offset, buf, fileHash) // multithreaded
}
offset += int64(n)
}
wg.Wait()
// check size read is correct
if eof && err == nil && up.size >= 0 && up.size != offset {
err = errors.Errorf("upload: short read: read %d bytes expected %d", up.size, offset)
}
// read any errors
if err == nil {
select {
case err = <-errs:
default:
}
}
// finish regardless of errors
finishErr := up.finish(ctx)
if err == nil {
err = finishErr
}
return err
}
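
The part count computed in `newLargeUpload` above is a plain ceiling division of the file size by the chunk size; here is a standalone sketch of that arithmetic (the `chunkCount` helper name is ours, for illustration only):

```
package main

import "fmt"

// chunkCount mirrors the ceiling division in newLargeUpload:
// size/chunkSize, rounded up, or -1 when the size is unknown.
func chunkCount(size, chunkSize int64) int64 {
	if size < 0 {
		return -1
	}
	parts := size / chunkSize
	if size%chunkSize != 0 {
		parts++
	}
	return parts
}

func main() {
	const mb = int64(1024 * 1024)
	fmt.Println(chunkCount(130*mb, 64*mb)) // 3
	fmt.Println(chunkCount(128*mb, 64*mb)) // 2
	fmt.Println(chunkCount(-1, 64*mb))     // -1 (unknown size)
}
```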

View File

@@ -32,6 +32,7 @@ docs = [
"box.md",
"cache.md",
"chunker.md",
"sharefile.md",
"crypt.md",
"dropbox.md",
"ftp.md",

View File

@@ -17,6 +17,7 @@ Rclone is a command line program to sync files and directories to and from:
* {{< provider name="Backblaze B2" home="https://www.backblaze.com/b2/cloud-storage.html" config="/b2/" >}}
* {{< provider name="Box" home="https://www.box.com/" config="/box/" >}}
* {{< provider name="Ceph" home="http://ceph.com/" config="/s3/#ceph" >}}
* {{< provider name="Citrix ShareFile" home="http://sharefile.com/" config="/sharefile/" >}}
* {{< provider name="C14" home="https://www.online.net/en/storage/c14-cold-storage" config="/sftp/#c14" >}}
* {{< provider name="DigitalOcean Spaces" home="https://www.digitalocean.com/products/object-storage/" config="/s3/#digitalocean-spaces" >}}
* {{< provider name="Dreamhost" home="https://www.dreamhost.com/cloud/storage/" config="/s3/#dreamhost" >}}

View File

@@ -27,6 +27,7 @@ See the following for detailed instructions for
* [Box](/box/)
* [Cache](/cache/)
* [Chunker](/chunker/) - transparently splits large files for other remotes
* [Citrix ShareFile](/sharefile/)
* [Crypt](/crypt/) - to encrypt other remotes
* [DigitalOcean Spaces](/s3/#digitalocean-spaces)
* [Dropbox](/dropbox/)

View File

@@ -22,6 +22,7 @@ Here is an overview of the major features of each cloud storage system.
| Amazon S3 | MD5 | Yes | No | No | R/W |
| Backblaze B2 | SHA1 | Yes | No | No | R/W |
| Box | SHA1 | Yes | Yes | No | - |
| Citrix ShareFile | MD5 | Yes | Yes | No | - |
| Dropbox | DBHASH † | Yes | Yes | No | - |
| FTP | - | No | No | No | - |
| Google Cloud Storage | MD5 | Yes | No | No | R/W |
@@ -143,6 +144,7 @@ operations more efficient.
| Amazon S3 | No | Yes | No | No | No | Yes | Yes | No [#2178](https://github.com/rclone/rclone/issues/2178) | No | No |
| Backblaze B2 | No | Yes | No | No | Yes | Yes | Yes | Yes | No | No |
| Box | Yes | Yes | Yes | Yes | No [#575](https://github.com/rclone/rclone/issues/575) | No | Yes | Yes | No | Yes |
| Citrix ShareFile | Yes | Yes | Yes | Yes | No | No | Yes | No | No | Yes |
| Dropbox | Yes | Yes | Yes | Yes | No [#575](https://github.com/rclone/rclone/issues/575) | No | Yes | Yes | Yes | Yes |
| FTP | No | No | Yes | Yes | No | No | Yes | No [#2178](https://github.com/rclone/rclone/issues/2178) | No | Yes |
| Google Cloud Storage | Yes | Yes | No | No | No | Yes | Yes | No [#2178](https://github.com/rclone/rclone/issues/2178) | No | No |

docs/content/sharefile.md Normal file
View File

@ -0,0 +1,224 @@
---
title: "Citrix ShareFile"
description: "Rclone docs for Citrix ShareFile"
date: "2019-09-30"
---
## <i class="fas fa-share-square"></i> Citrix ShareFile
[Citrix ShareFile](https://sharefile.com) is a secure file sharing and transfer service aimed at business.
The initial setup for Citrix ShareFile involves getting a token from
Citrix ShareFile which you can do in your browser. `rclone config` walks you
through it.
Here is an example of how to make a remote called `remote`. First run:
rclone config
This will guide you through an interactive setup process:
```
No remotes found - make a new one
n) New remote
s) Set configuration password
q) Quit config
n/s/q> n
name> remote
Type of storage to configure.
Enter a string value. Press Enter for the default ("").
Choose a number from below, or type in your own value
XX / Citrix Sharefile
\ "sharefile"
Storage> sharefile
** See help for sharefile backend at: https://rclone.org/sharefile/ **
ID of the root folder
Leave blank to access "Personal Folders". You can use one of the
standard values here or any folder ID (long hex number ID).
Enter a string value. Press Enter for the default ("").
Choose a number from below, or type in your own value
1 / Access the Personal Folders. (Default)
\ ""
2 / Access the Favorites folder.
\ "favorites"
3 / Access all the shared folders.
\ "allshared"
4 / Access all the individual connectors.
\ "connectors"
5 / Access the home, favorites, and shared folders as well as the connectors.
\ "top"
root_folder_id>
Edit advanced config? (y/n)
y) Yes
n) No
y/n> n
Remote config
Use auto config?
* Say Y if not sure
* Say N if you are working on a remote or headless machine
y) Yes
n) No
y/n> y
If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth?state=XXX
Log in and authorize rclone for access
Waiting for code...
Got code
--------------------
[remote]
type = sharefile
endpoint = https://XXX.sharefile.com
token = {"access_token":"XXX","token_type":"bearer","refresh_token":"XXX","expiry":"2019-09-30T19:41:45.878561877+01:00"}
--------------------
y) Yes this is OK
e) Edit this remote
d) Delete this remote
y/e/d> y
```
See the [remote setup docs](/remote_setup/) for how to set it up on a
machine with no Internet browser available.
Note that rclone runs a webserver on your local machine to collect the
token as returned from Citrix ShareFile. This only runs from the moment it opens
your browser to the moment you get back the verification code. This
is on `http://127.0.0.1:53682/` and it may require you to unblock
it temporarily if you are running a host firewall.
Once configured you can then use `rclone` like this,
List directories in top level of your ShareFile
rclone lsd remote:
List all the files in your ShareFile
rclone ls remote:
To copy a local directory to a ShareFile directory called backup
rclone copy /home/source remote:backup
Paths may be as deep as required, eg `remote:directory/subdirectory`.
### Modified time and hashes ###
ShareFile allows modification times to be set on objects accurate to 1
second. These will be used to detect whether objects need syncing or
not.
ShareFile supports MD5 type hashes, so you can use the `--checksum`
flag.
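For example, to sync based on checksums rather than modification times
rclone sync --checksum /home/source remote:backup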
### Transfers ###
For files above 128MB rclone will use a chunked transfer. Rclone will
upload up to `--transfers` chunks at the same time (shared among all
the multipart uploads). Chunks are buffered in memory and are
normally 64MB, so increasing `--transfers` will increase memory use.
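For example, allowing 8 chunks in flight at the default 64MB chunk size will buffer up to about 512MB
rclone copy --transfers 8 /home/source remote:backup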
### Limitations ###
Note that ShareFile is case insensitive so you can't have a file called
"Hello.doc" and one called "hello.doc".
ShareFile only supports filenames up to 256 characters in length.
#### Restricted filename characters
In addition to the [default restricted characters set](/overview/#restricted-characters)
the following characters are also replaced:
| Character | Value | Replacement |
| --------- |:-----:|:-----------:|
| \\ | 0x5C | ＼ |
| * | 0x2A | ＊ |
| < | 0x3C | ＜ |
| > | 0x3E | ＞ |
| ? | 0x3F | ？ |
| : | 0x3A | ： |
| \| | 0x7C | ｜ |
| " | 0x22 | ＂ |
File names can also not start or end with the following characters.
These only get replaced if they are the first or last character in the
name:
| Character | Value | Replacement |
| --------- |:-----:|:-----------:|
| SP | 0x20 | ␠ |
| . | 0x2E | ． |
Invalid UTF-8 bytes will also be [replaced](/overview/#invalid-utf8),
as they can't be used in JSON strings.
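For example, a file named `backup?.doc` would be uploaded as `backup？.doc` (with a fullwidth question mark) and mapped back to `backup?.doc` when listed or downloaded.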
<!--- autogenerated options start - DO NOT EDIT, instead edit fs.RegInfo in backend/sharefile/sharefile.go then run make backenddocs -->
### Standard Options
Here are the standard options specific to sharefile (Citrix Sharefile).
#### --sharefile-root-folder-id
ID of the root folder
Leave blank to access "Personal Folders". You can use one of the
standard values here or any folder ID (long hex number ID).
- Config: root_folder_id
- Env Var: RCLONE_SHAREFILE_ROOT_FOLDER_ID
- Type: string
- Default: ""
- Examples:
- ""
- Access the Personal Folders. (Default)
- "favorites"
- Access the Favorites folder.
- "allshared"
- Access all the shared folders.
- "connectors"
- Access all the individual connectors.
- "top"
- Access the home, favorites, and shared folders as well as the connectors.
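For example, a remote pointed at the shared folders area would end up with a config section like this (the remote name here is illustrative):
```
[shared]
type = sharefile
root_folder_id = allshared
```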
### Advanced Options
Here are the advanced options specific to sharefile (Citrix Sharefile).
#### --sharefile-upload-cutoff
Cutoff for switching to multipart upload.
- Config: upload_cutoff
- Env Var: RCLONE_SHAREFILE_UPLOAD_CUTOFF
- Type: SizeSuffix
- Default: 128M
#### --sharefile-chunk-size
Upload chunk size. Must be a power of 2 >= 256k.
Making this larger will improve performance, but note that each chunk
is buffered in memory, one per transfer.
Reducing this will reduce memory usage but decrease performance.
- Config: chunk_size
- Env Var: RCLONE_SHAREFILE_CHUNK_SIZE
- Type: SizeSuffix
- Default: 64M
#### --sharefile-endpoint
Endpoint for API calls.
This is usually auto discovered as part of the oauth process, but can
be set manually to something like: https://XXX.sharefile.com
- Config: endpoint
- Env Var: RCLONE_SHAREFILE_ENDPOINT
- Type: string
- Default: ""
<!--- autogenerated options stop -->

View File

@@ -64,6 +64,7 @@
<li><a href="/box/"><i class="fa fa-archive"></i> Box</a></li>
<li><a href="/cache/"><i class="fa fa-archive"></i> Cache</a></li>
<li><a href="/chunker/"><i class="fa fa-cut"></i> Chunker (splits large files)</a></li>
<li><a href="/sharefile/"><i class="fas fa-share-square"></i> Citrix ShareFile</a></li>
<li><a href="/crypt/"><i class="fa fa-lock"></i> Crypt (encrypts the others)</a></li>
<li><a href="/dropbox/"><i class="fab fa-dropbox"></i> Dropbox</a></li>
<li><a href="/ftp/"><i class="fa fa-file"></i> FTP</a></li>

View File

@@ -187,7 +187,9 @@ backends:
fastlist: false
- backend: "putio"
remote: "TestPutio:"
subdir: false
fastlist: false
- backend: "sharefile"
remote: "TestSharefile:"
fastlist: false
- backend: "mailru"
remote: "TestMailru:"