Replace test_all.sh with test_all.go which is cross platform and parallel

Branch: s3-about
Nick Craig-Wood 2015-12-30 00:18:31 +00:00
parent 2df5d95d70
commit ddf39f2d57
5 changed files with 162 additions and 66 deletions
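The parallelism in the title comes from starting one goroutine per remote (plus one more per remote for the -subdir variant) and collecting the results over a channel, which is what the new fs/test_all.go below does. A minimal sketch of that fan-out/fan-in shape, using hypothetical names rather than the real types from the file:

```go
package main

import (
	"fmt"
	"time"
)

// result is a hypothetical stand-in for the *test values the real runner sends back.
type result struct {
	name string
	err  error
}

func main() {
	tasks := []string{"TestSwift:", "TestS3:"} // a couple of remote names, for illustration only
	results := make(chan result, len(tasks))

	// Fan out: one goroutine per task, like "go newTest(remote, false).run(results)" below.
	for _, name := range tasks {
		go func(name string) {
			time.Sleep(10 * time.Millisecond) // stands in for running the compiled test binary
			results <- result{name: name}
		}(name)
	}

	// Fan in: receive exactly as many results as goroutines were started.
	for i := 0; i < len(tasks); i++ {
		r := <-results
		fmt.Printf("%s finished, err=%v\n", r.name, r.err)
	}
}
```

The real file buffers its results channel (capacity 8) and tracks the number of outstanding tests in an `awaiting` counter rather than using the slice length.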


@@ -105,7 +105,7 @@ but they can be run against any of the remotes.
If you want to run all the integration tests against all the remotes,
then run in that directory
-    ./test_all.sh
+    go run test_all.go
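The new runner also takes a couple of flags (defined in fs/test_all.go in this commit), so a run can be restricted to particular remotes or to a single attempt per test; for example, something like

    go run test_all.go -run TestSwift:,TestS3: -maxtries 1

should test just Swift and S3 without retrying failures.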
## Making a release ##


@@ -9,7 +9,7 @@ rclone:
test: rclone
	go test ./...
-	cd fs && ./test_all.sh
+	cd fs && go run test_all.go
check: rclone
	go vet ./...

fs/test_all.go (new file, 159 lines)

@@ -0,0 +1,159 @@
// +build ignore

// Run tests for all the remotes
//
// Run with go run test_all.go
package main

import (
	"flag"
	"log"
	"os"
	"os/exec"
	"runtime"
	"strings"
	"time"
)

var (
	remotes = []string{
		"TestSwift:",
		"TestS3:",
		"TestDrive:",
		"TestGoogleCloudStorage:",
		"TestDropbox:",
		"TestAmazonCloudDrive:",
		"TestOneDrive:",
		"TestHubic:",
	}
	binary = "fs.test"
	// Flags
	maxTries = flag.Int("maxtries", 3, "Number of times to try each test")
	runTests = flag.String("run", "", "Comma separated list of remotes to test, eg 'TestSwift:,TestS3'")
)

// test holds info about a running test
type test struct {
	remote    string
	subdir    bool
	cmdLine   []string
	cmdString string
	try       int
	err       error
	output    []byte
}

// newTest creates a new test
func newTest(remote string, subdir bool) *test {
	t := &test{
		remote:  remote,
		subdir:  subdir,
		cmdLine: []string{"./" + binary, "-test.v", "-remote", remote},
		try:     1,
	}
	if subdir {
		t.cmdLine = append(t.cmdLine, "-subdir")
	}
	t.cmdString = strings.Join(t.cmdLine, " ")
	return t
}

// trial runs a single test
func (t *test) trial() {
	log.Printf("%q - Starting (try %d/%d)", t.cmdString, t.try, *maxTries)
	cmd := exec.Command(t.cmdLine[0], t.cmdLine[1:]...)
	start := time.Now()
	t.output, t.err = cmd.CombinedOutput()
	duration := time.Since(start)
	if t.passed() {
		log.Printf("%q - Finished OK in %v (try %d/%d)", t.cmdString, duration, t.try, *maxTries)
	} else {
		log.Printf("%q - Finished ERROR in %v (try %d/%d): %v", t.cmdString, duration, t.try, *maxTries, t.err)
	}
}

// passed returns true if the test passed
func (t *test) passed() bool {
	return t.err == nil
}
// run runs the trials for this test, stopping after the first pass
func (t *test) run(result chan<- *test) {
	for t.try = 1; t.try <= *maxTries; t.try++ {
		t.trial()
		if t.passed() {
			break
		}
	}
	if !t.passed() {
		log.Println("------------------------------------------------------------")
		log.Println(string(t.output))
		log.Println("------------------------------------------------------------")
	}
	result <- t
}
// makeTestBinary makes the binary we will run
func makeTestBinary() {
	if runtime.GOOS == "windows" {
		binary += ".exe"
	}
	log.Printf("Making test binary %q", binary)
	err := exec.Command("go", "test", "-c", "-o", binary).Run()
	if err != nil {
		log.Fatalf("Failed to make test binary: %v", err)
	}
	if _, err := os.Stat(binary); err != nil {
		log.Fatalf("Couldn't find test binary %q", binary)
	}
}

// removeTestBinary removes the binary made in makeTestBinary
func removeTestBinary() {
	err := os.Remove(binary) // Delete the binary when finished
	if err != nil {
		log.Printf("Error removing test binary %q: %v", binary, err)
	}
}

func main() {
	flag.Parse()
	if *runTests != "" {
		remotes = strings.Split(*runTests, ",")
	}
	log.Printf("Testing remotes: %s", strings.Join(remotes, ", "))
	start := time.Now()
	makeTestBinary()
	defer removeTestBinary()

	// start the tests
	results := make(chan *test, 8)
	awaiting := 0
	for _, remote := range remotes {
		awaiting += 2
		go newTest(remote, false).run(results)
		go newTest(remote, true).run(results)
	}

	// Wait for the tests to finish
	var failed []*test
	for ; awaiting > 0; awaiting-- {
		t := <-results
		if !t.passed() {
			failed = append(failed, t)
		}
	}
	duration := time.Since(start)

	// Summarise results
	if len(failed) == 0 {
		log.Printf("PASS: All tests finished OK in %v", duration)
	} else {
		log.Printf("FAIL: %d tests failed in %v", len(failed), duration)
		for _, t := range failed {
			log.Printf(" * %s", t.cmdString)
		}
		os.Exit(1)
	}
}
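A note on the first line of the new file: the "+build ignore" constraint keeps test_all.go out of normal builds of the fs package (the file declares its own package main), while running it by name with go run still works. A tiny standalone file using the same pattern, with a hypothetical name, purely for illustration:

```go
// +build ignore

// This file is skipped by ordinary builds of its directory;
// run it explicitly with "go run hello_ignored.go".
package main

import "fmt"

func main() {
	fmt.Println("only runs when invoked via go run")
}
```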


@@ -1,32 +0,0 @@
#!/bin/bash

go install

REMOTES="
TestSwift:
TestS3:
TestDrive:
TestGoogleCloudStorage:
TestDropbox:
TestAmazonCloudDrive:
TestOneDrive:
TestHubic:
"

function test_remote {
    args=$@
    echo "@go test $args"
    go test $args || {
        echo "*** test $args FAILED ***"
        exit 1
    }
}

test_remote
test_remote --subdir

for remote in $REMOTES; do
    test_remote --remote $remote
    test_remote --remote $remote --subdir
done

echo "All OK"


@@ -1,51 +1,20 @@
-Perhaps make Md5sum() and Modtime() optional. Define the zero values
-"" and 0. Make it so we can support remotes which can't do those.
-Fix the docs
-  * factor the README.md into the docs directory
-  * create it as part of make by assembling other parts
-  * write long docs about each flag
Change lsd command so it doesn't show -1
  * Make sure all Fses show -1 for objects Zero for dates etc
  * Make test?
Put the TestRemote names into the Fs description
-Make test_all.sh use the TestRemote name automatically
+Make fs/test_all.go use the TestRemote name automatically
-Run errcheck and go vet in the make file
-  .. Also race detector?
-  .. go tool vet -shadow
Get rid of Storable?
-Write developer manual
Todo
  * FIXME: More -dry-run checks for object transfer
  * Might be quicker to check md5sums first? for swift <-> swift certainly, and maybe for small files
-  * swift: Ignoring the pseudo directories
-    * if object.PseudoDirectory {
-    * fmt.Printf("%9s %19s %s\n", "Directory", "-", fs.Remote())
-  * Make Account wrapper
-    * make Account do progress meter
-  * -timeout: Make all timeouts be settable with command line parameters
-  * Add max object size to fs metadata - 5GB for swift, infinite for local, ? for s3
-    * tie into -max-size flag
  * FIXME Make NewFs to return err.IsAnObject so can put the LimitedFs
    creation in common code? Or try for as much as possible?
  * FIXME Account all the transactons (ls etc) using a different
    Roundtripper wrapper which wraps the transactions?
-More rsync features
-  * include
-  * exclude
-  * max size
-  * -c, --checksum skip based on checksum, not mod-time & size
-Ideas for flags
-  * --retries N flag which would make rclone retry a sync until successful or it tried N times.
Ideas
  * could do encryption - put IV into metadata?
  * optimise remote copy container to another container using remote