From 5dc6d0df94076e116934c83b837e2dd416efa784 Mon Sep 17 00:00:00 2001
From: Robert van Gent
Date: Fri, 3 May 2019 16:38:05 -0700
Subject: deploy: Add tests; fix Windows

---
 deploy/deploy.go       | 152 ++++++++++------
 deploy/deployConfig.go |   1 +
 deploy/deploy_test.go  | 469 +++++++++++++++++++++++++++++++++++++++++++++++--
 3 files changed, 553 insertions(+), 69 deletions(-)

diff --git a/deploy/deploy.go b/deploy/deploy.go
index dadff7d40..40c49c2e5 100644
--- a/deploy/deploy.go
+++ b/deploy/deploy.go
@@ -20,6 +20,7 @@ import (
 	"crypto/md5"
 	"fmt"
 	"io"
+	"io/ioutil"
 	"mime"
 	"os"
 	"path/filepath"
@@ -46,6 +47,7 @@
 // Deployer supports deploying the site to target cloud providers.
 type Deployer struct {
 	localFs afero.Fs
+	bucket  *blob.Bucket
 
 	target   *target    // the target to deploy to
 	matchers []*matcher // matchers to apply to uploaded files
@@ -56,6 +58,13 @@ type Deployer struct {
 	force         bool // true forces upload of all files
 	invalidateCDN bool // true enables invalidate CDN cache (if possible)
 	maxDeletes    int  // caps the # of files to delete; -1 to disable
+
+	// For tests...
+	summary deploySummary // summary of latest Deploy results
+}
+
+type deploySummary struct {
+	NumLocal, NumRemote, NumUploads, NumDeletes int
 }
 
 // New constructs a new *Deployer.
@@ -92,11 +101,18 @@ func New(cfg config.Provider, localFs afero.Fs) (*Deployer, error) {
 	}, nil
 }
 
+func (d *Deployer) openBucket(ctx context.Context) (*blob.Bucket, error) {
+	if d.bucket != nil {
+		return d.bucket, nil
+	}
+	return blob.OpenBucket(ctx, d.target.URL)
+}
+
 // Deploy deploys the site to a target.
 func (d *Deployer) Deploy(ctx context.Context) error {
 	// TODO: This opens the root path in the bucket/container.
 	// Consider adding support for targeting a subdirectory.
-	bucket, err := blob.OpenBucket(ctx, d.target.URL)
+	bucket, err := d.openBucket(ctx)
 	if err != nil {
 		return err
 	}
@@ -107,6 +123,7 @@ func (d *Deployer) Deploy(ctx context.Context) error {
 		return err
 	}
 	jww.INFO.Printf("Found %d local files.\n", len(local))
+	d.summary.NumLocal = len(local)
 
 	// Load remote files from the target.
 	remote, err := walkRemote(ctx, bucket)
@@ -114,12 +131,15 @@ func (d *Deployer) Deploy(ctx context.Context) error {
 		return err
 	}
 	jww.INFO.Printf("Found %d remote files.\n", len(remote))
+	d.summary.NumRemote = len(remote)
 
 	// Diff local vs remote to see what changes need to be applied.
 	uploads, deletes := findDiffs(local, remote, d.force)
 	if err != nil {
 		return err
 	}
+	d.summary.NumUploads = len(uploads)
+	d.summary.NumDeletes = len(deletes)
 	if len(uploads)+len(deletes) == 0 {
 		if !d.quiet {
 			jww.FEEDBACK.Println("No changes required.")
@@ -187,6 +207,7 @@ func (d *Deployer) Deploy(ctx context.Context) error {
 	if d.maxDeletes != -1 && len(deletes) > d.maxDeletes {
 		jww.WARN.Printf("Skipping %d deletes because it is more than --maxDeletes (%d). If this is expected, set --maxDeletes to a larger number, or -1 to disable this check.\n",
 			len(deletes), d.maxDeletes)
+		d.summary.NumDeletes = 0
 	} else {
 		// Apply deletes in parallel.
 		sort.Slice(deletes, func(i, j int) bool { return deletes[i] < deletes[j] })
@@ -252,11 +273,16 @@ func doSingleUpload(ctx context.Context, bucket *blob.Bucket, upload *fileToUplo
 		ContentEncoding: upload.Local.ContentEncoding(),
 		ContentType:     upload.Local.ContentType(),
 	}
-	w, err := bucket.NewWriter(ctx, upload.Local.Path, opts)
+	w, err := bucket.NewWriter(ctx, upload.Local.SlashPath, opts)
 	if err != nil {
 		return err
 	}
-	_, err = io.Copy(w, upload.Local.UploadContentReader)
+	r, err := upload.Local.Reader()
+	if err != nil {
+		return err
+	}
+	defer r.Close()
+	_, err = io.Copy(w, r)
 	if err != nil {
 		return err
 	}
@@ -269,58 +295,70 @@ func doSingleUpload(ctx context.Context, bucket *blob.Bucket, upload *fileToUplo
 // localFile represents a local file from the source. Use newLocalFile to
 // construct one.
 type localFile struct {
-	// Path is the relative path to the file.
-	Path string
+	// NativePath is the native path to the file (using file.Separator).
+	NativePath string
+	// SlashPath is NativePath converted to use /.
+	SlashPath string
 	// UploadSize is the size of the content to be uploaded. It may not
 	// be the same as the local file size if the content will be
 	// gzipped before upload.
 	UploadSize int64
-	// UploadContentReader reads the content to be uploaded. Again,
-	// it may not be the same as the local file content due to gzipping.
-	UploadContentReader io.Reader
 
 	fs      afero.Fs
 	matcher *matcher
-	md5     []byte // cache
+	md5     []byte       // cache
+	gzipped bytes.Buffer // cache of gzipped contents if gzipping
 }
 
 // newLocalFile initializes a *localFile.
-func newLocalFile(fs afero.Fs, path string, m *matcher) (*localFile, error) {
-	r, size, err := contentToUpload(fs, path, m)
+func newLocalFile(fs afero.Fs, nativePath, slashpath string, m *matcher) (*localFile, error) {
+	f, err := fs.Open(nativePath)
 	if err != nil {
 		return nil, err
 	}
-	return &localFile{
-		Path:                path,
-		UploadSize:          size,
-		UploadContentReader: r,
-		fs:                  fs,
-		matcher:             m,
-	}, nil
-}
-
-// contentToUpload returns an io.Reader and size for the content to be uploaded
-// from path. It applies gzip encoding if needed.
-func contentToUpload(fs afero.Fs, path string, m *matcher) (io.Reader, int64, error) {
-	f, err := fs.Open(path)
-	if err != nil {
-		return nil, 0, err
-	}
-	info, err := f.Stat()
-	if err != nil {
-		return nil, 0, err
+	defer f.Close()
+	lf := &localFile{
+		NativePath: nativePath,
+		SlashPath:  slashpath,
+		fs:         fs,
+		matcher:    m,
 	}
-	r := io.Reader(f)
-	size := info.Size()
 	if m != nil && m.Gzip {
-		var b bytes.Buffer
-		gz := gzip.NewWriter(&b)
-		io.Copy(gz, f)
-		gz.Close()
-		r = &b
-		size = int64(b.Len())
-	}
-	return r, size, nil
+		// We're going to gzip the content. Do it once now, and cache the result
+		// in gzipped. The UploadSize is the size of the gzipped content.
+		gz := gzip.NewWriter(&lf.gzipped)
+		if _, err := io.Copy(gz, f); err != nil {
+			return nil, err
+		}
+		if err := gz.Close(); err != nil {
+			return nil, err
+		}
+		lf.UploadSize = int64(lf.gzipped.Len())
+	} else {
+		// Raw content. Just get the UploadSize.
+		info, err := f.Stat()
+		if err != nil {
+			return nil, err
+		}
+		lf.UploadSize = info.Size()
+	}
+	return lf, nil
+}
+
+// Reader returns an io.ReadCloser for reading the content to be uploaded.
+// The caller must call Close on the returned ReadCloser.
+// The reader content may not be the same as the local file content due to
+// gzipping.
+func (lf *localFile) Reader() (io.ReadCloser, error) {
+	if lf.matcher != nil && lf.matcher.Gzip {
+		// We've got the gzipped contents cached in gzipped.
+		// Note: we can't use lf.gzipped directly as a Reader, since it discards
+		// data after it is read, and we may read it more than once.
+		return ioutil.NopCloser(bytes.NewReader(lf.gzipped.Bytes())), nil
+	}
+	// Not expected to fail since we did it successfully earlier in newLocalFile,
+	// but could happen due to changes in the underlying filesystem.
+	return lf.fs.Open(lf.NativePath)
 }
 
 // CacheControl returns the Cache-Control header to use for lf, based on the
@@ -357,7 +395,7 @@ func (lf *localFile) ContentType() string {
 	// TODO: Hugo has a MediaType and a MediaTypes list and also a concept
 	// of custom MIME types.
 	// Use 1) The matcher 2) Hugo's MIME types 3) TypeByExtension.
-	return mime.TypeByExtension(filepath.Ext(lf.Path))
+	return mime.TypeByExtension(filepath.Ext(lf.NativePath))
 }
 
 // Force returns true if the file should be forced to re-upload based on the
@@ -371,14 +409,12 @@ func (lf *localFile) MD5() []byte {
 	if len(lf.md5) > 0 {
 		return lf.md5
 	}
-	// We can't use lf.UploadContentReader directly because if there's a
-	// delta we'll want to read it again later, and we have no way of
-	// resetting the reader. So, create a new one.
-	r, _, err := contentToUpload(lf.fs, lf.Path, lf.matcher)
+	h := md5.New()
+	r, err := lf.Reader()
 	if err != nil {
 		return nil
 	}
-	h := md5.New()
+	defer r.Close()
 	if _, err := io.Copy(h, r); err != nil {
 		return nil
 	}
@@ -386,7 +422,8 @@ func (lf *localFile) MD5() []byte {
 	return lf.md5
 }
 
-// walkLocal walks the source directory and returns a flat list of files.
+// walkLocal walks the source directory and returns a flat list of files,
+// using localFile.SlashPath as the map keys.
 func walkLocal(fs afero.Fs, matchers []*matcher) (map[string]*localFile, error) {
 	retval := map[string]*localFile{}
 	err := afero.Walk(fs, "", func(path string, info os.FileInfo, err error) error {
@@ -412,18 +449,19 @@ func walkLocal(fs afero.Fs, matchers []*matcher) (map[string]*localFile, error)
 		}
 
 		// Find the first matching matcher (if any).
+		slashpath := filepath.ToSlash(path)
 		var m *matcher
 		for _, cur := range matchers {
-			if cur.Matches(path) {
+			if cur.Matches(slashpath) {
 				m = cur
 				break
 			}
 		}
-		lf, err := newLocalFile(fs, path, m)
+		lf, err := newLocalFile(fs, path, slashpath, m)
 		if err != nil {
 			return err
 		}
-		retval[path] = lf
+		retval[lf.SlashPath] = lf
 		return nil
 	})
 	if err != nil {
@@ -496,7 +534,7 @@ func (u *fileToUpload) String() string {
 	if s := u.Local.ContentType(); s != "" {
 		details = append(details, fmt.Sprintf("Content-Type: %q", s))
 	}
-	return fmt.Sprintf("%s (%s): %v", u.Local.Path, strings.Join(details, ", "), u.Reason)
+	return fmt.Sprintf("%s (%s): %v", u.Local.SlashPath, strings.Join(details, ", "), u.Reason)
 }
 
 // findDiffs diffs localFiles vs remoteFiles to see what changes should be
@@ -506,8 +544,6 @@ func findDiffs(localFiles map[string]*localFile, remoteFiles map[string]*blob.Li
 	var uploads []*fileToUpload
 	var deletes []string
 
-	// TODO: Do we need to remap file delimiters, e.g. on Windows?
-
 	found := map[string]bool{}
 	for path, lf := range localFiles {
 		upload := false
@@ -576,22 +612,22 @@ func findDiffs(localFiles map[string]*localFile, remoteFiles map[string]*blob.Li
 // The returned slice will have length len(ordering)+1.
 //
 // The subslice at index i, for i = 0 ... len(ordering)-1, will have all of the
-// uploads whose Local.Path matched the regex at ordering[i] (but not any
+// uploads whose Local.SlashPath matched the regex at ordering[i] (but not any
 // previous ordering regex).
 // The subslice at index len(ordering) will have the remaining uploads that
 // didn't match any ordering regex.
 //
-// The subslices are sorted by Local.Path.
+// The subslices are sorted by Local.SlashPath.
 func applyOrdering(ordering []*regexp.Regexp, uploads []*fileToUpload) [][]*fileToUpload {
-	// Sort the whole slice by Local.Path first.
-	sort.Slice(uploads, func(i, j int) bool { return uploads[i].Local.Path < uploads[j].Local.Path })
+	// Sort the whole slice by Local.SlashPath first.
+	sort.Slice(uploads, func(i, j int) bool { return uploads[i].Local.SlashPath < uploads[j].Local.SlashPath })
 
 	retval := make([][]*fileToUpload, len(ordering)+1)
 	for _, u := range uploads {
 		matched := false
 		for i, re := range ordering {
-			if re.MatchString(u.Local.Path) {
+			if re.MatchString(u.Local.SlashPath) {
 				retval[i] = append(retval[i], u)
 				matched = true
 				break
diff --git a/deploy/deployConfig.go b/deploy/deployConfig.go
index 3cfa27013..b4fa325b7 100644
--- a/deploy/deployConfig.go
+++ b/deploy/deployConfig.go
@@ -43,6 +43,7 @@ type target struct {
 // a specified pattern.
 type matcher struct {
 	// Pattern is the string pattern to match against paths.
+	// Matching is done against paths converted to use / as the path separator.
 	Pattern string
 
 	// CacheControl specifies caching attributes to use when serving the blob.
diff --git a/deploy/deploy_test.go b/deploy/deploy_test.go
index 519a3963f..ed20daef4 100644
--- a/deploy/deploy_test.go
+++ b/deploy/deploy_test.go
@@ -16,9 +16,14 @@ package deploy
 import (
 	"bytes"
 	"compress/gzip"
+	"context"
 	"crypto/md5"
+	"fmt"
+	"io"
 	"io/ioutil"
 	"os"
+	"path"
+	"path/filepath"
 	"regexp"
 	"sort"
 	"testing"
@@ -27,13 +32,15 @@ import (
 	"github.com/google/go-cmp/cmp/cmpopts"
 	"github.com/spf13/afero"
 	"gocloud.dev/blob"
+	"gocloud.dev/blob/fileblob"
+	"gocloud.dev/blob/memblob"
 )
 
-func TestDeploy_FindDiffs(t *testing.T) {
+func TestFindDiffs(t *testing.T) {
 	hash1 := []byte("hash 1")
 	hash2 := []byte("hash 2")
 	makeLocal := func(path string, size int64, hash []byte) *localFile {
-		return &localFile{Path: path, UploadSize: size, md5: hash}
+		return &localFile{NativePath: path, SlashPath: filepath.ToSlash(path), UploadSize: size, md5: hash}
 	}
 	makeRemote := func(path string, size int64, hash []byte) *blob.ListObject {
 		return &blob.ListObject{Key: path, Size: size, MD5: hash}
@@ -63,6 +70,19 @@
 			makeRemote("ccc", 3, hash2),
 		},
 	},
+	{
+		Description: "local w/ separators == remote -> no diffs",
+		Local: []*localFile{
+			makeLocal(filepath.Join("aaa", "aaa"), 1, hash1),
+			makeLocal(filepath.Join("bbb", "bbb"), 2, hash1),
+			makeLocal(filepath.Join("ccc", "ccc"), 3, hash2),
+		},
+		Remote: []*blob.ListObject{
+			makeRemote("aaa/aaa", 1, hash1),
+			makeRemote("bbb/bbb", 2, hash1),
+			makeRemote("ccc/ccc", 3, hash2),
+		},
+	},
 	{
 		Description: "local == remote with force flag true -> diffs",
 		Local: []*localFile{
@@ -85,7 +105,7 @@
 	{
 		Description: "local == remote with route.Force true -> diffs",
 		Local: []*localFile{
-			{Path: "aaa", UploadSize: 1, matcher: &matcher{Force: true}, md5: hash1},
+			{NativePath: "aaa", SlashPath: "aaa", UploadSize: 1, matcher: &matcher{Force: true}, md5: hash1},
 			makeLocal("bbb", 2, hash1),
 		},
 		Remote: []*blob.ListObject{
@@ -168,7 +188,7 @@ func TestDeploy_FindDiffs(t *testing.T) {
 		t.Run(tc.Description, func(t *testing.T) {
 			local := map[string]*localFile{}
 			for _, l := range tc.Local {
-				local[l.Path] = l
+				local[l.SlashPath] = l
 			}
 			remote := map[string]*blob.ListObject{}
 			for _, r := range tc.Remote {
@@ -187,7 +207,7 @@ func TestDeploy_FindDiffs(t *testing.T) {
 	}
 }
 
-func TestDeploy_LocalFile(t *testing.T) {
+func TestLocalFile(t *testing.T) {
 	const (
 		content = "hello world!"
 	)
@@ -273,7 +293,7 @@ func TestDeploy_LocalFile(t *testing.T) {
 		if err := afero.WriteFile(fs, tc.Path, []byte(content), os.ModePerm); err != nil {
 			t.Fatal(err)
 		}
-		lf, err := newLocalFile(fs, tc.Path, tc.Matcher)
+		lf, err := newLocalFile(fs, tc.Path, filepath.ToSlash(tc.Path), tc.Matcher)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -294,15 +314,33 @@ func TestDeploy_LocalFile(t *testing.T) {
 			t.Errorf("got ContentType %q want %q", got, tc.WantContentType)
 		}
 	}
-	// Verify the content reader last to ensure the
-	// previous operations don't interfere with it.
-	gotContent, err := ioutil.ReadAll(lf.UploadContentReader)
+	// Verify the reader last to ensure the previous operations don't
+	// interfere with it.
+	r, err := lf.Reader()
+	if err != nil {
+		t.Fatal(err)
+	}
+	gotContent, err := ioutil.ReadAll(r)
 	if err != nil {
 		t.Fatal(err)
 	}
 	if !bytes.Equal(gotContent, tc.WantContent) {
 		t.Errorf("got content %q want %q", string(gotContent), string(tc.WantContent))
 	}
+	r.Close()
+	// Verify we can read again.
+	r, err = lf.Reader()
+	if err != nil {
+		t.Fatal(err)
+	}
+	gotContent, err = ioutil.ReadAll(r)
+	if err != nil {
+		t.Fatal(err)
+	}
+	r.Close()
+	if !bytes.Equal(gotContent, tc.WantContent) {
+		t.Errorf("got content %q want %q", string(gotContent), string(tc.WantContent))
+	}
 		})
 	}
 }
@@ -344,14 +382,14 @@ func TestOrdering(t *testing.T) {
 		t.Run(tc.Description, func(t *testing.T) {
 			uploads := make([]*fileToUpload, len(tc.Uploads))
 			for i, u := range tc.Uploads {
-				uploads[i] = &fileToUpload{Local: &localFile{Path: u}}
+				uploads[i] = &fileToUpload{Local: &localFile{SlashPath: u}}
 			}
 			gotUploads := applyOrdering(tc.Ordering, uploads)
 			var got [][]string
 			for _, subslice := range gotUploads {
 				var gotsubslice []string
 				for _, u := range subslice {
-					gotsubslice = append(gotsubslice, u.Local.Path)
+					gotsubslice = append(gotsubslice, u.Local.SlashPath)
 				}
 				got = append(got, gotsubslice)
 			}
@@ -361,3 +399,412 @@ func TestOrdering(t *testing.T) {
 		})
 	}
 }
+
+type fileData struct {
+	Name     string // name of the file
+	Contents string // contents of the file
+}
+
+// initLocalFs initializes fs with some test files.
+func initLocalFs(ctx context.Context, fs afero.Fs) ([]*fileData, error) {
+	// The initial local filesystem.
+	local := []*fileData{
+		{"aaa", "aaa"},
+		{"bbb", "bbb"},
+		{"subdir/aaa", "subdir-aaa"},
+		{"subdir/nested/aaa", "subdir-nested-aaa"},
+		{"subdir2/bbb", "subdir2-bbb"},
+	}
+	if err := writeFiles(fs, local); err != nil {
+		return nil, err
+	}
+	return local, nil
+}
+
+// fsTest represents an (afero.Fs, Go CDK blob.Bucket) pair against which
+// end-to-end tests can be run.
+type fsTest struct {
+	name   string
+	fs     afero.Fs
+	bucket *blob.Bucket
+}
+
+// initFsTests initializes a pair of tests for end-to-end testing:
+// 1. An in-memory afero.Fs paired with an in-memory Go CDK bucket.
+// 2. A filesystem-based afero.Fs paired with a filesystem-based Go CDK bucket.
+// It returns the pair of tests and a cleanup function.
+func initFsTests() ([]*fsTest, func(), error) {
+	tmpfsdir, err := ioutil.TempDir("", "fs")
+	if err != nil {
+		return nil, nil, err
+	}
+	tmpbucketdir, err := ioutil.TempDir("", "bucket")
+	if err != nil {
+		return nil, nil, err
+	}
+
+	memfs := afero.NewMemMapFs()
+	membucket := memblob.OpenBucket(nil)
+
+	filefs := afero.NewBasePathFs(afero.NewOsFs(), tmpfsdir)
+	filebucket, err := fileblob.OpenBucket(tmpbucketdir, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	tests := []*fsTest{
+		{"mem", memfs, membucket},
+		{"file", filefs, filebucket},
+	}
+	cleanup := func() {
+		membucket.Close()
+		filebucket.Close()
+		os.RemoveAll(tmpfsdir)
+		os.RemoveAll(tmpbucketdir)
+	}
+	return tests, cleanup, nil
+}
+
+// TestEndToEndSync verifies that basic adds, updates, and deletes are working
+// correctly.
+func TestEndToEndSync(t *testing.T) {
+	ctx := context.Background()
+	tests, cleanup, err := initFsTests()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer cleanup()
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			local, err := initLocalFs(ctx, test.fs)
+			if err != nil {
+				t.Fatal(err)
+			}
+			deployer := &Deployer{
+				localFs:    test.fs,
+				maxDeletes: -1,
+				bucket:     test.bucket,
+			}
+
+			// Initial deployment should sync remote with local.
+			if err := deployer.Deploy(ctx); err != nil {
+				t.Errorf("initial deploy: failed: %v", err)
+			}
+			wantSummary := deploySummary{NumLocal: 5, NumRemote: 0, NumUploads: 5, NumDeletes: 0}
+			if !cmp.Equal(deployer.summary, wantSummary) {
+				t.Errorf("initial deploy: got %v, want %v", deployer.summary, wantSummary)
+			}
+			if diff, err := verifyRemote(ctx, deployer.bucket, local); err != nil {
+				t.Errorf("initial deploy: failed to verify remote: %v", err)
+			} else if diff != "" {
+				t.Errorf("initial deploy: remote snapshot doesn't match expected:\n%v", diff)
+			}
+
+			// A repeat deployment shouldn't change anything.
+			if err := deployer.Deploy(ctx); err != nil {
+				t.Errorf("no-op deploy: %v", err)
+			}
+			wantSummary = deploySummary{NumLocal: 5, NumRemote: 5, NumUploads: 0, NumDeletes: 0}
+			if !cmp.Equal(deployer.summary, wantSummary) {
+				t.Errorf("no-op deploy: got %v, want %v", deployer.summary, wantSummary)
+			}
+
+			// Make some changes to the local filesystem:
+			// 1. Modify file [0].
+			// 2. Delete file [1].
+			// 3. Add a new file (sorted last).
+			updatefd := local[0]
+			updatefd.Contents = "new contents"
+			deletefd := local[1]
+			local = append(local[:1], local[2:]...) // removing deleted [1]
+			newfd := &fileData{"zzz", "zzz"}
+			local = append(local, newfd)
+			if err := writeFiles(test.fs, []*fileData{updatefd, newfd}); err != nil {
+				t.Fatal(err)
+			}
+			if err := test.fs.Remove(deletefd.Name); err != nil {
+				t.Fatal(err)
+			}
+
+			// A deployment should apply those 3 changes.
+			if err := deployer.Deploy(ctx); err != nil {
+				t.Errorf("deploy after changes: failed: %v", err)
+			}
+			wantSummary = deploySummary{NumLocal: 5, NumRemote: 5, NumUploads: 2, NumDeletes: 1}
+			if !cmp.Equal(deployer.summary, wantSummary) {
+				t.Errorf("deploy after changes: got %v, want %v", deployer.summary, wantSummary)
+			}
+			if diff, err := verifyRemote(ctx, deployer.bucket, local); err != nil {
+				t.Errorf("deploy after changes: failed to verify remote: %v", err)
+			} else if diff != "" {
+				t.Errorf("deploy after changes: remote snapshot doesn't match expected:\n%v", diff)
+			}
+
+			// Again, a repeat deployment shouldn't change anything.
+			if err := deployer.Deploy(ctx); err != nil {
+				t.Errorf("no-op deploy: %v", err)
+			}
+			wantSummary = deploySummary{NumLocal: 5, NumRemote: 5, NumUploads: 0, NumDeletes: 0}
+			if !cmp.Equal(deployer.summary, wantSummary) {
+				t.Errorf("no-op deploy: got %v, want %v", deployer.summary, wantSummary)
+			}
+		})
+	}
+}
+
+// TestMaxDeletes verifies that the "maxDeletes" flag is working correctly.
+func TestMaxDeletes(t *testing.T) {
+	ctx := context.Background()
+	tests, cleanup, err := initFsTests()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer cleanup()
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			local, err := initLocalFs(ctx, test.fs)
+			if err != nil {
+				t.Fatal(err)
+			}
+			deployer := &Deployer{
+				localFs:    test.fs,
+				maxDeletes: -1,
+				bucket:     test.bucket,
+			}
+
+			// Sync remote with local.
+			if err := deployer.Deploy(ctx); err != nil {
+				t.Errorf("initial deploy: failed: %v", err)
+			}
+			wantSummary := deploySummary{NumLocal: 5, NumRemote: 0, NumUploads: 5, NumDeletes: 0}
+			if !cmp.Equal(deployer.summary, wantSummary) {
+				t.Errorf("initial deploy: got %v, want %v", deployer.summary, wantSummary)
+			}
+
+			// Delete two files, [1] and [2].
+			if err := test.fs.Remove(local[1].Name); err != nil {
+				t.Fatal(err)
+			}
+			if err := test.fs.Remove(local[2].Name); err != nil {
+				t.Fatal(err)
+			}
+
+			// A deployment with maxDeletes=0 shouldn't change anything.
+			deployer.maxDeletes = 0
+			if err := deployer.Deploy(ctx); err != nil {
+				t.Errorf("deploy failed: %v", err)
+			}
+			wantSummary = deploySummary{NumLocal: 3, NumRemote: 5, NumUploads: 0, NumDeletes: 0}
+			if !cmp.Equal(deployer.summary, wantSummary) {
+				t.Errorf("deploy: got %v, want %v", deployer.summary, wantSummary)
+			}
+
+			// A deployment with maxDeletes=1 shouldn't change anything either.
+			deployer.maxDeletes = 1
+			if err := deployer.Deploy(ctx); err != nil {
+				t.Errorf("deploy failed: %v", err)
+			}
+			wantSummary = deploySummary{NumLocal: 3, NumRemote: 5, NumUploads: 0, NumDeletes: 0}
+			if !cmp.Equal(deployer.summary, wantSummary) {
+				t.Errorf("deploy: got %v, want %v", deployer.summary, wantSummary)
+			}
+
+			// A deployment with maxDeletes=2 should make the changes.
+			deployer.maxDeletes = 2
+			if err := deployer.Deploy(ctx); err != nil {
+				t.Errorf("deploy failed: %v", err)
+			}
+			wantSummary = deploySummary{NumLocal: 3, NumRemote: 5, NumUploads: 0, NumDeletes: 2}
+			if !cmp.Equal(deployer.summary, wantSummary) {
+				t.Errorf("deploy: got %v, want %v", deployer.summary, wantSummary)
+			}
+
+			// Delete two more files, [0] and [3].
+			if err := test.fs.Remove(local[0].Name); err != nil {
+				t.Fatal(err)
+			}
+			if err := test.fs.Remove(local[3].Name); err != nil {
+				t.Fatal(err)
+			}
+
+			// A deployment with maxDeletes=-1 should make the changes.
+			deployer.maxDeletes = -1
+			if err := deployer.Deploy(ctx); err != nil {
+				t.Errorf("deploy failed: %v", err)
+			}
+			wantSummary = deploySummary{NumLocal: 1, NumRemote: 3, NumUploads: 0, NumDeletes: 2}
+			if !cmp.Equal(deployer.summary, wantSummary) {
+				t.Errorf("deploy: got %v, want %v", deployer.summary, wantSummary)
+			}
+		})
+	}
+}
+
+// TestCompression verifies that gzip compression works correctly.
+// In particular, MD5 hashes must be of the compressed content.
+func TestCompression(t *testing.T) {
+	ctx := context.Background()
+	tests, cleanup, err := initFsTests()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer cleanup()
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			local, err := initLocalFs(ctx, test.fs)
+			if err != nil {
+				t.Fatal(err)
+			}
+			deployer := &Deployer{
+				localFs:  test.fs,
+				bucket:   test.bucket,
+				matchers: []*matcher{{Pattern: ".*", Gzip: true, re: regexp.MustCompile(".*")}},
+			}
+
+			// Initial deployment should sync remote with local.
+			if err := deployer.Deploy(ctx); err != nil {
+				t.Errorf("initial deploy: failed: %v", err)
+			}
+			wantSummary := deploySummary{NumLocal: 5, NumRemote: 0, NumUploads: 5, NumDeletes: 0}
+			if !cmp.Equal(deployer.summary, wantSummary) {
+				t.Errorf("initial deploy: got %v, want %v", deployer.summary, wantSummary)
+			}
+
+			// A repeat deployment shouldn't change anything.
+			if err := deployer.Deploy(ctx); err != nil {
+				t.Errorf("no-op deploy: %v", err)
+			}
+			wantSummary = deploySummary{NumLocal: 5, NumRemote: 5, NumUploads: 0, NumDeletes: 0}
+			if !cmp.Equal(deployer.summary, wantSummary) {
+				t.Errorf("no-op deploy: got %v, want %v", deployer.summary, wantSummary)
+			}
+
+			// Make an update to the local filesystem, on [1].
+			updatefd := local[1]
+			updatefd.Contents = "new contents"
+			if err := writeFiles(test.fs, []*fileData{updatefd}); err != nil {
+				t.Fatal(err)
+			}
+
+			// A deployment should apply the changes.
+			if err := deployer.Deploy(ctx); err != nil {
+				t.Errorf("deploy after changes: failed: %v", err)
+			}
+			wantSummary = deploySummary{NumLocal: 5, NumRemote: 5, NumUploads: 1, NumDeletes: 0}
+			if !cmp.Equal(deployer.summary, wantSummary) {
+				t.Errorf("deploy after changes: got %v, want %v", deployer.summary, wantSummary)
+			}
+		})
+	}
+}
+
+// TestMatching verifies that matchers match correctly, and that the Force
+// attribute for matcher works.
+func TestMatching(t *testing.T) {
+	ctx := context.Background()
+	tests, cleanup, err := initFsTests()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer cleanup()
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			_, err := initLocalFs(ctx, test.fs)
+			if err != nil {
+				t.Fatal(err)
+			}
+			deployer := &Deployer{
+				localFs:  test.fs,
+				bucket:   test.bucket,
+				matchers: []*matcher{{Pattern: "^subdir/aaa$", Force: true, re: regexp.MustCompile("^subdir/aaa$")}},
+			}
+
+			// Initial deployment to sync remote with local.
+			if err := deployer.Deploy(ctx); err != nil {
+				t.Errorf("initial deploy: failed: %v", err)
+			}
+			wantSummary := deploySummary{NumLocal: 5, NumRemote: 0, NumUploads: 5, NumDeletes: 0}
+			if !cmp.Equal(deployer.summary, wantSummary) {
+				t.Errorf("initial deploy: got %v, want %v", deployer.summary, wantSummary)
+			}
+
+			// A repeat deployment should upload a single file, the one that matched the Force matcher.
+			// Note that matching happens based on the ToSlash form, so this matches
+			// even on Windows.
+			if err := deployer.Deploy(ctx); err != nil {
+				t.Errorf("no-op deploy with single force matcher: %v", err)
+			}
+			wantSummary = deploySummary{NumLocal: 5, NumRemote: 5, NumUploads: 1, NumDeletes: 0}
+			if !cmp.Equal(deployer.summary, wantSummary) {
+				t.Errorf("no-op deploy with single force matcher: got %v, want %v", deployer.summary, wantSummary)
+			}
+
+			// Repeat with a matcher that should now match 3 files.
+			deployer.matchers = []*matcher{{Pattern: "aaa", Force: true, re: regexp.MustCompile("aaa")}}
+			if err := deployer.Deploy(ctx); err != nil {
+				t.Errorf("no-op deploy with triple force matcher: %v", err)
+			}
+			wantSummary = deploySummary{NumLocal: 5, NumRemote: 5, NumUploads: 3, NumDeletes: 0}
+			if !cmp.Equal(deployer.summary, wantSummary) {
+				t.Errorf("no-op deploy with triple force matcher: got %v, want %v", deployer.summary, wantSummary)
+			}
+		})
+	}
+}
+
+// writeFiles writes the files in fds to fs.
+func writeFiles(fs afero.Fs, fds []*fileData) error {
+	for _, fd := range fds {
+		dir := path.Dir(fd.Name)
+		if dir != "." {
+			err := fs.MkdirAll(dir, os.ModePerm)
+			if err != nil {
+				return err
+			}
+		}
+		f, err := fs.Create(fd.Name)
+		if err != nil {
+			return err
+		}
+		defer f.Close()
+		_, err = f.WriteString(fd.Contents)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// verifyRemote verifies that the current contents of bucket match local.
+// It returns an empty string if the contents match, and a non-empty string
+// capturing the diff if they don't.
+func verifyRemote(ctx context.Context, bucket *blob.Bucket, local []*fileData) (string, error) {
+	var cur []*fileData
+	iter := bucket.List(nil)
+	for {
+		obj, err := iter.Next(ctx)
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return "", err
+		}
+		contents, err := bucket.ReadAll(ctx, obj.Key)
+		if err != nil {
+			return "", err
+		}
+		cur = append(cur, &fileData{obj.Key, string(contents)})
+	}
+	if cmp.Equal(cur, local) {
+		return "", nil
+	}
+	diff := "got: \n"
+	for _, f := range cur {
+		diff += fmt.Sprintf("  %s: %s\n", f.Name, f.Contents)
+	}
+	diff += "want: \n"
+	for _, f := range local {
+		diff += fmt.Sprintf("  %s: %s\n", f.Name, f.Contents)
+	}
+	return diff, nil
+}
-- 
cgit v1.2.3
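
The new Deployer.bucket field is the seam that makes the end-to-end tests
above possible: openBucket returns the injected bucket instead of opening
d.target.URL. A minimal sketch of how that seam is used, modeled on
TestEndToEndSync; the test name and file contents below are invented for
illustration, and like the real tests it must live in package deploy because
the fields are unexported:

	package deploy

	import (
		"context"
		"testing"

		"github.com/spf13/afero"
		"gocloud.dev/blob/memblob"
	)

	func TestDeployToMemBucket(t *testing.T) {
		ctx := context.Background()

		// A "site" rendered into an in-memory filesystem.
		fs := afero.NewMemMapFs()
		if err := afero.WriteFile(fs, "index.html", []byte("<html>hi</html>"), 0644); err != nil {
			t.Fatal(err)
		}

		// Injecting a bucket makes openBucket skip blob.OpenBucket(ctx, d.target.URL),
		// so no cloud provider (or target config) is needed.
		bucket := memblob.OpenBucket(nil)
		defer bucket.Close()

		d := &Deployer{localFs: fs, bucket: bucket, maxDeletes: -1}
		if err := d.Deploy(ctx); err != nil {
			t.Fatal(err)
		}

		// The summary fields added by this patch make the result observable.
		if d.summary.NumUploads != 1 {
			t.Errorf("got %d uploads, want 1", d.summary.NumUploads)
		}
	}

The committed tests extend this same pattern with a second, file-backed pair
(fileblob over a temp directory) so that native path separators come into
play, which is what exercises the Windows fix (matching and upload keys now
use the ToSlash form).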