author     Bjørn Erik Pedersen <bjorn.erik.pedersen@gmail.com>   2019-08-10 21:05:17 +0200
committer  Bjørn Erik Pedersen <bjorn.erik.pedersen@gmail.com>   2019-08-12 13:26:32 +0200
commit     9e571827055dedb46b78c5db3d17d6913f14870b (patch)
tree       f5f0108afe0c9385ff6dc27664943d9f719f57ad /cache
parent     6027ee11082d0b9d72de1d4d1980a702be294ad2 (diff)
tests: Convert from testify to quicktest
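
Every file in this change applies the same mechanical mapping from testify's require API to quicktest checkers. Below is a minimal sketch of the before/after assertion style; the package name, TestExample, and doSomething are illustrative placeholders, not code from this commit:

    package example

    import (
    	"testing"

    	qt "github.com/frankban/quicktest"
    )

    // doSomething stands in for the code under test; it is not part of this commit.
    func doSomething() error { return nil }

    func TestExample(t *testing.T) {
    	c := qt.New(t) // was: assert := require.New(t)

    	err := doSomething()
    	c.Assert(err, qt.IsNil) // was: assert.NoError(err)
    	// Expected errors negate the checker:
    	//   c.Assert(err, qt.Not(qt.IsNil))   // was: assert.Error(err)

    	got := len("hugo")
    	c.Assert(got, qt.Equals, 4) // was: assert.Equal(4, got); got and want swap places

    	// Regexp assertions map to the Matches checker:
    	c.Assert("hugo_cache_abc", qt.Matches, ".*hugo_cache.*") // was: assert.Regexp(regexp.MustCompile(...), ...)

    	// Per-assertion messages move from a trailing argument to qt.Commentf:
    	c.Assert(got, qt.Equals, 4, qt.Commentf("unexpected length: %d", got))
    }

The diffs below show exactly these substitutions applied file by file.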
Diffstat (limited to 'cache')
-rw-r--r--  cache/filecache/filecache_config_test.go  |  50
-rw-r--r--  cache/filecache/filecache_pruner_test.go  |  26
-rw-r--r--  cache/filecache/filecache_test.go          | 113
-rw-r--r--  cache/namedmemcache/named_cache_test.go    |  22
-rw-r--r--  cache/partitioned_lazy_cache_test.go       |  40
5 files changed, 125 insertions, 126 deletions
diff --git a/cache/filecache/filecache_config_test.go b/cache/filecache/filecache_config_test.go
index f2f75344b..9f80a4f90 100644
--- a/cache/filecache/filecache_config_test.go
+++ b/cache/filecache/filecache_config_test.go
@@ -24,14 +24,14 @@ import (
"github.com/gohugoio/hugo/config"
+ qt "github.com/frankban/quicktest"
"github.com/spf13/viper"
- "github.com/stretchr/testify/require"
)
func TestDecodeConfig(t *testing.T) {
t.Parallel()
- assert := require.New(t)
+ c := qt.New(t)
configStr := `
resourceDir = "myresources"
@@ -55,27 +55,27 @@ dir = "/path/to/c3"
`
cfg, err := config.FromConfigString(configStr, "toml")
- assert.NoError(err)
+ c.Assert(err, qt.IsNil)
fs := afero.NewMemMapFs()
decoded, err := DecodeConfig(fs, cfg)
- assert.NoError(err)
+ c.Assert(err, qt.IsNil)
- assert.Equal(5, len(decoded))
+ c.Assert(len(decoded), qt.Equals, 5)
c2 := decoded["getcsv"]
- assert.Equal("11h0m0s", c2.MaxAge.String())
- assert.Equal(filepath.FromSlash("/path/to/c2/filecache/getcsv"), c2.Dir)
+ c.Assert(c2.MaxAge.String(), qt.Equals, "11h0m0s")
+ c.Assert(c2.Dir, qt.Equals, filepath.FromSlash("/path/to/c2/filecache/getcsv"))
c3 := decoded["images"]
- assert.Equal(time.Duration(-1), c3.MaxAge)
- assert.Equal(filepath.FromSlash("/path/to/c3/filecache/images"), c3.Dir)
+ c.Assert(c3.MaxAge, qt.Equals, time.Duration(-1))
+ c.Assert(c3.Dir, qt.Equals, filepath.FromSlash("/path/to/c3/filecache/images"))
}
func TestDecodeConfigIgnoreCache(t *testing.T) {
t.Parallel()
- assert := require.New(t)
+ c := qt.New(t)
configStr := `
resourceDir = "myresources"
@@ -100,21 +100,21 @@ dir = "/path/to/c3"
`
cfg, err := config.FromConfigString(configStr, "toml")
- assert.NoError(err)
+ c.Assert(err, qt.IsNil)
fs := afero.NewMemMapFs()
decoded, err := DecodeConfig(fs, cfg)
- assert.NoError(err)
+ c.Assert(err, qt.IsNil)
- assert.Equal(5, len(decoded))
+ c.Assert(len(decoded), qt.Equals, 5)
for _, v := range decoded {
- assert.Equal(time.Duration(0), v.MaxAge)
+ c.Assert(v.MaxAge, qt.Equals, time.Duration(0))
}
}
func TestDecodeConfigDefault(t *testing.T) {
- assert := require.New(t)
+ c := qt.New(t)
cfg := newTestConfig()
if runtime.GOOS == "windows" {
@@ -130,28 +130,28 @@ func TestDecodeConfigDefault(t *testing.T) {
decoded, err := DecodeConfig(fs, cfg)
- assert.NoError(err)
+ c.Assert(err, qt.IsNil)
- assert.Equal(5, len(decoded))
+ c.Assert(len(decoded), qt.Equals, 5)
imgConfig := decoded[cacheKeyImages]
jsonConfig := decoded[cacheKeyGetJSON]
if runtime.GOOS == "windows" {
- assert.Equal(filepath.FromSlash("_gen/images"), imgConfig.Dir)
+ c.Assert(imgConfig.Dir, qt.Equals, filepath.FromSlash("_gen/images"))
} else {
- assert.Equal("_gen/images", imgConfig.Dir)
- assert.Equal("/cache/thecache/hugoproject/filecache/getjson", jsonConfig.Dir)
+ c.Assert(imgConfig.Dir, qt.Equals, "_gen/images")
+ c.Assert(jsonConfig.Dir, qt.Equals, "/cache/thecache/hugoproject/filecache/getjson")
}
- assert.True(imgConfig.isResourceDir)
- assert.False(jsonConfig.isResourceDir)
+ c.Assert(imgConfig.isResourceDir, qt.Equals, true)
+ c.Assert(jsonConfig.isResourceDir, qt.Equals, false)
}
func TestDecodeConfigInvalidDir(t *testing.T) {
t.Parallel()
- assert := require.New(t)
+ c := qt.New(t)
configStr := `
resourceDir = "myresources"
@@ -173,11 +173,11 @@ dir = "/"
}
cfg, err := config.FromConfigString(configStr, "toml")
- assert.NoError(err)
+ c.Assert(err, qt.IsNil)
fs := afero.NewMemMapFs()
_, err = DecodeConfig(fs, cfg)
- assert.Error(err)
+ c.Assert(err, qt.Not(qt.IsNil))
}
diff --git a/cache/filecache/filecache_pruner_test.go b/cache/filecache/filecache_pruner_test.go
index 72c6781ac..48bce723e 100644
--- a/cache/filecache/filecache_pruner_test.go
+++ b/cache/filecache/filecache_pruner_test.go
@@ -20,13 +20,13 @@ import (
"github.com/spf13/afero"
- "github.com/stretchr/testify/require"
+ qt "github.com/frankban/quicktest"
)
func TestPrune(t *testing.T) {
t.Parallel()
- assert := require.New(t)
+ c := qt.New(t)
configStr := `
resourceDir = "myresources"
@@ -53,10 +53,10 @@ dir = ":resourceDir/_gen"
`
for _, name := range []string{cacheKeyGetCSV, cacheKeyGetJSON, cacheKeyAssets, cacheKeyImages} {
- msg := fmt.Sprintf("cache: %s", name)
+ msg := qt.Commentf("cache: %s", name)
p := newPathsSpec(t, afero.NewMemMapFs(), configStr)
caches, err := NewCaches(p)
- assert.NoError(err)
+ c.Assert(err, qt.IsNil)
cache := caches[name]
for i := 0; i < 10; i++ {
id := fmt.Sprintf("i%d", i)
@@ -70,21 +70,21 @@ dir = ":resourceDir/_gen"
}
count, err := caches.Prune()
- assert.NoError(err)
- assert.Equal(5, count, msg)
+ c.Assert(err, qt.IsNil)
+ c.Assert(count, qt.Equals, 5, msg)
for i := 0; i < 10; i++ {
id := fmt.Sprintf("i%d", i)
v := cache.getString(id)
if i < 5 {
- assert.Equal("", v, id)
+ c.Assert(v, qt.Equals, "")
} else {
- assert.Equal("abc", v, id)
+ c.Assert(v, qt.Equals, "abc")
}
}
caches, err = NewCaches(p)
- assert.NoError(err)
+ c.Assert(err, qt.IsNil)
cache = caches[name]
// Touch one and then prune.
cache.GetOrCreateBytes("i5", func() ([]byte, error) {
@@ -92,17 +92,17 @@ dir = ":resourceDir/_gen"
})
count, err = caches.Prune()
- assert.NoError(err)
- assert.Equal(4, count)
+ c.Assert(err, qt.IsNil)
+ c.Assert(count, qt.Equals, 4)
// Now only the i5 should be left.
for i := 0; i < 10; i++ {
id := fmt.Sprintf("i%d", i)
v := cache.getString(id)
if i != 5 {
- assert.Equal("", v, id)
+ c.Assert(v, qt.Equals, "")
} else {
- assert.Equal("abc", v, id)
+ c.Assert(v, qt.Equals, "abc")
}
}
diff --git a/cache/filecache/filecache_test.go b/cache/filecache/filecache_test.go
index 78becd43b..6d3ea6289 100644
--- a/cache/filecache/filecache_test.go
+++ b/cache/filecache/filecache_test.go
@@ -19,7 +19,6 @@ import (
"io/ioutil"
"os"
"path/filepath"
- "regexp"
"strings"
"sync"
"testing"
@@ -35,19 +34,19 @@ import (
"github.com/gohugoio/hugo/hugofs"
"github.com/spf13/afero"
- "github.com/stretchr/testify/require"
+ qt "github.com/frankban/quicktest"
)
func TestFileCache(t *testing.T) {
t.Parallel()
- assert := require.New(t)
+ c := qt.New(t)
tempWorkingDir, err := ioutil.TempDir("", "hugo_filecache_test_work")
- assert.NoError(err)
+ c.Assert(err, qt.IsNil)
defer os.Remove(tempWorkingDir)
tempCacheDir, err := ioutil.TempDir("", "hugo_filecache_test_cache")
- assert.NoError(err)
+ c.Assert(err, qt.IsNil)
defer os.Remove(tempCacheDir)
osfs := afero.NewOsFs()
@@ -89,30 +88,30 @@ dir = ":cacheDir/c"
p := newPathsSpec(t, osfs, configStr)
caches, err := NewCaches(p)
- assert.NoError(err)
+ c.Assert(err, qt.IsNil)
cache := caches.Get("GetJSON")
- assert.NotNil(cache)
- assert.Equal("10h0m0s", cache.maxAge.String())
+ c.Assert(cache, qt.Not(qt.IsNil))
+ c.Assert(cache.maxAge.String(), qt.Equals, "10h0m0s")
bfs, ok := cache.Fs.(*afero.BasePathFs)
- assert.True(ok)
+ c.Assert(ok, qt.Equals, true)
filename, err := bfs.RealPath("key")
- assert.NoError(err)
+ c.Assert(err, qt.IsNil)
if test.cacheDir != "" {
- assert.Equal(filepath.Join(test.cacheDir, "c/"+filecacheRootDirname+"/getjson/key"), filename)
+ c.Assert(filename, qt.Equals, filepath.Join(test.cacheDir, "c/"+filecacheRootDirname+"/getjson/key"))
} else {
// Temp dir.
- assert.Regexp(regexp.MustCompile(".*hugo_cache.*"+filecacheRootDirname+".*key"), filename)
+ c.Assert(filename, qt.Matches, ".*hugo_cache.*"+filecacheRootDirname+".*key")
}
cache = caches.Get("Images")
- assert.NotNil(cache)
- assert.Equal(time.Duration(-1), cache.maxAge)
+ c.Assert(cache, qt.Not(qt.IsNil))
+ c.Assert(cache.maxAge, qt.Equals, time.Duration(-1))
bfs, ok = cache.Fs.(*afero.BasePathFs)
- assert.True(ok)
+ c.Assert(ok, qt.Equals, true)
filename, _ = bfs.RealPath("key")
- assert.Equal(filepath.FromSlash("_gen/images/key"), filename)
+ c.Assert(filename, qt.Equals, filepath.FromSlash("_gen/images/key"))
rf := func(s string) func() (io.ReadCloser, error) {
return func() (io.ReadCloser, error) {
@@ -130,55 +129,55 @@ dir = ":cacheDir/c"
return []byte("bcd"), nil
}
- for _, c := range []*Cache{caches.ImageCache(), caches.AssetsCache(), caches.GetJSONCache(), caches.GetCSVCache()} {
+ for _, ca := range []*Cache{caches.ImageCache(), caches.AssetsCache(), caches.GetJSONCache(), caches.GetCSVCache()} {
for i := 0; i < 2; i++ {
- info, r, err := c.GetOrCreate("a", rf("abc"))
- assert.NoError(err)
- assert.NotNil(r)
- assert.Equal("a", info.Name)
+ info, r, err := ca.GetOrCreate("a", rf("abc"))
+ c.Assert(err, qt.IsNil)
+ c.Assert(r, qt.Not(qt.IsNil))
+ c.Assert(info.Name, qt.Equals, "a")
b, _ := ioutil.ReadAll(r)
r.Close()
- assert.Equal("abc", string(b))
+ c.Assert(string(b), qt.Equals, "abc")
- info, b, err = c.GetOrCreateBytes("b", bf)
- assert.NoError(err)
- assert.NotNil(r)
- assert.Equal("b", info.Name)
- assert.Equal("bcd", string(b))
+ info, b, err = ca.GetOrCreateBytes("b", bf)
+ c.Assert(err, qt.IsNil)
+ c.Assert(r, qt.Not(qt.IsNil))
+ c.Assert(info.Name, qt.Equals, "b")
+ c.Assert(string(b), qt.Equals, "bcd")
- _, b, err = c.GetOrCreateBytes("a", bf)
- assert.NoError(err)
- assert.Equal("abc", string(b))
+ _, b, err = ca.GetOrCreateBytes("a", bf)
+ c.Assert(err, qt.IsNil)
+ c.Assert(string(b), qt.Equals, "abc")
- _, r, err = c.GetOrCreate("a", rf("bcd"))
- assert.NoError(err)
+ _, r, err = ca.GetOrCreate("a", rf("bcd"))
+ c.Assert(err, qt.IsNil)
b, _ = ioutil.ReadAll(r)
r.Close()
- assert.Equal("abc", string(b))
+ c.Assert(string(b), qt.Equals, "abc")
}
}
- assert.NotNil(caches.Get("getJSON"))
+ c.Assert(caches.Get("getJSON"), qt.Not(qt.IsNil))
info, w, err := caches.ImageCache().WriteCloser("mykey")
- assert.NoError(err)
- assert.Equal("mykey", info.Name)
+ c.Assert(err, qt.IsNil)
+ c.Assert(info.Name, qt.Equals, "mykey")
io.WriteString(w, "Hugo is great!")
w.Close()
- assert.Equal("Hugo is great!", caches.ImageCache().getString("mykey"))
+ c.Assert(caches.ImageCache().getString("mykey"), qt.Equals, "Hugo is great!")
info, r, err := caches.ImageCache().Get("mykey")
- assert.NoError(err)
- assert.NotNil(r)
- assert.Equal("mykey", info.Name)
+ c.Assert(err, qt.IsNil)
+ c.Assert(r, qt.Not(qt.IsNil))
+ c.Assert(info.Name, qt.Equals, "mykey")
b, _ := ioutil.ReadAll(r)
r.Close()
- assert.Equal("Hugo is great!", string(b))
+ c.Assert(string(b), qt.Equals, "Hugo is great!")
info, b, err = caches.ImageCache().GetBytes("mykey")
- assert.NoError(err)
- assert.Equal("mykey", info.Name)
- assert.Equal("Hugo is great!", string(b))
+ c.Assert(err, qt.IsNil)
+ c.Assert(info.Name, qt.Equals, "mykey")
+ c.Assert(string(b), qt.Equals, "Hugo is great!")
}
@@ -187,7 +186,7 @@ dir = ":cacheDir/c"
func TestFileCacheConcurrent(t *testing.T) {
t.Parallel()
- assert := require.New(t)
+ c := qt.New(t)
configStr := `
resourceDir = "myresources"
@@ -208,7 +207,7 @@ dir = "/cache/c"
p := newPathsSpec(t, afero.NewMemMapFs(), configStr)
caches, err := NewCaches(p)
- assert.NoError(err)
+ c.Assert(err, qt.IsNil)
const cacheName = "getjson"
@@ -225,16 +224,16 @@ dir = "/cache/c"
go func(i int) {
defer wg.Done()
for j := 0; j < 20; j++ {
- c := caches.Get(cacheName)
- assert.NotNil(c)
+ ca := caches.Get(cacheName)
+ c.Assert(ca, qt.Not(qt.IsNil))
filename, data := filenameData(i)
- _, r, err := c.GetOrCreate(filename, func() (io.ReadCloser, error) {
+ _, r, err := ca.GetOrCreate(filename, func() (io.ReadCloser, error) {
return hugio.ToReadCloser(strings.NewReader(data)), nil
})
- assert.NoError(err)
+ c.Assert(err, qt.IsNil)
b, _ := ioutil.ReadAll(r)
r.Close()
- assert.Equal(data, string(b))
+ c.Assert(string(b), qt.Equals, data)
// Trigger some expiration.
time.Sleep(50 * time.Millisecond)
}
@@ -245,9 +244,9 @@ dir = "/cache/c"
}
func TestCleanID(t *testing.T) {
- assert := require.New(t)
- assert.Equal(filepath.FromSlash("a/b/c.txt"), cleanID(filepath.FromSlash("/a/b//c.txt")))
- assert.Equal(filepath.FromSlash("a/b/c.txt"), cleanID(filepath.FromSlash("a/b//c.txt")))
+ c := qt.New(t)
+ c.Assert(cleanID(filepath.FromSlash("/a/b//c.txt")), qt.Equals, filepath.FromSlash("a/b/c.txt"))
+ c.Assert(cleanID(filepath.FromSlash("a/b//c.txt")), qt.Equals, filepath.FromSlash("a/b/c.txt"))
}
func initConfig(fs afero.Fs, cfg config.Provider) error {
@@ -288,12 +287,12 @@ func initConfig(fs afero.Fs, cfg config.Provider) error {
}
func newPathsSpec(t *testing.T, fs afero.Fs, configStr string) *helpers.PathSpec {
- assert := require.New(t)
+ c := qt.New(t)
cfg, err := config.FromConfigString(configStr, "toml")
- assert.NoError(err)
+ c.Assert(err, qt.IsNil)
initConfig(fs, cfg)
p, err := helpers.NewPathSpec(hugofs.NewFrom(fs, cfg), cfg, nil)
- assert.NoError(err)
+ c.Assert(err, qt.IsNil)
return p
}
diff --git a/cache/namedmemcache/named_cache_test.go b/cache/namedmemcache/named_cache_test.go
index cf64aa210..9feddb11f 100644
--- a/cache/namedmemcache/named_cache_test.go
+++ b/cache/namedmemcache/named_cache_test.go
@@ -18,12 +18,12 @@ import (
"sync"
"testing"
- "github.com/stretchr/testify/require"
+ qt "github.com/frankban/quicktest"
)
func TestNamedCache(t *testing.T) {
t.Parallel()
- assert := require.New(t)
+ c := qt.New(t)
cache := New()
@@ -35,24 +35,24 @@ func TestNamedCache(t *testing.T) {
for i := 0; i < 5; i++ {
v1, err := cache.GetOrCreate("a1", create)
- assert.NoError(err)
- assert.Equal(1, v1)
+ c.Assert(err, qt.IsNil)
+ c.Assert(v1, qt.Equals, 1)
v2, err := cache.GetOrCreate("a2", create)
- assert.NoError(err)
- assert.Equal(2, v2)
+ c.Assert(err, qt.IsNil)
+ c.Assert(v2, qt.Equals, 2)
}
cache.Clear()
v3, err := cache.GetOrCreate("a2", create)
- assert.NoError(err)
- assert.Equal(3, v3)
+ c.Assert(err, qt.IsNil)
+ c.Assert(v3, qt.Equals, 3)
}
func TestNamedCacheConcurrent(t *testing.T) {
t.Parallel()
- assert := require.New(t)
+ c := qt.New(t)
var wg sync.WaitGroup
@@ -71,8 +71,8 @@ func TestNamedCacheConcurrent(t *testing.T) {
for j := 0; j < 100; j++ {
id := fmt.Sprintf("id%d", j)
v, err := cache.GetOrCreate(id, create(j))
- assert.NoError(err)
- assert.Equal(j, v)
+ c.Assert(err, qt.IsNil)
+ c.Assert(v, qt.Equals, j)
}
}()
}
diff --git a/cache/partitioned_lazy_cache_test.go b/cache/partitioned_lazy_cache_test.go
index ba8b6a454..2c61a6560 100644
--- a/cache/partitioned_lazy_cache_test.go
+++ b/cache/partitioned_lazy_cache_test.go
@@ -18,13 +18,13 @@ import (
"sync"
"testing"
- "github.com/stretchr/testify/require"
+ qt "github.com/frankban/quicktest"
)
func TestNewPartitionedLazyCache(t *testing.T) {
t.Parallel()
- assert := require.New(t)
+ c := qt.New(t)
p1 := Partition{
Key: "p1",
@@ -51,28 +51,28 @@ func TestNewPartitionedLazyCache(t *testing.T) {
cache := NewPartitionedLazyCache(p1, p2)
v, err := cache.Get("p1", "p1_1")
- assert.NoError(err)
- assert.Equal("p1v1", v)
+ c.Assert(err, qt.IsNil)
+ c.Assert(v, qt.Equals, "p1v1")
v, err = cache.Get("p1", "p2_1")
- assert.NoError(err)
- assert.Nil(v)
+ c.Assert(err, qt.IsNil)
+ c.Assert(v, qt.IsNil)
v, err = cache.Get("p1", "p1_nil")
- assert.NoError(err)
- assert.Nil(v)
+ c.Assert(err, qt.IsNil)
+ c.Assert(v, qt.IsNil)
v, err = cache.Get("p2", "p2_3")
- assert.NoError(err)
- assert.Equal("p2v3", v)
+ c.Assert(err, qt.IsNil)
+ c.Assert(v, qt.Equals, "p2v3")
v, err = cache.Get("doesnotexist", "p1_1")
- assert.NoError(err)
- assert.Nil(v)
+ c.Assert(err, qt.IsNil)
+ c.Assert(v, qt.IsNil)
v, err = cache.Get("p1", "doesnotexist")
- assert.NoError(err)
- assert.Nil(v)
+ c.Assert(err, qt.IsNil)
+ c.Assert(v, qt.IsNil)
errorP := Partition{
Key: "p3",
@@ -84,18 +84,18 @@ func TestNewPartitionedLazyCache(t *testing.T) {
cache = NewPartitionedLazyCache(errorP)
v, err = cache.Get("p1", "doesnotexist")
- assert.NoError(err)
- assert.Nil(v)
+ c.Assert(err, qt.IsNil)
+ c.Assert(v, qt.IsNil)
_, err = cache.Get("p3", "doesnotexist")
- assert.Error(err)
+ c.Assert(err, qt.Not(qt.IsNil))
}
func TestConcurrentPartitionedLazyCache(t *testing.T) {
t.Parallel()
- assert := require.New(t)
+ c := qt.New(t)
var wg sync.WaitGroup
@@ -129,8 +129,8 @@ func TestConcurrentPartitionedLazyCache(t *testing.T) {
defer wg.Done()
for j := 0; j < 10; j++ {
v, err := cache.Get("p1", "p1_1")
- assert.NoError(err)
- assert.Equal("p1v1", v)
+ c.Assert(err, qt.IsNil)
+ c.Assert(v, qt.Equals, "p1v1")
}
}()
}