summary | refs | log | tree | commit | diff | stats
path: root/src/tokenizer_test.go
diff options
context:
space:
mode:
author: Junegunn Choi <junegunn.c@gmail.com> 2016-08-14 00:39:44 +0900
committer: Junegunn Choi <junegunn.c@gmail.com> 2016-08-14 00:41:30 +0900
commit: 1d4057c20907b7d263d6f2b8cb4350a024859dfe (patch)
tree: adb1edd9c4f1806cd65f8c5117645c22618c7301 /src/tokenizer_test.go
parent: 822b86942c4ffb0dbf7fd096584d2970675f3ebc (diff)
[perf] Avoid allocating rune array for ascii string
In the best case (all ascii), this reduces the memory footprint by 60% and the response time by 15% to 20%. In the worst case (every line has non-ascii characters), 3 to 4% overhead is observed.
Diffstat (limited to 'src/tokenizer_test.go')
-rw-r--r--  src/tokenizer_test.go  44
1 file changed, 24 insertions, 20 deletions
diff --git a/src/tokenizer_test.go b/src/tokenizer_test.go
index b0924402..1dd44144 100644
--- a/src/tokenizer_test.go
+++ b/src/tokenizer_test.go
@@ -1,6 +1,10 @@
package fzf
-import "testing"
+import (
+ "testing"
+
+ "github.com/junegunn/fzf/src/util"
+)
func TestParseRange(t *testing.T) {
{
@@ -43,23 +47,23 @@ func TestParseRange(t *testing.T) {
func TestTokenize(t *testing.T) {
// AWK-style
input := " abc: def: ghi "
- tokens := Tokenize([]rune(input), Delimiter{})
- if string(tokens[0].text) != "abc: " || tokens[0].prefixLength != 2 || tokens[0].trimLength != 4 {
+ tokens := Tokenize(util.RunesToChars([]rune(input)), Delimiter{})
+ if tokens[0].text.ToString() != "abc: " || tokens[0].prefixLength != 2 || tokens[0].trimLength != 4 {
t.Errorf("%s", tokens)
}
// With delimiter
- tokens = Tokenize([]rune(input), delimiterRegexp(":"))
- if string(tokens[0].text) != " abc:" || tokens[0].prefixLength != 0 || tokens[0].trimLength != 4 {
+ tokens = Tokenize(util.RunesToChars([]rune(input)), delimiterRegexp(":"))
+ if tokens[0].text.ToString() != " abc:" || tokens[0].prefixLength != 0 || tokens[0].trimLength != 4 {
t.Errorf("%s", tokens)
}
// With delimiter regex
- tokens = Tokenize([]rune(input), delimiterRegexp("\\s+"))
- if string(tokens[0].text) != " " || tokens[0].prefixLength != 0 || tokens[0].trimLength != 0 ||
- string(tokens[1].text) != "abc: " || tokens[1].prefixLength != 2 || tokens[1].trimLength != 4 ||
- string(tokens[2].text) != "def: " || tokens[2].prefixLength != 8 || tokens[2].trimLength != 4 ||
- string(tokens[3].text) != "ghi " || tokens[3].prefixLength != 14 || tokens[3].trimLength != 3 {
+ tokens = Tokenize(util.RunesToChars([]rune(input)), delimiterRegexp("\\s+"))
+ if tokens[0].text.ToString() != " " || tokens[0].prefixLength != 0 || tokens[0].trimLength != 0 ||
+ tokens[1].text.ToString() != "abc: " || tokens[1].prefixLength != 2 || tokens[1].trimLength != 4 ||
+ tokens[2].text.ToString() != "def: " || tokens[2].prefixLength != 8 || tokens[2].trimLength != 4 ||
+ tokens[3].text.ToString() != "ghi " || tokens[3].prefixLength != 14 || tokens[3].trimLength != 3 {
t.Errorf("%s", tokens)
}
}
@@ -67,7 +71,7 @@ func TestTokenize(t *testing.T) {
func TestTransform(t *testing.T) {
input := " abc: def: ghi: jkl"
{
- tokens := Tokenize([]rune(input), Delimiter{})
+ tokens := Tokenize(util.RunesToChars([]rune(input)), Delimiter{})
{
ranges := splitNth("1,2,3")
tx := Transform(tokens, ranges)
@@ -80,25 +84,25 @@ func TestTransform(t *testing.T) {
tx := Transform(tokens, ranges)
if string(joinTokens(tx)) != "abc: def: ghi: def: ghi: jklabc: " ||
len(tx) != 4 ||
- string(tx[0].text) != "abc: def: " || tx[0].prefixLength != 2 ||
- string(tx[1].text) != "ghi: " || tx[1].prefixLength != 14 ||
- string(tx[2].text) != "def: ghi: jkl" || tx[2].prefixLength != 8 ||
- string(tx[3].text) != "abc: " || tx[3].prefixLength != 2 {
+ tx[0].text.ToString() != "abc: def: " || tx[0].prefixLength != 2 ||
+ tx[1].text.ToString() != "ghi: " || tx[1].prefixLength != 14 ||
+ tx[2].text.ToString() != "def: ghi: jkl" || tx[2].prefixLength != 8 ||
+ tx[3].text.ToString() != "abc: " || tx[3].prefixLength != 2 {
t.Errorf("%s", tx)
}
}
}
{
- tokens := Tokenize([]rune(input), delimiterRegexp(":"))
+ tokens := Tokenize(util.RunesToChars([]rune(input)), delimiterRegexp(":"))
{
ranges := splitNth("1..2,3,2..,1")
tx := Transform(tokens, ranges)
if string(joinTokens(tx)) != " abc: def: ghi: def: ghi: jkl abc:" ||
len(tx) != 4 ||
- string(tx[0].text) != " abc: def:" || tx[0].prefixLength != 0 ||
- string(tx[1].text) != " ghi:" || tx[1].prefixLength != 12 ||
- string(tx[2].text) != " def: ghi: jkl" || tx[2].prefixLength != 6 ||
- string(tx[3].text) != " abc:" || tx[3].prefixLength != 0 {
+ tx[0].text.ToString() != " abc: def:" || tx[0].prefixLength != 0 ||
+ tx[1].text.ToString() != " ghi:" || tx[1].prefixLength != 12 ||
+ tx[2].text.ToString() != " def: ghi: jkl" || tx[2].prefixLength != 6 ||
+ tx[3].text.ToString() != " abc:" || tx[3].prefixLength != 0 {
t.Errorf("%s", tx)
}
}