path: root/src/tokenizer_test.go
author     Junegunn Choi <junegunn.c@gmail.com>   2015-08-02 14:00:18 +0900
committer  Junegunn Choi <junegunn.c@gmail.com>   2015-08-02 14:00:18 +0900
commit     0ea66329b84cc6e4f8ff61ee99c00bb238070247 (patch)
tree       72c3bc62ec491246390b56b2aac5b33645839503 /src/tokenizer_test.go
parent     634670e3ea51a2fa1498a3de0c074b819828e2d8 (diff)
Performance tuning - eager rune array conversion
> wc -l /tmp/list2
 2594098 /tmp/list2

> time cat /tmp/list2 | fzf-0.10.1-darwin_amd64 -fqwerty > /dev/null
real    0m5.418s
user    0m10.990s
sys     0m1.302s

> time cat /tmp/list2 | fzf-head -fqwerty > /dev/null
real    0m4.862s
user    0m6.619s
sys     0m0.982s
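What the numbers above reflect: instead of handing each line around as a *string and re-decoding it wherever character positions are needed, the line is converted to a []rune once, eagerly, and every later stage indexes that slice. A minimal sketch of the idea follows; the type and function names are illustrative only and are not taken from the fzf sources.

package main

import "fmt"

// item holds one input line after a single, eager []rune conversion.
// (Hypothetical type for illustration; not fzf's actual item/chunk code.)
type item struct {
	text []rune
}

func newItem(line string) item {
	return item{text: []rune(line)} // convert exactly once, at read time
}

func main() {
	it := newItem("  abc:  def:  ghi  ")
	// Downstream code slices runes by index, with no further decoding.
	fmt.Println(string(it.text[2:6])) // prints "abc:"
}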
Diffstat (limited to 'src/tokenizer_test.go')
-rw-r--r--   src/tokenizer_test.go   44
1 file changed, 22 insertions(+), 22 deletions(-)
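The diff only touches the tests, so the corresponding declarations in src/tokenizer.go are not shown here. The shape implied by the new assertions (token text held directly as []rune, Tokenize taking a pre-converted rune slice, Transform and joinTokens returning plain slices instead of pointers) would look roughly like the sketch below; this is an inference from the dropped dereferences, not a copy of the actual code.

// Inferred shape only; the real definitions in src/tokenizer.go may differ.
type Token struct {
	text         []rune // previously *string, hence the dropped dereferences below
	prefixLength int
}

// Previously the tests called Tokenize(&input, ...) on a string; now the
// caller passes the rune slice it has already built. Assumed signatures:
//   func Tokenize(runes []rune, delimiter *regexp.Regexp) []Token
//   func Transform(tokens []Token, ranges []Range) []Token
//   func joinTokens(tokens []Token) []rune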
diff --git a/src/tokenizer_test.go b/src/tokenizer_test.go
index 0362b5ab..06603ae9 100644
--- a/src/tokenizer_test.go
+++ b/src/tokenizer_test.go
@@ -43,14 +43,14 @@ func TestParseRange(t *testing.T) {
 func TestTokenize(t *testing.T) {
 	// AWK-style
 	input := "  abc:  def:  ghi  "
-	tokens := Tokenize(&input, nil)
-	if string(*tokens[0].text) != "abc:  " || tokens[0].prefixLength != 2 {
+	tokens := Tokenize([]rune(input), nil)
+	if string(tokens[0].text) != "abc:  " || tokens[0].prefixLength != 2 {
 		t.Errorf("%s", tokens)
 	}
 
 	// With delimiter
-	tokens = Tokenize(&input, delimiterRegexp(":"))
-	if string(*tokens[0].text) != "  abc:" || tokens[0].prefixLength != 0 {
+	tokens = Tokenize([]rune(input), delimiterRegexp(":"))
+	if string(tokens[0].text) != "  abc:" || tokens[0].prefixLength != 0 {
 		t.Errorf("%s", tokens)
 	}
 }
@@ -58,39 +58,39 @@ func TestTokenize(t *testing.T) {
 func TestTransform(t *testing.T) {
 	input := "  abc:  def:  ghi:  jkl"
 	{
-		tokens := Tokenize(&input, nil)
+		tokens := Tokenize([]rune(input), nil)
 		{
 			ranges := splitNth("1,2,3")
 			tx := Transform(tokens, ranges)
-			if *joinTokens(tx) != "abc:  def:  ghi:  " {
-				t.Errorf("%s", *tx)
+			if string(joinTokens(tx)) != "abc:  def:  ghi:  " {
+				t.Errorf("%s", tx)
 			}
 		}
 		{
 			ranges := splitNth("1..2,3,2..,1")
 			tx := Transform(tokens, ranges)
-			if *joinTokens(tx) != "abc:  def:  ghi:  def:  ghi:  jklabc:  " ||
-				len(*tx) != 4 ||
-				string(*(*tx)[0].text) != "abc:  def:  " || (*tx)[0].prefixLength != 2 ||
-				string(*(*tx)[1].text) != "ghi:  " || (*tx)[1].prefixLength != 14 ||
-				string(*(*tx)[2].text) != "def:  ghi:  jkl" || (*tx)[2].prefixLength != 8 ||
-				string(*(*tx)[3].text) != "abc:  " || (*tx)[3].prefixLength != 2 {
-				t.Errorf("%s", *tx)
+			if string(joinTokens(tx)) != "abc:  def:  ghi:  def:  ghi:  jklabc:  " ||
+				len(tx) != 4 ||
+				string(tx[0].text) != "abc:  def:  " || tx[0].prefixLength != 2 ||
+				string(tx[1].text) != "ghi:  " || tx[1].prefixLength != 14 ||
+				string(tx[2].text) != "def:  ghi:  jkl" || tx[2].prefixLength != 8 ||
+				string(tx[3].text) != "abc:  " || tx[3].prefixLength != 2 {
+				t.Errorf("%s", tx)
 			}
 		}
 	}
 	{
-		tokens := Tokenize(&input, delimiterRegexp(":"))
+		tokens := Tokenize([]rune(input), delimiterRegexp(":"))
 		{
 			ranges := splitNth("1..2,3,2..,1")
 			tx := Transform(tokens, ranges)
-			if *joinTokens(tx) != "  abc:  def:  ghi:  def:  ghi:  jkl  abc:" ||
-				len(*tx) != 4 ||
-				string(*(*tx)[0].text) != "  abc:  def:" || (*tx)[0].prefixLength != 0 ||
-				string(*(*tx)[1].text) != "  ghi:" || (*tx)[1].prefixLength != 12 ||
-				string(*(*tx)[2].text) != "  def:  ghi:  jkl" || (*tx)[2].prefixLength != 6 ||
-				string(*(*tx)[3].text) != "  abc:" || (*tx)[3].prefixLength != 0 {
-				t.Errorf("%s", *tx)
+			if string(joinTokens(tx)) != "  abc:  def:  ghi:  def:  ghi:  jkl  abc:" ||
+				len(tx) != 4 ||
+				string(tx[0].text) != "  abc:  def:" || tx[0].prefixLength != 0 ||
+				string(tx[1].text) != "  ghi:" || tx[1].prefixLength != 12 ||
+				string(tx[2].text) != "  def:  ghi:  jkl" || tx[2].prefixLength != 6 ||
+				string(tx[3].text) != "  abc:" || tx[3].prefixLength != 0 {
+				t.Errorf("%s", tx)
 			}
 		}
 	}
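Read together, the two hunks follow one pattern: call sites stop taking the address of the input string and stop dereferencing token text, because everything is now a value slice. A sketch of how one more test in this package might exercise the updated API, assuming the signatures inferred above; the function name and expected value are illustrative and not part of the commit.

// Sketch only: not part of this commit; assumes the post-change signatures.
func TestTokenizeEagerRunes(t *testing.T) {
	runes := []rune("  abc:  def:  ghi  ") // eager, one-time conversion
	tokens := Tokenize(runes, nil)          // AWK-style split
	tx := Transform(tokens, splitNth("1,2"))
	if string(joinTokens(tx)) != "abc:  def:  " {
		t.Errorf("%s", tx)
	}
}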