path: root/src/tokenizer_test.go
author    Junegunn Choi <junegunn.c@gmail.com>  2017-07-20 02:44:30 +0900
committer Junegunn Choi <junegunn.c@gmail.com>  2017-07-20 02:44:30 +0900
commit    c9f16b6430f3b9c9d12ee078e2218e8467c13340 (patch)
tree      b0d7e33da2d605696d98da98e1691bf8d89437de /src/tokenizer_test.go
parent    bc9d2abdb67639e06f7002b278341fb498b79456 (diff)
Avoid unconditionally storing input as runes
When --with-nth is used, fzf used to preprocess each line and store the result as a rune array, which was wasteful when the line contained only ASCII characters.
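To illustrate the idea, here is a minimal self-contained sketch of the optimization. The chars/toChars names are hypothetical stand-ins, not fzf's actual util.Chars API: scan the line once, keep the raw bytes when everything is ASCII, and fall back to a rune slice only when a multibyte character appears.

    package main

    import (
    	"fmt"
    	"unicode/utf8"
    )

    // chars holds a line either as raw bytes (ASCII fast path) or as
    // decoded runes (only when a multibyte character is present).
    type chars struct {
    	bytes []byte // set when the input is ASCII-only
    	runes []rune // set otherwise
    }

    // toChars scans the input once; if every byte is below utf8.RuneSelf
    // (0x80), it keeps the bytes and skips the rune conversion entirely.
    func toChars(s string) chars {
    	for i := 0; i < len(s); i++ {
    		if s[i] >= utf8.RuneSelf {
    			return chars{runes: []rune(s)}
    		}
    	}
    	return chars{bytes: []byte(s)}
    }

    // Length works on either representation.
    func (c chars) Length() int {
    	if c.bytes != nil {
    		return len(c.bytes)
    	}
    	return len(c.runes)
    }

    func main() {
    	fmt.Println(toChars("abc: def").Length()) // ASCII fast path: 8
    	fmt.Println(toChars("héllo").Length())    // rune fallback: 5
    }

With a representation like this, an ASCII-only line costs no up-front conversion or extra allocation; only lines that actually contain multibyte characters pay for the rune slice.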
Diffstat (limited to 'src/tokenizer_test.go')
-rw-r--r--  src/tokenizer_test.go | 14
1 file changed, 6 insertions(+), 8 deletions(-)
diff --git a/src/tokenizer_test.go b/src/tokenizer_test.go
index 59250906..110fd062 100644
--- a/src/tokenizer_test.go
+++ b/src/tokenizer_test.go
@@ -2,8 +2,6 @@ package fzf
 
 import (
 	"testing"
-
-	"github.com/junegunn/fzf/src/util"
 )
 
 func TestParseRange(t *testing.T) {
@@ -47,19 +45,19 @@ func TestParseRange(t *testing.T) {
 func TestTokenize(t *testing.T) {
 	// AWK-style
 	input := "  abc:  def:  ghi  "
-	tokens := Tokenize(util.RunesToChars([]rune(input)), Delimiter{})
+	tokens := Tokenize(input, Delimiter{})
 	if tokens[0].text.ToString() != "abc:  " || tokens[0].prefixLength != 2 {
 		t.Errorf("%s", tokens)
 	}
 
 	// With delimiter
-	tokens = Tokenize(util.RunesToChars([]rune(input)), delimiterRegexp(":"))
+	tokens = Tokenize(input, delimiterRegexp(":"))
 	if tokens[0].text.ToString() != "  abc:" || tokens[0].prefixLength != 0 {
-		t.Errorf("%s", tokens)
+		t.Error(tokens[0].text.ToString(), tokens[0].prefixLength)
 	}
 
 	// With delimiter regex
-	tokens = Tokenize(util.RunesToChars([]rune(input)), delimiterRegexp("\\s+"))
+	tokens = Tokenize(input, delimiterRegexp("\\s+"))
 	if tokens[0].text.ToString() != "  " || tokens[0].prefixLength != 0 ||
 		tokens[1].text.ToString() != "abc:  " || tokens[1].prefixLength != 2 ||
 		tokens[2].text.ToString() != "def:  " || tokens[2].prefixLength != 8 ||
@@ -71,7 +69,7 @@ func TestTokenize(t *testing.T) {
 func TestTransform(t *testing.T) {
 	input := "  abc:  def:  ghi:  jkl"
 	{
-		tokens := Tokenize(util.RunesToChars([]rune(input)), Delimiter{})
+		tokens := Tokenize(input, Delimiter{})
 		{
 			ranges := splitNth("1,2,3")
 			tx := Transform(tokens, ranges)
@@ -93,7 +91,7 @@ func TestTransform(t *testing.T) {
 		}
 	}
 	{
-		tokens := Tokenize(util.RunesToChars([]rune(input)), delimiterRegexp(":"))
+		tokens := Tokenize(input, delimiterRegexp(":"))
 		{
 			ranges := splitNth("1..2,3,2..,1")
 			tx := Transform(tokens, ranges)
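For a rough sense of the per-line cost this commit avoids, here is a standalone micro-benchmark sketch using only the standard library; the line content mirrors the test input above, and the numbers will vary by machine:

    package main

    import (
    	"fmt"
    	"testing"
    )

    // Package-level sink keeps the compiler from optimizing the
    // conversion away.
    var sink []rune

    func main() {
    	line := "  abc:  def:  ghi  " // ASCII-only, like the test input
    	res := testing.Benchmark(func(b *testing.B) {
    		b.ReportAllocs()
    		for i := 0; i < b.N; i++ {
    			sink = []rune(line) // the per-line conversion the commit removes
    		}
    	})
    	fmt.Println(res, res.MemString())
    }

Each []rune conversion allocates a fresh slice at four bytes per element, so skipping it for ASCII lines saves both the copy and the allocation on every line fzf ingests.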