diff options
author | Junegunn Choi <junegunn.c@gmail.com> | 2015-10-02 18:40:20 +0900 |
---|---|---|
committer | Junegunn Choi <junegunn.c@gmail.com> | 2015-10-02 18:40:20 +0900 |
commit | 92a75c9563600a174e9ee8334853f99ed560492a (patch) | |
tree | c65a17633ee57dbfbbafa4b351c41bbbfffa3f9f /src/tokenizer_test.go | |
parent | 7c7a30c472463e0115adcf8bc2a792b48c03bf08 (diff) |
Use trimmed length when --nth is used with --tiebreak=length
This change improves sort ordering for aligned tabular input.
Given the following input:
apple juice 100
apple pie 200
fzf --nth=2 will now prefer the one with pie. Before this change, fzf
compared "juice " and "pie ", both of which have the same length.
Diffstat (limited to 'src/tokenizer_test.go')
-rw-r--r-- | src/tokenizer_test.go | 12 |
1 file changed, 6 insertions, 6 deletions
diff --git a/src/tokenizer_test.go b/src/tokenizer_test.go index 0f95aa13..b0924402 100644 --- a/src/tokenizer_test.go +++ b/src/tokenizer_test.go @@ -44,22 +44,22 @@ func TestTokenize(t *testing.T) { // AWK-style input := " abc: def: ghi " tokens := Tokenize([]rune(input), Delimiter{}) - if string(tokens[0].text) != "abc: " || tokens[0].prefixLength != 2 { + if string(tokens[0].text) != "abc: " || tokens[0].prefixLength != 2 || tokens[0].trimLength != 4 { t.Errorf("%s", tokens) } // With delimiter tokens = Tokenize([]rune(input), delimiterRegexp(":")) - if string(tokens[0].text) != " abc:" || tokens[0].prefixLength != 0 { + if string(tokens[0].text) != " abc:" || tokens[0].prefixLength != 0 || tokens[0].trimLength != 4 { t.Errorf("%s", tokens) } // With delimiter regex tokens = Tokenize([]rune(input), delimiterRegexp("\\s+")) - if string(tokens[0].text) != " " || tokens[0].prefixLength != 0 || - string(tokens[1].text) != "abc: " || tokens[1].prefixLength != 2 || - string(tokens[2].text) != "def: " || tokens[2].prefixLength != 8 || - string(tokens[3].text) != "ghi " || tokens[3].prefixLength != 14 { + if string(tokens[0].text) != " " || tokens[0].prefixLength != 0 || tokens[0].trimLength != 0 || + string(tokens[1].text) != "abc: " || tokens[1].prefixLength != 2 || tokens[1].trimLength != 4 || + string(tokens[2].text) != "def: " || tokens[2].prefixLength != 8 || tokens[2].trimLength != 4 || + string(tokens[3].text) != "ghi " || tokens[3].prefixLength != 14 || tokens[3].trimLength != 3 { t.Errorf("%s", tokens) } } |