author     Bjørn Erik Pedersen <bjorn.erik.pedersen@gmail.com>  2018-10-21 12:20:21 +0200
committer  Bjørn Erik Pedersen <bjorn.erik.pedersen@gmail.com>  2018-10-22 20:46:14 +0200
commit     d1661b823af25c50d3bbe5366ea40a3cdd52e237 (patch)
tree       cd84d18229fb9c294ff1be56d7c0ce92a8f46761 /parser
parent     7930d2132a3c36c1aaca20f16f56978c84656b0a (diff)
hugolib: Continue the file context/line number errors work
See #5324
Diffstat (limited to 'parser')
-rw-r--r--parser/pageparser/pagelexer.go17
-rw-r--r--parser/pageparser/pageparser.go5
-rw-r--r--parser/pageparser/pageparser_intro_test.go8
3 files changed, 22 insertions, 8 deletions
diff --git a/parser/pageparser/pagelexer.go b/parser/pageparser/pagelexer.go
index b68850b10..e02475d42 100644
--- a/parser/pageparser/pagelexer.go
+++ b/parser/pageparser/pagelexer.go
@@ -408,15 +408,22 @@ func (l *pageLexer) lexFrontMatterSection(tp ItemType, delimr rune, name string,
}
}
+ // Let front matter start at line 1
+ wasEndOfLine := l.consumeCRLF()
// We don't care about the delimiters.
l.ignore()
+ var r rune
+
for {
- r := l.next()
- if r == eof {
- return l.errorf("EOF looking for end %s front matter delimiter", name)
+ if !wasEndOfLine {
+ r = l.next()
+ if r == eof {
+ return l.errorf("EOF looking for end %s front matter delimiter", name)
+ }
}
- if isEndOfLine(r) {
+
+ if wasEndOfLine || isEndOfLine(r) {
if l.hasPrefix(delim) {
l.emit(tp)
l.pos += 3
@@ -425,6 +432,8 @@ func (l *pageLexer) lexFrontMatterSection(tp ItemType, delimr rune, name string,
break
}
}
+
+ wasEndOfLine = false
}
return lexMainSection
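
Note: the hunk above makes the lexer swallow the line ending that follows the opening front matter delimiter, so the emitted front matter value starts at line 1 and line numbers reported for front matter errors match the source file. Below is a minimal sketch of what a consumeCRLF-style helper could look like, assuming a lexer that tracks the raw input and a byte offset; it is illustrative and not necessarily Hugo's exact implementation.

package main

import "fmt"

// lexer is a minimal, hypothetical stand-in for pageLexer: the raw input
// and the current byte offset, mirroring the shape suggested by the diff.
type lexer struct {
	input []byte
	pos   int
}

// consumeCRLF advances past an optional "\r" and "\n" at the current
// position and reports whether any line ending was consumed. It is a
// sketch of what a consumeCRLF-style helper could do, not Hugo's code.
func (l *lexer) consumeCRLF() bool {
	consumed := false
	for _, c := range []byte{'\r', '\n'} {
		if l.pos < len(l.input) && l.input[l.pos] == c {
			l.pos++
			consumed = true
		}
	}
	return consumed
}

func main() {
	l := &lexer{input: []byte("---\r\nfoo: \"bar\"\n---\n"), pos: 3} // just past the opening "---"
	fmt.Println(l.consumeCRLF(), l.pos) // true 5: the front matter body now starts at line 1
}
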
diff --git a/parser/pageparser/pageparser.go b/parser/pageparser/pageparser.go
index 2cd141d37..6e75f195a 100644
--- a/parser/pageparser/pageparser.go
+++ b/parser/pageparser/pageparser.go
@@ -66,6 +66,11 @@ func (t *Iterator) Next() Item {
return t.current()
}
+// Input returns the input source.
+func (t *Iterator) Input() []byte {
+ return t.l.Input()
+}
+
var errIndexOutOfBounds = Item{tError, 0, []byte("no more tokens")}
func (t *Iterator) current() Item {
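
Note: Input() exposes the raw source the lexer was fed. Combined with an item's byte offset, a caller can derive a 1-based line number for file context error messages, which is the thrust of #5324. The helper below is a generic sketch of that offset-to-line mapping under that assumption, not Hugo's own API.

package main

import (
	"bytes"
	"fmt"
)

// lineNumber returns the 1-based line number of the byte offset pos within
// input, by counting newlines up to the offset. Illustrative only; Hugo's
// error handling may compute this differently.
func lineNumber(input []byte, pos int) int {
	if pos > len(input) {
		pos = len(input)
	}
	return bytes.Count(input[:pos], []byte("\n")) + 1
}

func main() {
	src := []byte("---\nfoo: \"bar\"\n---\n\nSome text.\n")
	pos := bytes.Index(src, []byte("Some")) // offset of "Some" in the document above
	fmt.Println(lineNumber(src, pos))       // 5
}
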
diff --git a/parser/pageparser/pageparser_intro_test.go b/parser/pageparser/pageparser_intro_test.go
index 1a8c2d237..32de6dc44 100644
--- a/parser/pageparser/pageparser_intro_test.go
+++ b/parser/pageparser/pageparser_intro_test.go
@@ -32,9 +32,9 @@ func nti(tp ItemType, val string) Item {
var (
tstJSON = `{ "a": { "b": "\"Hugo\"}" } }`
- tstFrontMatterTOML = nti(TypeFrontMatterTOML, "\nfoo = \"bar\"\n")
- tstFrontMatterYAML = nti(TypeFrontMatterYAML, "\nfoo: \"bar\"\n")
- tstFrontMatterYAMLCRLF = nti(TypeFrontMatterYAML, "\r\nfoo: \"bar\"\r\n")
+ tstFrontMatterTOML = nti(TypeFrontMatterTOML, "foo = \"bar\"\n")
+ tstFrontMatterYAML = nti(TypeFrontMatterYAML, "foo: \"bar\"\n")
+ tstFrontMatterYAMLCRLF = nti(TypeFrontMatterYAML, "foo: \"bar\"\r\n")
tstFrontMatterJSON = nti(TypeFrontMatterJSON, tstJSON+"\r\n")
tstSomeText = nti(tText, "\nSome text.\n")
tstSummaryDivider = nti(TypeLeadSummaryDivider, "<!--more-->")
@@ -58,7 +58,7 @@ var frontMatterTests = []lexerTest{
{"HTML Document 2", `<html><h1>Hugo Rocks</h1></html>`, []Item{nti(TypeHTMLDocument, "<html><h1>Hugo Rocks</h1></html>"), tstEOF}},
{"No front matter", "\nSome text.\n", []Item{tstSomeText, tstEOF}},
{"YAML front matter", "---\nfoo: \"bar\"\n---\n\nSome text.\n", []Item{tstFrontMatterYAML, tstSomeText, tstEOF}},
- {"YAML empty front matter", "---\n---\n\nSome text.\n", []Item{nti(TypeFrontMatterYAML, "\n"), tstSomeText, tstEOF}},
+ {"YAML empty front matter", "---\n---\n\nSome text.\n", []Item{nti(TypeFrontMatterYAML, ""), tstSomeText, tstEOF}},
{"YAML commented out front matter", "<!--\n---\nfoo: \"bar\"\n---\n-->\nSome text.\n", []Item{nti(TypeHTMLComment, "<!--\n---\nfoo: \"bar\"\n---\n-->"), tstSomeText, tstEOF}},
// Note that we keep all bytes as they are, but we need to handle CRLF
{"YAML front matter CRLF", "---\r\nfoo: \"bar\"\r\n---\n\nSome text.\n", []Item{tstFrontMatterYAMLCRLF, tstSomeText, tstEOF}},