Diffstat (limited to 'driver')
-rw-r--r--driver/lexer/lexer.go311
-rw-r--r--driver/lexer/lexer_test.go918
-rw-r--r--driver/lexer/spec.go71
-rw-r--r--driver/lexer/template.go760
-rw-r--r--driver/parser/conflict_test.go (renamed from driver/conflict_test.go)13
-rw-r--r--driver/parser/lac_test.go (renamed from driver/lac_test.go)13
-rw-r--r--driver/parser/parser.go (renamed from driver/parser.go)2
-rw-r--r--driver/parser/parser_test.go (renamed from driver/parser_test.go)13
-rw-r--r--driver/parser/semantic_action.go (renamed from driver/semantic_action.go)2
-rw-r--r--driver/parser/semantic_action_test.go (renamed from driver/semantic_action_test.go)18
-rw-r--r--driver/parser/spec.go (renamed from driver/spec.go)30
-rw-r--r--driver/parser/syntax_error_test.go (renamed from driver/syntax_error_test.go)22
-rw-r--r--driver/parser/template.go (renamed from driver/template.go)34
-rw-r--r--driver/parser/token_stream.go (renamed from driver/token_stream.go)12
14 files changed, 2125 insertions, 94 deletions
diff --git a/driver/lexer/lexer.go b/driver/lexer/lexer.go
new file mode 100644
index 0000000..de7cdbd
--- /dev/null
+++ b/driver/lexer/lexer.go
@@ -0,0 +1,311 @@
+package lexer
+
+import (
+ "fmt"
+ "io"
+)
+
+type ModeID int
+
+func (id ModeID) Int() int {
+ return int(id)
+}
+
+type StateID int
+
+func (id StateID) Int() int {
+ return int(id)
+}
+
+type KindID int
+
+func (id KindID) Int() int {
+ return int(id)
+}
+
+type ModeKindID int
+
+func (id ModeKindID) Int() int {
+ return int(id)
+}
+
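+// LexSpec is the interface through which the lexer accesses a compiled lexical
+// specification: the mode transition tables and the DFA of each lex mode.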
+type LexSpec interface {
+ InitialMode() ModeID
+ Pop(mode ModeID, modeKind ModeKindID) bool
+ Push(mode ModeID, modeKind ModeKindID) (ModeID, bool)
+ ModeName(mode ModeID) string
+ InitialState(mode ModeID) StateID
+ NextState(mode ModeID, state StateID, v int) (StateID, bool)
+ Accept(mode ModeID, state StateID) (ModeKindID, bool)
+ KindIDAndName(mode ModeID, modeKind ModeKindID) (KindID, string)
+}
+
+// Token represents a token.
+type Token struct {
+ // ModeID is an ID of a lex mode.
+ ModeID ModeID
+
+ // KindID is an ID of a kind. This is unique among all modes.
+ KindID KindID
+
+ // ModeKindID is an ID of a lexical kind. This is unique only within a mode.
+ // Note that you need to use KindID field if you want to identify a kind across all modes.
+ ModeKindID ModeKindID
+
+ // Row is a row number where a lexeme appears.
+ Row int
+
+ // Col is a column number where a lexeme appears.
+ // Note that Col is counted in code points, not bytes.
+ Col int
+
+ // Lexeme is a byte sequence that matched a pattern in the lexical specification.
+ Lexeme []byte
+
+ // When this field is true, it means the token is the EOF token.
+ EOF bool
+
+ // When this field is true, it means the token is an error token.
+ Invalid bool
+}
+
+type LexerOption func(l *Lexer) error
+
+// DisableModeTransition disables automatic mode transition. Thus, even if the lexical specification contains push and pop
+// operations, the lexer doesn't perform them. When the lexical specification has multiple modes and this option is
+// enabled, you need to call the Lexer.PushMode and Lexer.PopMode methods to perform mode transitions yourself. You can use
+// the Lexer.Mode method to get the current lex mode.
+func DisableModeTransition() LexerOption {
+ return func(l *Lexer) error {
+ l.passiveModeTran = true
+ return nil
+ }
+}
+
+type Lexer struct {
+ spec LexSpec
+ src []byte
+ srcPtr int
+ row int
+ col int
+ prevRow int
+ prevCol int
+ tokBuf []*Token
+ modeStack []ModeID
+ passiveModeTran bool
+}
+
+// NewLexer returns a new lexer.
+func NewLexer(spec LexSpec, src io.Reader, opts ...LexerOption) (*Lexer, error) {
+ b, err := io.ReadAll(src)
+ if err != nil {
+ return nil, err
+ }
+ l := &Lexer{
+ spec: spec,
+ src: b,
+ srcPtr: 0,
+ row: 0,
+ col: 0,
+ modeStack: []ModeID{
+ spec.InitialMode(),
+ },
+ passiveModeTran: false,
+ }
+ for _, opt := range opts {
+ err := opt(l)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return l, nil
+}
+
+// Next returns the next token. Consecutive invalid characters are concatenated into a single invalid token,
+// and the first valid token that follows is buffered until the next call.
+func (l *Lexer) Next() (*Token, error) {
+ if len(l.tokBuf) > 0 {
+ tok := l.tokBuf[0]
+ l.tokBuf = l.tokBuf[1:]
+ return tok, nil
+ }
+
+ tok, err := l.nextAndTransition()
+ if err != nil {
+ return nil, err
+ }
+ if !tok.Invalid {
+ return tok, nil
+ }
+ errTok := tok
+ for {
+ tok, err = l.nextAndTransition()
+ if err != nil {
+ return nil, err
+ }
+ if !tok.Invalid {
+ break
+ }
+ errTok.Lexeme = append(errTok.Lexeme, tok.Lexeme...)
+ }
+ l.tokBuf = append(l.tokBuf, tok)
+
+ return errTok, nil
+}
+
+func (l *Lexer) nextAndTransition() (*Token, error) {
+ tok, err := l.next()
+ if err != nil {
+ return nil, err
+ }
+ if tok.EOF || tok.Invalid {
+ return tok, nil
+ }
+ if l.passiveModeTran {
+ return tok, nil
+ }
+ mode := l.Mode()
+ if l.spec.Pop(mode, tok.ModeKindID) {
+ err := l.PopMode()
+ if err != nil {
+ return nil, err
+ }
+ }
+ if mode, ok := l.spec.Push(mode, tok.ModeKindID); ok {
+ l.PushMode(mode)
+ }
+ // The length of the mode stack must be checked after both the pop and push operations because they can occur in
+ // the same step. When the mode stack holds just one element and a pop removes it, the stack becomes temporarily
+ // empty. However, since a push operation may follow immediately, the lexer allows the stack to be empty at this
+ // intermediate point.
+ if len(l.modeStack) == 0 {
+ return nil, fmt.Errorf("a mode stack must have at least one element")
+ }
+ return tok, nil
+}
+
+func (l *Lexer) next() (*Token, error) {
+ mode := l.Mode()
+ state := l.spec.InitialState(mode)
+ buf := []byte{}
+ unfixedBufLen := 0
+ row := l.row
+ col := l.col
+ var tok *Token
+ for {
+ v, eof := l.read()
+ if eof {
+ if tok != nil {
+ l.unread(unfixedBufLen)
+ return tok, nil
+ }
+ // When the lexer reads the EOF while `buf` still holds unaccepted data, it treats the buffered data as an invalid token.
+ if len(buf) > 0 {
+ return &Token{
+ ModeID: mode,
+ ModeKindID: 0,
+ Lexeme: buf,
+ Row: row,
+ Col: col,
+ Invalid: true,
+ }, nil
+ }
+ return &Token{
+ ModeID: mode,
+ ModeKindID: 0,
+ Row: 0,
+ Col: 0,
+ EOF: true,
+ }, nil
+ }
+ buf = append(buf, v)
+ unfixedBufLen++
+ nextState, ok := l.spec.NextState(mode, state, int(v))
+ if !ok {
+ if tok != nil {
+ l.unread(unfixedBufLen)
+ return tok, nil
+ }
+ return &Token{
+ ModeID: mode,
+ ModeKindID: 0,
+ Lexeme: buf,
+ Row: row,
+ Col: col,
+ Invalid: true,
+ }, nil
+ }
+ state = nextState
+ if modeKindID, ok := l.spec.Accept(mode, state); ok {
+ kindID, _ := l.spec.KindIDAndName(mode, modeKindID)
+ tok = &Token{
+ ModeID: mode,
+ KindID: kindID,
+ ModeKindID: modeKindID,
+ Lexeme: buf,
+ Row: row,
+ Col: col,
+ }
+ unfixedBufLen = 0
+ }
+ }
+}
+
+// Mode returns the current lex mode.
+func (l *Lexer) Mode() ModeID {
+ return l.modeStack[len(l.modeStack)-1]
+}
+
+// PushMode adds a lex mode onto the mode stack.
+func (l *Lexer) PushMode(mode ModeID) {
+ l.modeStack = append(l.modeStack, mode)
+}
+
+// PopMode removes a lex mode from the top of the mode stack.
+func (l *Lexer) PopMode() error {
+ sLen := len(l.modeStack)
+ if sLen == 0 {
+ return fmt.Errorf("cannot pop a lex mode from a lex mode stack any more")
+ }
+ l.modeStack = l.modeStack[:sLen-1]
+ return nil
+}
+
+func (l *Lexer) read() (byte, bool) {
+ if l.srcPtr >= len(l.src) {
+ return 0, true
+ }
+
+ b := l.src[l.srcPtr]
+ l.srcPtr++
+
+ l.prevRow = l.row
+ l.prevCol = l.col
+
+ // Count the token positions.
+ // The driver treats LF as the end of lines and counts columns in code points, not bytes.
+ // To count in code points, we refer to the First Byte column in Table 3-6.
+ //
+ // Reference:
+ // - [Table 3-6] https://www.unicode.org/versions/Unicode13.0.0/ch03.pdf > Table 3-6. UTF-8 Bit Distribution
+ if b < 128 {
+ // 0x0A is LF.
+ if b == 0x0A {
+ l.row++
+ l.col = 0
+ } else {
+ l.col++
+ }
+ } else if b>>5 == 6 || b>>4 == 14 || b>>3 == 30 {
+ l.col++
+ }
+
+ return b, false
+}
+
+// To record the token position correctly, this function must not be called consecutively.
+func (l *Lexer) unread(n int) {
+ l.srcPtr -= n
+
+ l.row = l.prevRow
+ l.col = l.prevCol
+}
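For reference, a minimal sketch of driving this lexer. It assumes `clspec` is a compiled
*spec.LexicalSpec produced by lexical.Compile (as in the tests below) and uses NewLexSpec
from driver/lexer/spec.go; the input string is a placeholder:

    l, err := lexer.NewLexer(lexer.NewLexSpec(clspec), strings.NewReader("abb aabb"))
    if err != nil {
        // handle the error
    }
    for {
        tok, err := l.Next()
        if err != nil {
            break
        }
        if tok.EOF {
            break
        }
        if tok.Invalid {
            continue // tok.Lexeme holds the concatenated invalid characters
        }
        fmt.Printf("kind %v: %q (row %v, col %v)\n", tok.KindID, tok.Lexeme, tok.Row, tok.Col)
    }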
diff --git a/driver/lexer/lexer_test.go b/driver/lexer/lexer_test.go
new file mode 100644
index 0000000..247cc73
--- /dev/null
+++ b/driver/lexer/lexer_test.go
@@ -0,0 +1,918 @@
+package lexer
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+ "testing"
+
+ "github.com/nihei9/vartan/grammar/lexical"
+ spec "github.com/nihei9/vartan/spec/grammar"
+)
+
+func newLexEntry(modes []string, kind string, pattern string, push string, pop bool) *lexical.LexEntry {
+ ms := []spec.LexModeName{}
+ for _, m := range modes {
+ ms = append(ms, spec.LexModeName(m))
+ }
+ return &lexical.LexEntry{
+ Kind: spec.LexKindName(kind),
+ Pattern: pattern,
+ Modes: ms,
+ Push: spec.LexModeName(push),
+ Pop: pop,
+ }
+}
+
+func newLexEntryDefaultNOP(kind string, pattern string) *lexical.LexEntry {
+ return &lexical.LexEntry{
+ Kind: spec.LexKindName(kind),
+ Pattern: pattern,
+ Modes: []spec.LexModeName{
+ spec.LexModeNameDefault,
+ },
+ }
+}
+
+func newLexEntryFragment(kind string, pattern string) *lexical.LexEntry {
+ return &lexical.LexEntry{
+ Kind: spec.LexKindName(kind),
+ Pattern: pattern,
+ Fragment: true,
+ }
+}
+
+func newToken(modeID ModeID, kindID KindID, modeKindID ModeKindID, lexeme []byte) *Token {
+ return &Token{
+ ModeID: modeID,
+ KindID: kindID,
+ ModeKindID: modeKindID,
+ Lexeme: lexeme,
+ }
+}
+
+func newTokenDefault(kindID int, modeKindID int, lexeme []byte) *Token {
+ return newToken(
+ ModeID(spec.LexModeIDDefault.Int()),
+ KindID(spec.LexKindID(kindID).Int()),
+ ModeKindID(spec.LexModeKindID(modeKindID).Int()),
+ lexeme,
+ )
+}
+
+func newEOFToken(modeID ModeID, modeName string) *Token {
+ return &Token{
+ ModeID: modeID,
+ ModeKindID: 0,
+ EOF: true,
+ }
+}
+
+func newEOFTokenDefault() *Token {
+ return newEOFToken(ModeID(spec.LexModeIDDefault.Int()), spec.LexModeNameDefault.String())
+}
+
+func newInvalidTokenDefault(lexeme []byte) *Token {
+ return &Token{
+ ModeID: ModeID(spec.LexModeIDDefault.Int()),
+ ModeKindID: 0,
+ Lexeme: lexeme,
+ Invalid: true,
+ }
+}
+
+func withPos(tok *Token, row, col int) *Token {
+ tok.Row = row
+ tok.Col = col
+ return tok
+}
+
+func TestLexer_Next(t *testing.T) {
+ test := []struct {
+ lspec *lexical.LexSpec
+ src string
+ tokens []*Token
+ passiveModeTran bool
+ tran func(l *Lexer, tok *Token) error
+ }{
+ {
+ lspec: &lexical.LexSpec{
+ Entries: []*lexical.LexEntry{
+ newLexEntryDefaultNOP("t1", "(a|b)*abb"),
+ newLexEntryDefaultNOP("t2", " +"),
+ },
+ },
+ src: "abb aabb aaabb babb bbabb abbbabb",
+ tokens: []*Token{
+ newTokenDefault(1, 1, []byte("abb")),
+ newTokenDefault(2, 2, []byte(" ")),
+ newTokenDefault(1, 1, []byte("aabb")),
+ newTokenDefault(2, 2, []byte(" ")),
+ newTokenDefault(1, 1, []byte("aaabb")),
+ newTokenDefault(2, 2, []byte(" ")),
+ newTokenDefault(1, 1, []byte("babb")),
+ newTokenDefault(2, 2, []byte(" ")),
+ newTokenDefault(1, 1, []byte("bbabb")),
+ newTokenDefault(2, 2, []byte(" ")),
+ newTokenDefault(1, 1, []byte("abbbabb")),
+ newEOFTokenDefault(),
+ },
+ },
+ {
+ lspec: &lexical.LexSpec{
+ Entries: []*lexical.LexEntry{
+ newLexEntryDefaultNOP("t1", "b?a+"),
+ newLexEntryDefaultNOP("t2", "(ab)?(cd)+"),
+ newLexEntryDefaultNOP("t3", " +"),
+ },
+ },
+ src: "ba baaa a aaa abcd abcdcdcd cd cdcdcd",
+ tokens: []*Token{
+ newTokenDefault(1, 1, []byte("ba")),
+ newTokenDefault(3, 3, []byte(" ")),
+ newTokenDefault(1, 1, []byte("baaa")),
+ newTokenDefault(3, 3, []byte(" ")),
+ newTokenDefault(1, 1, []byte("a")),
+ newTokenDefault(3, 3, []byte(" ")),
+ newTokenDefault(1, 1, []byte("aaa")),
+ newTokenDefault(3, 3, []byte(" ")),
+ newTokenDefault(2, 2, []byte("abcd")),
+ newTokenDefault(3, 3, []byte(" ")),
+ newTokenDefault(2, 2, []byte("abcdcdcd")),
+ newTokenDefault(3, 3, []byte(" ")),
+ newTokenDefault(2, 2, []byte("cd")),
+ newTokenDefault(3, 3, []byte(" ")),
+ newTokenDefault(2, 2, []byte("cdcdcd")),
+ newEOFTokenDefault(),
+ },
+ },
+ {
+ lspec: &lexical.LexSpec{
+ Entries: []*lexical.LexEntry{
+ newLexEntryDefaultNOP("t1", "."),
+ },
+ },
+ src: string([]byte{
+ 0x00,
+ 0x7f,
+ 0xc2, 0x80,
+ 0xdf, 0xbf,
+ 0xe1, 0x80, 0x80,
+ 0xec, 0xbf, 0xbf,
+ 0xed, 0x80, 0x80,
+ 0xed, 0x9f, 0xbf,
+ 0xee, 0x80, 0x80,
+ 0xef, 0xbf, 0xbf,
+ 0xf0, 0x90, 0x80, 0x80,
+ 0xf0, 0xbf, 0xbf, 0xbf,
+ 0xf1, 0x80, 0x80, 0x80,
+ 0xf3, 0xbf, 0xbf, 0xbf,
+ 0xf4, 0x80, 0x80, 0x80,
+ 0xf4, 0x8f, 0xbf, 0xbf,
+ }),
+ tokens: []*Token{
+ newTokenDefault(1, 1, []byte{0x00}),
+ newTokenDefault(1, 1, []byte{0x7f}),
+ newTokenDefault(1, 1, []byte{0xc2, 0x80}),
+ newTokenDefault(1, 1, []byte{0xdf, 0xbf}),
+ newTokenDefault(1, 1, []byte{0xe1, 0x80, 0x80}),
+ newTokenDefault(1, 1, []byte{0xec, 0xbf, 0xbf}),
+ newTokenDefault(1, 1, []byte{0xed, 0x80, 0x80}),
+ newTokenDefault(1, 1, []byte{0xed, 0x9f, 0xbf}),
+ newTokenDefault(1, 1, []byte{0xee, 0x80, 0x80}),
+ newTokenDefault(1, 1, []byte{0xef, 0xbf, 0xbf}),
+ newTokenDefault(1, 1, []byte{0xf0, 0x90, 0x80, 0x80}),
+ newTokenDefault(1, 1, []byte{0xf0, 0xbf, 0xbf, 0xbf}),
+ newTokenDefault(1, 1, []byte{0xf1, 0x80, 0x80, 0x80}),
+ newTokenDefault(1, 1, []byte{0xf3, 0xbf, 0xbf, 0xbf}),
+ newTokenDefault(1, 1, []byte{0xf4, 0x80, 0x80, 0x80}),
+ newTokenDefault(1, 1, []byte{0xf4, 0x8f, 0xbf, 0xbf}),
+ newEOFTokenDefault(),
+ },
+ },
+ {
+ lspec: &lexical.LexSpec{
+ Entries: []*lexical.LexEntry{
+ newLexEntryDefaultNOP("t1", "[ab.*+?|()[\\]]"),
+ },
+ },
+ src: "ab.*+?|()[]",
+ tokens: []*Token{
+ newTokenDefault(1, 1, []byte("a")),
+ newTokenDefault(1, 1, []byte("b")),
+ newTokenDefault(1, 1, []byte(".")),
+ newTokenDefault(1, 1, []byte("*")),
+ newTokenDefault(1, 1, []byte("+")),
+ newTokenDefault(1, 1, []byte("?")),
+ newTokenDefault(1, 1, []byte("|")),
+ newTokenDefault(1, 1, []byte("(")),
+ newTokenDefault(1, 1, []byte(")")),
+ newTokenDefault(1, 1, []byte("[")),
+ newTokenDefault(1, 1, []byte("]")),
+ newEOFTokenDefault(),
+ },
+ },
+ {
+ lspec: &lexical.LexSpec{
+ Entries: []*lexical.LexEntry{
+ // all 1 byte characters except the null character (U+0000)
+ //
+ // NOTE:
+ // vartan cannot handle the null character in patterns because lexical.lexer,
+ // specifically read() and restore(), treats the null character as the absence of a symbol.
+ // If a pattern needs a null character, use the code point expression \u{0000}.
+ newLexEntryDefaultNOP("char_1_byte", "[\x01-\x7f]"),
+ },
+ },
+ src: string([]byte{
+ 0x01,
+ 0x02,
+ 0x7e,
+ 0x7f,
+ }),
+ tokens: []*Token{
+ newTokenDefault(1, 1, []byte{0x01}),
+ newTokenDefault(1, 1, []byte{0x02}),
+ newTokenDefault(1, 1, []byte{0x7e}),
+ newTokenDefault(1, 1, []byte{0x7f}),
+ newEOFTokenDefault(),
+ },
+ },
+ {
+ lspec: &lexical.LexSpec{
+ Entries: []*lexical.LexEntry{
+ // all 2 byte characters
+ newLexEntryDefaultNOP("char_2_byte", "[\xc2\x80-\xdf\xbf]"),
+ },
+ },
+ src: string([]byte{
+ 0xc2, 0x80,
+ 0xc2, 0x81,
+ 0xdf, 0xbe,
+ 0xdf, 0xbf,
+ }),
+ tokens: []*Token{
+ newTokenDefault(1, 1, []byte{0xc2, 0x80}),
+ newTokenDefault(1, 1, []byte{0xc2, 0x81}),
+ newTokenDefault(1, 1, []byte{0xdf, 0xbe}),
+ newTokenDefault(1, 1, []byte{0xdf, 0xbf}),
+ newEOFTokenDefault(),
+ },
+ },
+ {
+ lspec: &lexical.LexSpec{
+ Entries: []*lexical.LexEntry{
+ // All bytes are the same.
+ newLexEntryDefaultNOP("char_3_byte", "[\xe0\xa0\x80-\xe0\xa0\x80]"),
+ },
+ },
+ src: string([]byte{
+ 0xe0, 0xa0, 0x80,
+ }),
+ tokens: []*Token{
+ newTokenDefault(1, 1, []byte{0xe0, 0xa0, 0x80}),
+ newEOFTokenDefault(),
+ },
+ },
+ {
+ lspec: &lexical.LexSpec{
+ Entries: []*lexical.LexEntry{
+ // The first two bytes are the same.
+ newLexEntryDefaultNOP("char_3_byte", "[\xe0\xa0\x80-\xe0\xa0\xbf]"),
+ },
+ },
+ src: string([]byte{
+ 0xe0, 0xa0, 0x80,
+ 0xe0, 0xa0, 0x81,
+ 0xe0, 0xa0, 0xbe,
+ 0xe0, 0xa0, 0xbf,
+ }),
+ tokens: []*Token{
+ newTokenDefault(1, 1, []byte{0xe0, 0xa0, 0x80}),
+ newTokenDefault(1, 1, []byte{0xe0, 0xa0, 0x81}),
+ newTokenDefault(1, 1, []byte{0xe0, 0xa0, 0xbe}),
+ newTokenDefault(1, 1, []byte{0xe0, 0xa0, 0xbf}),
+ newEOFTokenDefault(),
+ },
+ },
+ {
+ lspec: &lexical.LexSpec{
+ Entries: []*lexical.LexEntry{
+ // The first byte is the same.
+ newLexEntryDefaultNOP("char_3_byte", "[\xe0\xa0\x80-\xe0\xbf\xbf]"),
+ },
+ },
+ src: string([]byte{
+ 0xe0, 0xa0, 0x80,
+ 0xe0, 0xa0, 0x81,
+ 0xe0, 0xbf, 0xbe,
+ 0xe0, 0xbf, 0xbf,
+ }),
+ tokens: []*Token{
+ newTokenDefault(1, 1, []byte{0xe0, 0xa0, 0x80}),
+ newTokenDefault(1, 1, []byte{0xe0, 0xa0, 0x81}),
+ newTokenDefault(1, 1, []byte{0xe0, 0xbf, 0xbe}),
+ newTokenDefault(1, 1, []byte{0xe0, 0xbf, 0xbf}),
+ newEOFTokenDefault(),
+ },
+ },
+ {
+ lspec: &lexical.LexSpec{
+ Entries: []*lexical.LexEntry{
+ // all 3 byte characters
+ newLexEntryDefaultNOP("char_3_byte", "[\xe0\xa0\x80-\xef\xbf\xbf]"),
+ },
+ },
+ src: string([]byte{
+ 0xe0, 0xa0, 0x80,
+ 0xe0, 0xa0, 0x81,
+ 0xe0, 0xbf, 0xbe,
+ 0xe0, 0xbf, 0xbf,
+ 0xe1, 0x80, 0x80,
+ 0xe1, 0x80, 0x81,
+ 0xec, 0xbf, 0xbe,
+ 0xec, 0xbf, 0xbf,
+ 0xed, 0x80, 0x80,
+ 0xed, 0x80, 0x81,
+ 0xed, 0x9f, 0xbe,
+ 0xed, 0x9f, 0xbf,
+ 0xee, 0x80, 0x80,
+ 0xee, 0x80, 0x81,
+ 0xef, 0xbf, 0xbe,
+ 0xef, 0xbf, 0xbf,
+ }),
+ tokens: []*Token{
+ newTokenDefault(1, 1, []byte{0xe0, 0xa0, 0x80}),
+ newTokenDefault(1, 1, []byte{0xe0, 0xa0, 0x81}),
+ newTokenDefault(1, 1, []byte{0xe0, 0xbf, 0xbe}),
+ newTokenDefault(1, 1, []byte{0xe0, 0xbf, 0xbf}),
+ newTokenDefault(1, 1, []byte{0xe1, 0x80, 0x80}),
+ newTokenDefault(1, 1, []byte{0xe1, 0x80, 0x81}),
+ newTokenDefault(1, 1, []byte{0xec, 0xbf, 0xbe}),
+ newTokenDefault(1, 1, []byte{0xec, 0xbf, 0xbf}),
+ newTokenDefault(1, 1, []byte{0xed, 0x80, 0x80}),
+ newTokenDefault(1, 1, []byte{0xed, 0x80, 0x81}),
+ newTokenDefault(1, 1, []byte{0xed, 0x9f, 0xbe}),
+ newTokenDefault(1, 1, []byte{0xed, 0x9f, 0xbf}),
+ newTokenDefault(1, 1, []byte{0xee, 0x80, 0x80}),
+ newTokenDefault(1, 1, []byte{0xee, 0x80, 0x81}),
+ newTokenDefault(1, 1, []byte{0xef, 0xbf, 0xbe}),
+ newTokenDefault(1, 1, []byte{0xef, 0xbf, 0xbf}),
+ newEOFTokenDefault(),
+ },
+ },
+ {
+ lspec: &lexical.LexSpec{
+ Entries: []*lexical.LexEntry{
+ // All bytes are the same.
+ newLexEntryDefaultNOP("char_4_byte", "[\xf0\x90\x80\x80-\xf0\x90\x80\x80]"),
+ },
+ },
+ src: string([]byte{
+ 0xf0, 0x90, 0x80, 0x80,
+ }),
+ tokens: []*Token{
+ newTokenDefault(1, 1, []byte{0xf0, 0x90, 0x80, 0x80}),
+ newEOFTokenDefault(),
+ },
+ },
+ {
+ lspec: &lexical.LexSpec{
+ Entries: []*lexical.LexEntry{
+ // The first 3 bytes are the same.
+ newLexEntryDefaultNOP("char_4_byte", "[\xf0\x90\x80\x80-\xf0\x90\x80\xbf]"),
+ },
+ },
+ src: string([]byte{
+ 0xf0, 0x90, 0x80, 0x80,
+ 0xf0, 0x90, 0x80, 0x81,
+ 0xf0, 0x90, 0x80, 0xbe,
+ 0xf0, 0x90, 0x80, 0xbf,
+ }),
+ tokens: []*Token{
+ newTokenDefault(1, 1, []byte{0xf0, 0x90, 0x80, 0x80}),
+ newTokenDefault(1, 1, []byte{0xf0, 0x90, 0x80, 0x81}),
+ newTokenDefault(1, 1, []byte{0xf0, 0x90, 0x80, 0xbe}),
+ newTokenDefault(1, 1, []byte{0xf0, 0x90, 0x80, 0xbf}),
+ newEOFTokenDefault(),
+ },
+ },
+ {
+ lspec: &lexical.LexSpec{
+ Entries: []*lexical.LexEntry{
+ // The first 2 bytes are the same.
+ newLexEntryDefaultNOP("char_4_byte", "[\xf0\x90\x80\x80-\xf0\x90\xbf\xbf]"),
+ },
+ },
+ src: string([]byte{
+ 0xf0, 0x90, 0x80, 0x80,
+ 0xf0, 0x90, 0x80, 0x81,
+ 0xf0, 0x90, 0xbf, 0xbe,
+ 0xf0, 0x90, 0xbf, 0xbf,
+ }),
+ tokens: []*Token{
+ newTokenDefault(1, 1, []byte{0xf0, 0x90, 0x80, 0x80}),
+ newTokenDefault(1, 1, []byte{0xf0, 0x90, 0x80, 0x81}),
+ newTokenDefault(1, 1, []byte{0xf0, 0x90, 0xbf, 0xbe}),
+ newTokenDefault(1, 1, []byte{0xf0, 0x90, 0xbf, 0xbf}),
+ newEOFTokenDefault(),
+ },
+ },
+ {
+ lspec: &lexical.LexSpec{
+ Entries: []*lexical.LexEntry{
+ // The first byte is the same.
+ newLexEntryDefaultNOP("char_4_byte", "[\xf0\x90\x80\x80-\xf0\xbf\xbf\xbf]"),
+ },
+ },
+ src: string([]byte{
+ 0xf0, 0x90, 0x80, 0x80,
+ 0xf0, 0x90, 0x80, 0x81,
+ 0xf0, 0xbf, 0xbf, 0xbe,
+ 0xf0, 0xbf, 0xbf, 0xbf,
+ }),
+ tokens: []*Token{
+ newTokenDefault(1, 1, []byte{0xf0, 0x90, 0x80, 0x80}),
+ newTokenDefault(1, 1, []byte{0xf0, 0x90, 0x80, 0x81}),
+ newTokenDefault(1, 1, []byte{0xf0, 0xbf, 0xbf, 0xbe}),
+ newTokenDefault(1, 1, []byte{0xf0, 0xbf, 0xbf, 0xbf}),
+ newEOFTokenDefault(),
+ },
+ },
+ {
+ lspec: &lexical.LexSpec{
+ Entries: []*lexical.LexEntry{
+ // all 4 byte characters
+ newLexEntryDefaultNOP("char_4_byte", "[\xf0\x90\x80\x80-\xf4\x8f\xbf\xbf]"),
+ },
+ },
+ src: string([]byte{
+ 0xf0, 0x90, 0x80, 0x80,
+ 0xf0, 0x90, 0x80, 0x81,
+ 0xf0, 0xbf, 0xbf, 0xbe,
+ 0xf0, 0xbf, 0xbf, 0xbf,
+ 0xf1, 0x80, 0x80, 0x80,
+ 0xf1, 0x80, 0x80, 0x81,
+ 0xf3, 0xbf, 0xbf, 0xbe,
+ 0xf3, 0xbf, 0xbf, 0xbf,
+ 0xf4, 0x80, 0x80, 0x80,
+ 0xf4, 0x80, 0x80, 0x81,
+ 0xf4, 0x8f, 0xbf, 0xbe,
+ 0xf4, 0x8f, 0xbf, 0xbf,
+ }),
+ tokens: []*Token{
+ newTokenDefault(1, 1, []byte{0xf0, 0x90, 0x80, 0x80}),
+ newTokenDefault(1, 1, []byte{0xf0, 0x90, 0x80, 0x81}),
+ newTokenDefault(1, 1, []byte{0xf0, 0xbf, 0xbf, 0xbe}),
+ newTokenDefault(1, 1, []byte{0xf0, 0xbf, 0xbf, 0xbf}),
+ newTokenDefault(1, 1, []byte{0xf1, 0x80, 0x80, 0x80}),
+ newTokenDefault(1, 1, []byte{0xf1, 0x80, 0x80, 0x81}),
+ newTokenDefault(1, 1, []byte{0xf3, 0xbf, 0xbf, 0xbe}),
+ newTokenDefault(1, 1, []byte{0xf3, 0xbf, 0xbf, 0xbf}),
+ newTokenDefault(1, 1, []byte{0xf4, 0x80, 0x80, 0x80}),
+ newTokenDefault(1, 1, []byte{0xf4, 0x80, 0x80, 0x81}),
+ newTokenDefault(1, 1, []byte{0xf4, 0x8f, 0xbf, 0xbe}),
+ newTokenDefault(1, 1, []byte{0xf4, 0x8f, 0xbf, 0xbf}),
+ newEOFTokenDefault(),
+ },
+ },
+ {
+ lspec: &lexical.LexSpec{
+ Entries: []*lexical.LexEntry{
+ newLexEntryDefaultNOP("non_number", "[^0-9]+[0-9]"),
+ },
+ },
+ src: "foo9",
+ tokens: []*Token{
+ newTokenDefault(1, 1, []byte("foo9")),
+ newEOFTokenDefault(),
+ },
+ },
+ {
+ lspec: &lexical.LexSpec{
+ Entries: []*lexical.LexEntry{
+ newLexEntryDefaultNOP("char_1_byte", "\\u{006E}"),
+ newLexEntryDefaultNOP("char_2_byte", "\\u{03BD}"),
+ newLexEntryDefaultNOP("char_3_byte", "\\u{306B}"),
+ newLexEntryDefaultNOP("char_4_byte", "\\u{01F638}"),
+ },
+ },
+ src: "nνに😸",
+ tokens: []*Token{
+ newTokenDefault(1, 1, []byte{0x6E}),
+ newTokenDefault(2, 2, []byte{0xCE, 0xBD}),
+ newTokenDefault(3, 3, []byte{0xE3, 0x81, 0xAB}),
+ newTokenDefault(4, 4, []byte{0xF0, 0x9F, 0x98, 0xB8}),
+ newEOFTokenDefault(),
+ },
+ },
+ {
+ lspec: &lexical.LexSpec{
+ Entries: []*lexical.LexEntry{
+ newLexEntryDefaultNOP("code_points_alt", "[\\u{006E}\\u{03BD}\\u{306B}\\u{01F638}]"),
+ },
+ },
+ src: "nνに😸",
+ tokens: []*Token{
+ newTokenDefault(1, 1, []byte{0x6E}),
+ newTokenDefault(1, 1, []byte{0xCE, 0xBD}),
+ newTokenDefault(1, 1, []byte{0xE3, 0x81, 0xAB}),
+ newTokenDefault(1, 1, []byte{0xF0, 0x9F, 0x98, 0xB8}),
+ newEOFTokenDefault(),
+ },
+ },
+ {
+ lspec: &lexical.LexSpec{
+ Entries: []*lexical.LexEntry{
+ newLexEntryDefaultNOP("t1", "\\f{a2c}\\f{d2f}+"),
+ newLexEntryFragment("a2c", "abc"),
+ newLexEntryFragment("d2f", "def"),
+ },
+ },
+ src: "abcdefdefabcdef",
+ tokens: []*Token{
+ newTokenDefault(1, 1, []byte("abcdefdef")),
+ newTokenDefault(1, 1, []byte("abcdef")),
+ newEOFTokenDefault(),
+ },
+ },
+ {
+ lspec: &lexical.LexSpec{
+ Entries: []*lexical.LexEntry{
+ newLexEntryDefaultNOP("t1", "(\\f{a2c}|\\f{d2f})+"),
+ newLexEntryFragment("a2c", "abc"),
+ newLexEntryFragment("d2f", "def"),
+ },
+ },
+ src: "abcdefdefabc",
+ tokens: []*Token{
+ newTokenDefault(1, 1, []byte("abcdefdefabc")),
+ newEOFTokenDefault(),
+ },
+ },
+ {
+ lspec: &lexical.LexSpec{
+ Entries: []*lexical.LexEntry{
+ newLexEntryDefaultNOP("t1", "\\f{a2c_or_d2f}+"),
+ newLexEntryFragment("a2c_or_d2f", "\\f{a2c}|\\f{d2f}"),
+ newLexEntryFragment("a2c", "abc"),
+ newLexEntryFragment("d2f", "def"),
+ },
+ },
+ src: "abcdefdefabc",
+ tokens: []*Token{
+ newTokenDefault(1, 1, []byte("abcdefdefabc")),
+ newEOFTokenDefault(),
+ },
+ },
+ {
+ lspec: &lexical.LexSpec{
+ Entries: []*lexical.LexEntry{
+ newLexEntryDefaultNOP("white_space", ` *`),
+ newLexEntry([]string{"default"}, "string_open", `"`, "string", false),
+ newLexEntry([]string{"string"}, "escape_sequence", `\\[n"\\]`, "", false),
+ newLexEntry([]string{"string"}, "char_sequence", `[^"\\]*`, "", false),
+ newLexEntry([]string{"string"}, "string_close", `"`, "", true),
+ },
+ },
+ src: `"" "Hello world.\n\"Hello world.\""`,
+ tokens: []*Token{
+ newToken(1, 2, 2, []byte(`"`)),
+ newToken(2, 5, 3, []byte(`"`)),
+ newToken(1, 1, 1, []byte(` `)),
+ newToken(1, 2, 2, []byte(`"`)),
+ newToken(2, 4, 2, []byte(`Hello world.`)),
+ newToken(2, 3, 1, []byte(`\n`)),
+ newToken(2, 3, 1, []byte(`\"`)),
+ newToken(2, 4, 2, []byte(`Hello world.`)),
+ newToken(2, 3, 1, []byte(`\"`)),
+ newToken(2, 5, 3, []byte(`"`)),
+ newEOFTokenDefault(),
+ },
+ },
+ {
+ lspec: &lexical.LexSpec{
+ Entries: []*lexical.LexEntry{
+ // `white_space` is enabled in multiple modes.
+ newLexEntry([]string{"default", "state_a", "state_b"}, "white_space", ` *`, "", false),
+ newLexEntry([]string{"default"}, "char_a", `a`, "state_a", false),
+ newLexEntry([]string{"state_a"}, "char_b", `b`, "state_b", false),
+ newLexEntry([]string{"state_a"}, "back_from_a", `<`, "", true),
+ newLexEntry([]string{"state_b"}, "back_from_b", `<`, "", true),
+ },
+ },
+ src: ` a b < < `,
+ tokens: []*Token{
+ newToken(1, 1, 1, []byte(` `)),
+ newToken(1, 2, 2, []byte(`a`)),
+ newToken(2, 1, 1, []byte(` `)),
+ newToken(2, 3, 2, []byte(`b`)),
+ newToken(3, 1, 1, []byte(` `)),
+ newToken(3, 5, 2, []byte(`<`)),
+ newToken(2, 1, 1, []byte(` `)),
+ newToken(2, 4, 3, []byte(`<`)),
+ newToken(1, 1, 1, []byte(` `)),
+ newEOFTokenDefault(),
+ },
+ },
+ {
+ lspec: &lexical.LexSpec{
+ Entries: []*lexical.LexEntry{
+ newLexEntry([]string{"default", "mode_1", "mode_2"}, "white_space", ` *`, "", false),
+ newLexEntry([]string{"default"}, "char", `.`, "", false),
+ newLexEntry([]string{"default"}, "push_1", `-> 1`, "", false),
+ newLexEntry([]string{"mode_1"}, "push_2", `-> 2`, "", false),
+ newLexEntry([]string{"mode_1"}, "pop_1", `<-`, "", false),
+ newLexEntry([]string{"mode_2"}, "pop_2", `<-`, "", false),
+ },
+ },
+ src: `-> 1 -> 2 <- <- a`,
+ tokens: []*Token{
+ newToken(1, 3, 3, []byte(`-> 1`)),
+ newToken(2, 1, 1, []byte(` `)),
+ newToken(2, 4, 2, []byte(`-> 2`)),
+ newToken(3, 1, 1, []byte(` `)),
+ newToken(3, 6, 2, []byte(`<-`)),
+ newToken(2, 1, 1, []byte(` `)),
+ newToken(2, 5, 3, []byte(`<-`)),
+ newToken(1, 1, 1, []byte(` `)),
+ newToken(1, 2, 2, []byte(`a`)),
+ newEOFTokenDefault(),
+ },
+ passiveModeTran: true,
+ tran: func(l *Lexer, tok *Token) error {
+ switch l.spec.ModeName(l.Mode()) {
+ case "default":
+ switch tok.KindID {
+ case 3: // push_1
+ l.PushMode(2)
+ }
+ case "mode_1":
+ switch tok.KindID {
+ case 4: // push_2
+ l.PushMode(3)
+ case 5: // pop_1
+ return l.PopMode()
+ }
+ case "mode_2":
+ switch tok.KindID {
+ case 6: // pop_2
+ return l.PopMode()
+ }
+ }
+ return nil
+ },
+ },
+ {
+ lspec: &lexical.LexSpec{
+ Entries: []*lexical.LexEntry{
+ newLexEntry([]string{"default", "mode_1", "mode_2"}, "white_space", ` *`, "", false),
+ newLexEntry([]string{"default"}, "char", `.`, "", false),
+ newLexEntry([]string{"default"}, "push_1", `-> 1`, "mode_1", false),
+ newLexEntry([]string{"mode_1"}, "push_2", `-> 2`, "", false),
+ newLexEntry([]string{"mode_1"}, "pop_1", `<-`, "", false),
+ newLexEntry([]string{"mode_2"}, "pop_2", `<-`, "", true),
+ },
+ },
+ src: `-> 1 -> 2 <- <- a`,
+ tokens: []*Token{
+ newToken(1, 3, 3, []byte(`-> 1`)),
+ newToken(2, 1, 1, []byte(` `)),
+ newToken(2, 4, 2, []byte(`-> 2`)),
+ newToken(3, 1, 1, []byte(` `)),
+ newToken(3, 6, 2, []byte(`<-`)),
+ newToken(2, 1, 1, []byte(` `)),
+ newToken(2, 5, 3, []byte(`<-`)),
+ newToken(1, 1, 1, []byte(` `)),
+ newToken(1, 2, 2, []byte(`a`)),
+ newEOFTokenDefault(),
+ },
+ // Active mode transition and an external transition function can be used together.
+ passiveModeTran: false,
+ tran: func(l *Lexer, tok *Token) error {
+ switch l.spec.ModeName(l.Mode()) {
+ case "mode_1":
+ switch tok.KindID {
+ case 4: // push_2
+ l.PushMode(3)
+ case 5: // pop_1
+ return l.PopMode()
+ }
+ }
+ return nil
+ },
+ },
+ {
+ lspec: &lexical.LexSpec{
+ Entries: []*lexical.LexEntry{
+ newLexEntryDefaultNOP("dot", spec.EscapePattern(`.`)),
+ newLexEntryDefaultNOP("star", spec.EscapePattern(`*`)),
+ newLexEntryDefaultNOP("plus", spec.EscapePattern(`+`)),
+ newLexEntryDefaultNOP("question", spec.EscapePattern(`?`)),
+ newLexEntryDefaultNOP("vbar", spec.EscapePattern(`|`)),
+ newLexEntryDefaultNOP("lparen", spec.EscapePattern(`(`)),
+ newLexEntryDefaultNOP("rparen", spec.EscapePattern(`)`)),
+ newLexEntryDefaultNOP("lbrace", spec.EscapePattern(`[`)),
+ newLexEntryDefaultNOP("backslash", spec.EscapePattern(`\`)),
+ },
+ },
+ src: `.*+?|()[\`,
+ tokens: []*Token{
+ newTokenDefault(1, 1, []byte(`.`)),
+ newTokenDefault(2, 2, []byte(`*`)),
+ newTokenDefault(3, 3, []byte(`+`)),
+ newTokenDefault(4, 4, []byte(`?`)),
+ newTokenDefault(5, 5, []byte(`|`)),
+ newTokenDefault(6, 6, []byte(`(`)),
+ newTokenDefault(7, 7, []byte(`)`)),
+ newTokenDefault(8, 8, []byte(`[`)),
+ newTokenDefault(9, 9, []byte(`\`)),
+ newEOFTokenDefault(),
+ },
+ },
+ // Character properties are available in a bracket expression.
+ {
+ lspec: &lexical.LexSpec{
+ Entries: []*lexical.LexEntry{
+ newLexEntryDefaultNOP("letter", `[\p{Letter}]+`),
+ newLexEntryDefaultNOP("non_letter", `[^\p{Letter}]+`),
+ },
+ },
+ src: `foo123`,
+ tokens: []*Token{
+ newTokenDefault(1, 1, []byte(`foo`)),
+ newTokenDefault(2, 2, []byte(`123`)),
+ newEOFTokenDefault(),
+ },
+ },
+ // The driver can continue lexical analysis even after it detects an invalid token.
+ {
+ lspec: &lexical.LexSpec{
+ Entries: []*lexical.LexEntry{
+ newLexEntryDefaultNOP("lower", `[a-z]+`),
+ },
+ },
+ src: `foo123bar`,
+ tokens: []*Token{
+ newTokenDefault(1, 1, []byte(`foo`)),
+ newInvalidTokenDefault([]byte(`123`)),
+ newTokenDefault(1, 1, []byte(`bar`)),
+ newEOFTokenDefault(),
+ },
+ },
+ }
+ for i, tt := range test {
+ for compLv := lexical.CompressionLevelMin; compLv <= lexical.CompressionLevelMax; compLv++ {
+ t.Run(fmt.Sprintf("#%v-%v", i, compLv), func(t *testing.T) {
+ clspec, err, cerrs := lexical.Compile(tt.lspec, compLv)
+ if err != nil {
+ for _, cerr := range cerrs {
+ t.Logf("%#v", cerr)
+ }
+ t.Fatalf("unexpected error: %v", err)
+ }
+ opts := []LexerOption{}
+ if tt.passiveModeTran {
+ opts = append(opts, DisableModeTransition())
+ }
+ lexer, err := NewLexer(NewLexSpec(clspec), strings.NewReader(tt.src), opts...)
+ if err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+ for _, eTok := range tt.tokens {
+ tok, err := lexer.Next()
+ if err != nil {
+ t.Log(err)
+ break
+ }
+ testToken(t, eTok, tok, false)
+
+ if tok.EOF {
+ break
+ }
+
+ if tt.tran != nil {
+ err := tt.tran(lexer, tok)
+ if err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+ }
+ }
+ })
+ }
+ }
+}
+
+func TestLexer_Next_WithPosition(t *testing.T) {
+ lspec := &lexical.LexSpec{
+ Entries: []*lexical.LexEntry{
+ newLexEntryDefaultNOP("newline", `\u{000A}+`),
+ newLexEntryDefaultNOP("any", `.`),
+ },
+ }
+
+ clspec, err, _ := lexical.Compile(lspec, lexical.CompressionLevelMax)
+ if err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+
+ src := string([]byte{
+ 0x00,
+ 0x7F,
+ 0x0A,
+
+ 0xC2, 0x80,
+ 0xDF, 0xBF,
+ 0x0A,
+
+ 0xE0, 0xA0, 0x80,
+ 0xE0, 0xBF, 0xBF,
+ 0xE1, 0x80, 0x80,
+ 0xEC, 0xBF, 0xBF,
+ 0xED, 0x80, 0x80,
+ 0xED, 0x9F, 0xBF,
+ 0xEE, 0x80, 0x80,
+ 0xEF, 0xBF, 0xBF,
+ 0x0A,
+
+ 0xF0, 0x90, 0x80, 0x80,
+ 0xF0, 0xBF, 0xBF, 0xBF,
+ 0xF1, 0x80, 0x80, 0x80,
+ 0xF3, 0xBF, 0xBF, 0xBF,
+ 0xF4, 0x80, 0x80, 0x80,
+ 0xF4, 0x8F, 0xBF, 0xBF,
+ 0x0A,
+ 0x0A,
+ 0x0A,
+ })
+
+ expected := []*Token{
+ withPos(newTokenDefault(2, 2, []byte{0x00}), 0, 0),
+ withPos(newTokenDefault(2, 2, []byte{0x7F}), 0, 1),
+ withPos(newTokenDefault(1, 1, []byte{0x0A}), 0, 2),
+
+ withPos(newTokenDefault(2, 2, []byte{0xC2, 0x80}), 1, 0),
+ withPos(newTokenDefault(2, 2, []byte{0xDF, 0xBF}), 1, 1),
+ withPos(newTokenDefault(1, 1, []byte{0x0A}), 1, 2),
+
+ withPos(newTokenDefault(2, 2, []byte{0xE0, 0xA0, 0x80}), 2, 0),
+ withPos(newTokenDefault(2, 2, []byte{0xE0, 0xBF, 0xBF}), 2, 1),
+ withPos(newTokenDefault(2, 2, []byte{0xE1, 0x80, 0x80}), 2, 2),
+ withPos(newTokenDefault(2, 2, []byte{0xEC, 0xBF, 0xBF}), 2, 3),
+ withPos(newTokenDefault(2, 2, []byte{0xED, 0x80, 0x80}), 2, 4),
+ withPos(newTokenDefault(2, 2, []byte{0xED, 0x9F, 0xBF}), 2, 5),
+ withPos(newTokenDefault(2, 2, []byte{0xEE, 0x80, 0x80}), 2, 6),
+ withPos(newTokenDefault(2, 2, []byte{0xEF, 0xBF, 0xBF}), 2, 7),
+ withPos(newTokenDefault(1, 1, []byte{0x0A}), 2, 8),
+
+ withPos(newTokenDefault(2, 2, []byte{0xF0, 0x90, 0x80, 0x80}), 3, 0),
+ withPos(newTokenDefault(2, 2, []byte{0xF0, 0xBF, 0xBF, 0xBF}), 3, 1),
+ withPos(newTokenDefault(2, 2, []byte{0xF1, 0x80, 0x80, 0x80}), 3, 2),
+ withPos(newTokenDefault(2, 2, []byte{0xF3, 0xBF, 0xBF, 0xBF}), 3, 3),
+ withPos(newTokenDefault(2, 2, []byte{0xF4, 0x80, 0x80, 0x80}), 3, 4),
+ withPos(newTokenDefault(2, 2, []byte{0xF4, 0x8F, 0xBF, 0xBF}), 3, 5),
+
+ // When a token contains multiple line breaks, the driver sets the token position to
+ // the position where the lexeme first appears.
+ withPos(newTokenDefault(1, 1, []byte{0x0A, 0x0A, 0x0A}), 3, 6),
+
+ withPos(newEOFTokenDefault(), 0, 0),
+ }
+
+ lexer, err := NewLexer(NewLexSpec(clspec), strings.NewReader(src))
+ if err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+
+ for _, eTok := range expected {
+ tok, err := lexer.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ testToken(t, eTok, tok, true)
+
+ if tok.EOF {
+ break
+ }
+ }
+}
+
+func testToken(t *testing.T, expected, actual *Token, checkPosition bool) {
+ t.Helper()
+
+ if actual.ModeID != expected.ModeID ||
+ actual.KindID != expected.KindID ||
+ actual.ModeKindID != expected.ModeKindID ||
+ !bytes.Equal(actual.Lexeme, expected.Lexeme) ||
+ actual.EOF != expected.EOF ||
+ actual.Invalid != expected.Invalid {
+ t.Fatalf(`unexpected token; want: %v ("%#v"), got: %v ("%#v")`, expected, string(expected.Lexeme), actual, string(actual.Lexeme))
+ }
+
+ if checkPosition {
+ if actual.Row != expected.Row || actual.Col != expected.Col {
+ t.Fatalf(`unexpected token; want: %v ("%#v"), got: %v ("%#v")`, expected, string(expected.Lexeme), actual, string(actual.Lexeme))
+ }
+ }
+}
diff --git a/driver/lexer/spec.go b/driver/lexer/spec.go
new file mode 100644
index 0000000..23debbf
--- /dev/null
+++ b/driver/lexer/spec.go
@@ -0,0 +1,71 @@
+package lexer
+
+import spec "github.com/nihei9/vartan/spec/grammar"
+
+type lexSpec struct {
+ spec *spec.LexicalSpec
+}
+
+func NewLexSpec(spec *spec.LexicalSpec) *lexSpec {
+ return &lexSpec{
+ spec: spec,
+ }
+}
+
+func (s *lexSpec) InitialMode() ModeID {
+ return ModeID(s.spec.InitialModeID.Int())
+}
+
+func (s *lexSpec) Pop(mode ModeID, modeKind ModeKindID) bool {
+ return s.spec.Specs[mode].Pop[modeKind] == 1
+}
+
+func (s *lexSpec) Push(mode ModeID, modeKind ModeKindID) (ModeID, bool) {
+ modeID := s.spec.Specs[mode].Push[modeKind]
+ return ModeID(modeID.Int()), !modeID.IsNil()
+}
+
+func (s *lexSpec) ModeName(mode ModeID) string {
+ return s.spec.ModeNames[mode].String()
+}
+
+func (s *lexSpec) InitialState(mode ModeID) StateID {
+ return StateID(s.spec.Specs[mode].DFA.InitialStateID.Int())
+}
+
+func (s *lexSpec) NextState(mode ModeID, state StateID, v int) (StateID, bool) {
+ switch s.spec.CompressionLevel {
+ case 2:
+ tran := s.spec.Specs[mode].DFA.Transition
+ rowNum := tran.RowNums[state]
+ d := tran.UniqueEntries.RowDisplacement[rowNum]
+ if tran.UniqueEntries.Bounds[d+v] != rowNum {
+ return StateID(tran.UniqueEntries.EmptyValue.Int()), false
+ }
+ return StateID(tran.UniqueEntries.Entries[d+v].Int()), true
+ case 1:
+ tran := s.spec.Specs[mode].DFA.Transition
+ next := tran.UncompressedUniqueEntries[tran.RowNums[state]*tran.OriginalColCount+v]
+ if next == spec.StateIDNil {
+ return StateID(spec.StateIDNil.Int()), false
+ }
+ return StateID(next.Int()), true
+ }
+
+ modeSpec := s.spec.Specs[mode]
+ next := modeSpec.DFA.UncompressedTransition[state.Int()*modeSpec.DFA.ColCount+v]
+ if next == spec.StateIDNil {
+ return StateID(spec.StateIDNil), false
+ }
+ return StateID(next.Int()), true
+}
+
+func (s *lexSpec) Accept(mode ModeID, state StateID) (ModeKindID, bool) {
+ modeKindID := s.spec.Specs[mode].DFA.AcceptingStates[state]
+ return ModeKindID(modeKindID.Int()), modeKindID != spec.LexModeKindIDNil
+}
+
+func (s *lexSpec) KindIDAndName(mode ModeID, modeKind ModeKindID) (KindID, string) {
+ kindID := s.spec.KindIDs[mode][modeKind]
+ return KindID(kindID.Int()), s.spec.KindNames[kindID].String()
+}
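The compression-level-2 branch of NextState implements a row-displacement (displaced
sparse row) lookup: each DFA row is stored at an offset into one shared entries array,
and a parallel bounds array records which row owns each slot, so an ownership mismatch
means "no transition". A toy sketch of the idea (the table values here are hypothetical,
not taken from a real spec):

    rowNums := []int{0, 1}          // state -> row number
    rowDisplacement := []int{0, 3}  // row number -> offset into entries/bounds
    bounds := []int{0, 0, 0, 1, 1}  // slot -> owning row number
    entries := []int{1, 0, 1, 0, 1} // slot -> next state
    next := func(state, v int) (int, bool) {
        row := rowNums[state]
        d := rowDisplacement[row]
        if bounds[d+v] != row {
            return 0, false // state has no transition on input v
        }
        return entries[d+v], true
    }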
diff --git a/driver/lexer/template.go b/driver/lexer/template.go
new file mode 100644
index 0000000..52f9ebd
--- /dev/null
+++ b/driver/lexer/template.go
@@ -0,0 +1,760 @@
+package lexer
+
+import (
+ "bytes"
+ _ "embed"
+ "fmt"
+ "go/ast"
+ "go/format"
+ "go/parser"
+ "go/token"
+ "strings"
+ "text/template"
+
+ "github.com/nihei9/vartan/grammar/lexical"
+ spec "github.com/nihei9/vartan/spec/grammar"
+)
+
+//go:embed lexer.go
+var lexerCoreSrc string
+
+func GenLexer(lexSpec *spec.LexicalSpec, pkgName string) ([]byte, error) {
+ var lexerSrc string
+ {
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, "lexer.go", lexerCoreSrc, parser.ParseComments)
+ if err != nil {
+ return nil, err
+ }
+
+ var b strings.Builder
+ err = format.Node(&b, fset, f)
+ if err != nil {
+ return nil, err
+ }
+
+ lexerSrc = b.String()
+ }
+
+ var modeIDsSrc string
+ {
+ var b strings.Builder
+ fmt.Fprintf(&b, "const (\n")
+ for i, k := range lexSpec.ModeNames {
+ if i == spec.LexModeIDNil.Int() {
+ fmt.Fprintf(&b, " ModeIDNil ModeID = %v\n", i)
+ continue
+ }
+ fmt.Fprintf(&b, " ModeID%v ModeID = %v\n", lexical.SnakeCaseToUpperCamelCase(k.String()), i)
+ }
+ fmt.Fprintf(&b, ")")
+
+ modeIDsSrc = b.String()
+ }
+
+ var modeNamesSrc string
+ {
+ var b strings.Builder
+ fmt.Fprintf(&b, "const (\n")
+ for i, k := range lexSpec.ModeNames {
+ if i == spec.LexModeIDNil.Int() {
+ fmt.Fprintf(&b, " ModeNameNil = %#v\n", "")
+ continue
+ }
+ fmt.Fprintf(&b, " ModeName%v = %#v\n", lexical.SnakeCaseToUpperCamelCase(k.String()), k)
+ }
+ fmt.Fprintf(&b, ")")
+
+ modeNamesSrc = b.String()
+ }
+
+ var modeIDToNameSrc string
+ {
+ var b strings.Builder
+ fmt.Fprintf(&b, `
+// ModeIDToName converts a mode ID to a name.
+func ModeIDToName(id ModeID) string {
+ switch id {`)
+ for i, k := range lexSpec.ModeNames {
+ if i == spec.LexModeIDNil.Int() {
+ fmt.Fprintf(&b, `
+ case ModeIDNil:
+ return ModeNameNil`)
+ continue
+ }
+ name := lexical.SnakeCaseToUpperCamelCase(k.String())
+ fmt.Fprintf(&b, `
+ case ModeID%v:
+ return ModeName%v`, name, name)
+ }
+ fmt.Fprintf(&b, `
+ }
+ return ""
+}
+`)
+
+ modeIDToNameSrc = b.String()
+ }
+
+ var kindIDsSrc string
+ {
+ var b strings.Builder
+ fmt.Fprintf(&b, "const (\n")
+ for i, k := range lexSpec.KindNames {
+ if i == spec.LexKindIDNil.Int() {
+ fmt.Fprintf(&b, " KindIDNil KindID = %v\n", i)
+ continue
+ }
+ fmt.Fprintf(&b, " KindID%v KindID = %v\n", lexical.SnakeCaseToUpperCamelCase(k.String()), i)
+ }
+ fmt.Fprintf(&b, ")")
+
+ kindIDsSrc = b.String()
+ }
+
+ var kindNamesSrc string
+ {
+ var b strings.Builder
+ fmt.Fprintf(&b, "const (\n")
+ fmt.Fprintf(&b, " KindNameNil = %#v\n", "")
+ for _, k := range lexSpec.KindNames[1:] {
+ fmt.Fprintf(&b, " KindName%v = %#v\n", lexical.SnakeCaseToUpperCamelCase(k.String()), k)
+ }
+ fmt.Fprintf(&b, ")")
+
+ kindNamesSrc = b.String()
+ }
+
+ var kindIDToNameSrc string
+ {
+ var b strings.Builder
+ fmt.Fprintf(&b, `
+// KindIDToName converts a kind ID to a name.
+func KindIDToName(id KindID) string {
+ switch id {`)
+ for i, k := range lexSpec.KindNames {
+ if i == spec.LexModeIDNil.Int() {
+ fmt.Fprintf(&b, `
+ case KindIDNil:
+ return KindNameNil`)
+ continue
+ }
+ name := lexical.SnakeCaseToUpperCamelCase(k.String())
+ fmt.Fprintf(&b, `
+ case KindID%v:
+ return KindName%v`, name, name)
+ }
+ fmt.Fprintf(&b, `
+ }
+ return ""
+}
+`)
+
+ kindIDToNameSrc = b.String()
+ }
+
+ var specSrc string
+ {
+ t, err := template.New("").Funcs(genTemplateFuncs(lexSpec)).Parse(lexSpecTemplate)
+ if err != nil {
+ return nil, err
+ }
+
+ var b strings.Builder
+ err = t.Execute(&b, map[string]interface{}{
+ "initialModeID": "ModeID" + lexical.SnakeCaseToUpperCamelCase(lexSpec.ModeNames[lexSpec.InitialModeID].String()),
+ "modeIDNil": "ModeIDNil",
+ "modeKindIDNil": spec.LexModeKindIDNil,
+ "stateIDNil": spec.StateIDNil,
+ "compressionLevel": lexSpec.CompressionLevel,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ specSrc = b.String()
+ }
+
+ var src string
+ {
+ tmpl := `// Code generated by vartan-go. DO NOT EDIT.
+{{ .lexerSrc }}
+
+{{ .modeIDsSrc }}
+
+{{ .modeNamesSrc }}
+
+{{ .modeIDToNameSrc }}
+
+{{ .kindIDsSrc }}
+
+{{ .kindNamesSrc }}
+
+{{ .kindIDToNameSrc }}
+
+{{ .specSrc }}
+`
+
+ t, err := template.New("").Parse(tmpl)
+ if err != nil {
+ return nil, err
+ }
+
+ var b strings.Builder
+ err = t.Execute(&b, map[string]string{
+ "lexerSrc": lexerSrc,
+ "modeIDsSrc": modeIDsSrc,
+ "modeNamesSrc": modeNamesSrc,
+ "modeIDToNameSrc": modeIDToNameSrc,
+ "kindIDsSrc": kindIDsSrc,
+ "kindNamesSrc": kindNamesSrc,
+ "kindIDToNameSrc": kindIDToNameSrc,
+ "specSrc": specSrc,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ src = b.String()
+ }
+
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, "", src, parser.ParseComments)
+ if err != nil {
+ return nil, err
+ }
+
+ f.Name = ast.NewIdent(pkgName)
+
+ var b bytes.Buffer
+ err = format.Node(&b, fset, f)
+ if err != nil {
+ return nil, err
+ }
+
+ return b.Bytes(), nil
+}
+
+const lexSpecTemplate = `
+type lexSpec struct {
+ pop [][]bool
+ push [][]ModeID
+ modeNames []string
+ initialStates []StateID
+ acceptances [][]ModeKindID
+ kindIDs [][]KindID
+ kindNames []string
+ initialModeID ModeID
+ modeIDNil ModeID
+ modeKindIDNil ModeKindID
+ stateIDNil StateID
+
+ rowNums [][]int
+ rowDisplacements [][]int
+ bounds [][]int
+ entries [][]StateID
+ originalColCounts []int
+}
+
+func NewLexSpec() *lexSpec {
+ return &lexSpec{
+ pop: {{ genPopTable }},
+ push: {{ genPushTable }},
+ modeNames: {{ genModeNameTable }},
+ initialStates: {{ genInitialStateTable }},
+ acceptances: {{ genAcceptTable }},
+ kindIDs: {{ genKindIDTable }},
+ kindNames: {{ genKindNameTable }},
+ initialModeID: {{ .initialModeID }},
+ modeIDNil: {{ .modeIDNil }},
+ modeKindIDNil: {{ .modeKindIDNil }},
+ stateIDNil: {{ .stateIDNil }},
+
+ rowNums: {{ genRowNums }},
+ rowDisplacements: {{ genRowDisplacements }},
+ bounds: {{ genBounds }},
+ entries: {{ genEntries }},
+ originalColCounts: {{ genOriginalColCounts }},
+ }
+}
+
+func (s *lexSpec) InitialMode() ModeID {
+ return s.initialModeID
+}
+
+func (s *lexSpec) Pop(mode ModeID, modeKind ModeKindID) bool {
+ return s.pop[mode][modeKind]
+}
+
+func (s *lexSpec) Push(mode ModeID, modeKind ModeKindID) (ModeID, bool) {
+ id := s.push[mode][modeKind]
+ return id, id != s.modeIDNil
+}
+
+func (s *lexSpec) ModeName(mode ModeID) string {
+ return s.modeNames[mode]
+}
+
+func (s *lexSpec) InitialState(mode ModeID) StateID {
+ return s.initialStates[mode]
+}
+
+func (s *lexSpec) NextState(mode ModeID, state StateID, v int) (StateID, bool) {
+{{ if eq .compressionLevel 2 -}}
+ rowNum := s.rowNums[mode][state]
+ d := s.rowDisplacements[mode][rowNum]
+ if s.bounds[mode][d+v] != rowNum {
+ return s.stateIDNil, false
+ }
+ return s.entries[mode][d+v], true
+{{ else if eq .compressionLevel 1 -}}
+ rowNum := s.rowNums[mode][state]
+ colCount := s.originalColCounts[mode]
+ next := s.entries[mode][rowNum*colCount+v]
+ if next == s.stateIDNil {
+ return s.stateIDNil, false
+ }
+ return next, true
+{{ else -}}
+ colCount := s.originalColCounts[mode]
+ next := s.entries[mode][int(state)*colCount+v]
+ if next == s.stateIDNil {
+ return s.stateIDNil, false
+ }
+ return next, true
+{{ end -}}
+}
+
+func (s *lexSpec) Accept(mode ModeID, state StateID) (ModeKindID, bool) {
+ id := s.acceptances[mode][state]
+ return id, id != s.modeKindIDNil
+}
+
+func (s *lexSpec) KindIDAndName(mode ModeID, modeKind ModeKindID) (KindID, string) {
+ id := s.kindIDs[mode][modeKind]
+ return id, s.kindNames[id]
+}
+`
+
+func genTemplateFuncs(lexSpec *spec.LexicalSpec) template.FuncMap {
+ fns := template.FuncMap{
+ "genPopTable": func() string {
+ var b strings.Builder
+ fmt.Fprintf(&b, "[][]bool{\n")
+ for i, s := range lexSpec.Specs {
+ if i == spec.LexModeIDNil.Int() {
+ fmt.Fprintf(&b, "nil,\n")
+ continue
+ }
+
+ c := 1
+ fmt.Fprintf(&b, "{\n")
+ for _, v := range s.Pop {
+ fmt.Fprintf(&b, "%v, ", v != 0)
+
+ if c == 20 {
+ fmt.Fprintf(&b, "\n")
+ c = 1
+ } else {
+ c++
+ }
+ }
+ if c > 1 {
+ fmt.Fprintf(&b, "\n")
+ }
+ fmt.Fprintf(&b, "},\n")
+ }
+ fmt.Fprintf(&b, "}")
+ return b.String()
+ },
+ "genPushTable": func() string {
+ var b strings.Builder
+ fmt.Fprintf(&b, "[][]ModeID{\n")
+ for i, s := range lexSpec.Specs {
+ if i == spec.LexModeIDNil.Int() {
+ fmt.Fprintf(&b, "nil,\n")
+ continue
+ }
+
+ c := 1
+ fmt.Fprintf(&b, "{\n")
+ for _, v := range s.Push {
+ fmt.Fprintf(&b, "%v,", v)
+
+ if c == 20 {
+ fmt.Fprintf(&b, "\n")
+ c = 1
+ } else {
+ c++
+ }
+ }
+ if c > 1 {
+ fmt.Fprintf(&b, "\n")
+ }
+ fmt.Fprintf(&b, "},\n")
+ }
+ fmt.Fprintf(&b, "}")
+ return b.String()
+ },
+ "genModeNameTable": func() string {
+ var b strings.Builder
+ fmt.Fprintf(&b, "[]string{\n")
+ for i, name := range lexSpec.ModeNames {
+ if i == spec.LexModeIDNil.Int() {
+ fmt.Fprintf(&b, "ModeNameNil,\n")
+ continue
+ }
+ fmt.Fprintf(&b, "ModeName%v,\n", lexical.SnakeCaseToUpperCamelCase(name.String()))
+ }
+ fmt.Fprintf(&b, "}")
+ return b.String()
+ },
+ "genInitialStateTable": func() string {
+ var b strings.Builder
+ fmt.Fprintf(&b, "[]StateID{\n")
+ for i, s := range lexSpec.Specs {
+ if i == spec.LexModeIDNil.Int() {
+ fmt.Fprintf(&b, "%v,\n", spec.StateIDNil)
+ continue
+ }
+
+ fmt.Fprintf(&b, "%v,\n", s.DFA.InitialStateID)
+ }
+ fmt.Fprintf(&b, "}")
+ return b.String()
+ },
+ "genAcceptTable": func() string {
+ var b strings.Builder
+ fmt.Fprintf(&b, "[][]ModeKindID{\n")
+ for i, s := range lexSpec.Specs {
+ if i == spec.LexModeIDNil.Int() {
+ fmt.Fprintf(&b, "nil,\n")
+ continue
+ }
+
+ c := 1
+ fmt.Fprintf(&b, "{\n")
+ for _, v := range s.DFA.AcceptingStates {
+ fmt.Fprintf(&b, "%v,", v)
+
+ if c == 20 {
+ fmt.Fprintf(&b, "\n")
+ c = 1
+ } else {
+ c++
+ }
+ }
+ if c > 1 {
+ fmt.Fprintf(&b, "\n")
+ }
+ fmt.Fprintf(&b, "},\n")
+ }
+ fmt.Fprintf(&b, "}")
+ return b.String()
+ },
+ "genKindIDTable": func() string {
+ var b strings.Builder
+ fmt.Fprintf(&b, "[][]KindID{\n")
+ for i, ids := range lexSpec.KindIDs {
+ if i == spec.LexModeIDNil.Int() {
+ fmt.Fprintf(&b, "nil,\n")
+ continue
+ }
+
+ fmt.Fprintf(&b, "{\n")
+ for j, id := range ids {
+ if j == spec.LexModeKindIDNil.Int() {
+ fmt.Fprintf(&b, "KindIDNil,\n")
+ continue
+ }
+ fmt.Fprintf(&b, "KindID%v,\n", lexical.SnakeCaseToUpperCamelCase(string(lexSpec.KindNames[id].String())))
+ }
+ fmt.Fprintf(&b, "},\n")
+ }
+ fmt.Fprintf(&b, "}")
+ return b.String()
+ },
+ "genKindNameTable": func() string {
+ var b strings.Builder
+ fmt.Fprintf(&b, "[]string{\n")
+ for i, name := range lexSpec.KindNames {
+ if i == spec.LexKindIDNil.Int() {
+ fmt.Fprintf(&b, "KindNameNil,\n")
+ continue
+ }
+ fmt.Fprintf(&b, "KindName%v,\n", lexical.SnakeCaseToUpperCamelCase(name.String()))
+ }
+ fmt.Fprintf(&b, "}")
+ return b.String()
+ },
+ }
+
+ switch lexSpec.CompressionLevel {
+ case 2:
+ fns["genRowNums"] = func() string {
+ var b strings.Builder
+ fmt.Fprintf(&b, "[][]int{\n")
+ for i, s := range lexSpec.Specs {
+ if i == spec.LexModeIDNil.Int() {
+ fmt.Fprintf(&b, "nil,\n")
+ continue
+ }
+
+ c := 1
+ fmt.Fprintf(&b, "{\n")
+ for _, v := range s.DFA.Transition.RowNums {
+ fmt.Fprintf(&b, "%v,", v)
+
+ if c == 20 {
+ fmt.Fprintf(&b, "\n")
+ c = 1
+ } else {
+ c++
+ }
+ }
+ if c > 1 {
+ fmt.Fprintf(&b, "\n")
+ }
+ fmt.Fprintf(&b, "},\n")
+ }
+ fmt.Fprintf(&b, "}")
+ return b.String()
+ }
+
+ fns["genRowDisplacements"] = func() string {
+ var b strings.Builder
+ fmt.Fprintf(&b, "[][]int{\n")
+ for i, s := range lexSpec.Specs {
+ if i == spec.LexModeIDNil.Int() {
+ fmt.Fprintf(&b, "nil,\n")
+ continue
+ }
+
+ c := 1
+ fmt.Fprintf(&b, "{\n")
+ for _, d := range s.DFA.Transition.UniqueEntries.RowDisplacement {
+ fmt.Fprintf(&b, "%v,", d)
+
+ if c == 20 {
+ fmt.Fprintf(&b, "\n")
+ c = 1
+ } else {
+ c++
+ }
+ }
+ if c > 1 {
+ fmt.Fprintf(&b, "\n")
+ }
+ fmt.Fprintf(&b, "},\n")
+ }
+ fmt.Fprintf(&b, "}")
+ return b.String()
+ }
+
+ fns["genBounds"] = func() string {
+ var b strings.Builder
+ fmt.Fprintf(&b, "[][]int{\n")
+ for i, s := range lexSpec.Specs {
+ if i == spec.LexModeIDNil.Int() {
+ fmt.Fprintf(&b, "nil,\n")
+ continue
+ }
+
+ c := 1
+ fmt.Fprintf(&b, "{\n")
+ for _, v := range s.DFA.Transition.UniqueEntries.Bounds {
+ fmt.Fprintf(&b, "%v,", v)
+
+ if c == 20 {
+ fmt.Fprintf(&b, "\n")
+ c = 1
+ } else {
+ c++
+ }
+ }
+ if c > 1 {
+ fmt.Fprintf(&b, "\n")
+ }
+ fmt.Fprintf(&b, "},\n")
+ }
+ fmt.Fprintf(&b, "}")
+ return b.String()
+ }
+
+ fns["genEntries"] = func() string {
+ var b strings.Builder
+ fmt.Fprintf(&b, "[][]StateID{\n")
+ for i, s := range lexSpec.Specs {
+ if i == spec.LexModeIDNil.Int() {
+ fmt.Fprintf(&b, "nil,\n")
+ continue
+ }
+
+ c := 1
+ fmt.Fprintf(&b, "{\n")
+ for _, v := range s.DFA.Transition.UniqueEntries.Entries {
+ fmt.Fprintf(&b, "%v,", v)
+
+ if c == 20 {
+ fmt.Fprintf(&b, "\n")
+ c = 1
+ } else {
+ c++
+ }
+ }
+ if c > 1 {
+ fmt.Fprintf(&b, "\n")
+ }
+ fmt.Fprintf(&b, "},\n")
+ }
+ fmt.Fprintf(&b, "}")
+ return b.String()
+ }
+
+ fns["genOriginalColCounts"] = func() string {
+ return "nil"
+ }
+ case 1:
+ fns["genRowNums"] = func() string {
+ var b strings.Builder
+ fmt.Fprintf(&b, "[][]int{\n")
+ for i, s := range lexSpec.Specs {
+ if i == spec.LexModeIDNil.Int() {
+ fmt.Fprintf(&b, "nil,\n")
+ continue
+ }
+
+ c := 1
+ fmt.Fprintf(&b, "{\n")
+ for _, v := range s.DFA.Transition.RowNums {
+ fmt.Fprintf(&b, "%v,", v)
+
+ if c == 20 {
+ fmt.Fprintf(&b, "\n")
+ c = 1
+ } else {
+ c++
+ }
+ }
+ if c > 1 {
+ fmt.Fprintf(&b, "\n")
+ }
+ fmt.Fprintf(&b, "},\n")
+ }
+ fmt.Fprintf(&b, "}")
+ return b.String()
+ }
+
+ fns["genRowDisplacements"] = func() string {
+ return "nil"
+ }
+
+ fns["genBounds"] = func() string {
+ return "nil"
+ }
+
+ fns["genEntries"] = func() string {
+ var b strings.Builder
+ fmt.Fprintf(&b, "[][]StateID{\n")
+ for i, s := range lexSpec.Specs {
+ if i == spec.LexModeIDNil.Int() {
+ fmt.Fprintf(&b, "nil,\n")
+ continue
+ }
+
+ c := 1
+ fmt.Fprintf(&b, "{\n")
+ for _, v := range s.DFA.Transition.UncompressedUniqueEntries {
+ fmt.Fprintf(&b, "%v,", v)
+
+ if c == 20 {
+ fmt.Fprintf(&b, "\n")
+ c = 1
+ } else {
+ c++
+ }
+ }
+ if c > 1 {
+ fmt.Fprintf(&b, "\n")
+ }
+ fmt.Fprintf(&b, "},\n")
+ }
+ fmt.Fprintf(&b, "}")
+ return b.String()
+ }
+
+ fns["genOriginalColCounts"] = func() string {
+ var b strings.Builder
+ fmt.Fprintf(&b, "[]int{\n")
+ for i, s := range lexSpec.Specs {
+ if i == spec.LexModeIDNil.Int() {
+ fmt.Fprintf(&b, "0,\n")
+ continue
+ }
+
+ fmt.Fprintf(&b, "%v,\n", s.DFA.Transition.OriginalColCount)
+ }
+ fmt.Fprintf(&b, "}")
+ return b.String()
+ }
+ default:
+ fns["genRowNums"] = func() string {
+ return "nil"
+ }
+
+ fns["genRowDisplacements"] = func() string {
+ return "nil"
+ }
+
+ fns["genBounds"] = func() string {
+ return "nil"
+ }
+
+ fns["genEntries"] = func() string {
+ var b strings.Builder
+ fmt.Fprintf(&b, "[][]StateID{\n")
+ for i, s := range lexSpec.Specs {
+ if i == spec.LexModeIDNil.Int() {
+ fmt.Fprintf(&b, "nil,\n")
+ continue
+ }
+
+ c := 1
+ fmt.Fprintf(&b, "{\n")
+ for _, v := range s.DFA.UncompressedTransition {
+ fmt.Fprintf(&b, "%v,", v)
+
+ if c == 20 {
+ fmt.Fprintf(&b, "\n")
+ c = 1
+ } else {
+ c++
+ }
+ }
+ if c > 1 {
+ fmt.Fprintf(&b, "\n")
+ }
+ fmt.Fprintf(&b, "},\n")
+ }
+ fmt.Fprintf(&b, "}")
+ return b.String()
+ }
+
+ fns["genOriginalColCounts"] = func() string {
+ var b strings.Builder
+ fmt.Fprintf(&b, "[]int{\n")
+ for i, s := range lexSpec.Specs {
+ if i == spec.LexModeIDNil.Int() {
+ fmt.Fprintf(&b, "0,\n")
+ continue
+ }
+
+ fmt.Fprintf(&b, "%v,\n", s.DFA.ColCount)
+ }
+ fmt.Fprintf(&b, "}")
+ return b.String()
+ }
+ }
+
+ return fns
+}
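GenLexer embeds the lexer.go source above via go:embed, generates the mode/kind ID
constants and the table-backed lexSpec from a compiled specification, and formats the
result with go/format. A minimal sketch of invoking it (assuming `clspec` is a compiled
*spec.LexicalSpec; the package and file names are arbitrary):

    src, err := lexer.GenLexer(clspec, "main")
    if err != nil {
        // handle the error
    }
    // src is a self-contained, gofmt-formatted Go source file.
    err = os.WriteFile("example_lexer.go", src, 0644)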
diff --git a/driver/conflict_test.go b/driver/parser/conflict_test.go
index 3b0c5fb..21b829a 100644
--- a/driver/conflict_test.go
+++ b/driver/parser/conflict_test.go
@@ -1,11 +1,11 @@
-package driver
+package parser
import (
"strings"
"testing"
"github.com/nihei9/vartan/grammar"
- spec "github.com/nihei9/vartan/spec/grammar"
+ "github.com/nihei9/vartan/spec/grammar/parser"
)
func TestParserWithConflicts(t *testing.T) {
@@ -486,7 +486,7 @@ assign: '=';
for _, tt := range tests {
t.Run(tt.caption, func(t *testing.T) {
- ast, err := spec.Parse(strings.NewReader(tt.specSrc))
+ ast, err := parser.Parse(strings.NewReader(tt.specSrc))
if err != nil {
t.Fatal(err)
}
@@ -494,12 +494,7 @@ assign: '=';
b := grammar.GrammarBuilder{
AST: ast,
}
- g, err := b.Build()
- if err != nil {
- t.Fatal(err)
- }
-
- cg, _, err := grammar.Compile(g)
+ cg, _, err := b.Build()
if err != nil {
t.Fatal(err)
}
diff --git a/driver/lac_test.go b/driver/parser/lac_test.go
index 2274583..14bd2cf 100644
--- a/driver/lac_test.go
+++ b/driver/parser/lac_test.go
@@ -1,11 +1,11 @@
-package driver
+package parser
import (
"strings"
"testing"
"github.com/nihei9/vartan/grammar"
- spec "github.com/nihei9/vartan/spec/grammar"
+ "github.com/nihei9/vartan/spec/grammar/parser"
)
func TestParserWithLAC(t *testing.T) {
@@ -43,7 +43,7 @@ d: 'd';
"miss",
}
- ast, err := spec.Parse(strings.NewReader(specSrc))
+ ast, err := parser.Parse(strings.NewReader(specSrc))
if err != nil {
t.Fatal(err)
}
@@ -51,12 +51,7 @@ d: 'd';
b := grammar.GrammarBuilder{
AST: ast,
}
- g, err := b.Build()
- if err != nil {
- t.Fatal(err)
- }
-
- gram, _, err := grammar.Compile(g)
+ gram, _, err := b.Build()
if err != nil {
t.Fatal(err)
}
diff --git a/driver/parser.go b/driver/parser/parser.go
index a152c3d..05f7d38 100644
--- a/driver/parser.go
+++ b/driver/parser/parser.go
@@ -1,4 +1,4 @@
-package driver
+package parser
import (
"fmt"
diff --git a/driver/parser_test.go b/driver/parser/parser_test.go
index da4f714..b1b9e4f 100644
--- a/driver/parser_test.go
+++ b/driver/parser/parser_test.go
@@ -1,4 +1,4 @@
-package driver
+package parser
import (
"fmt"
@@ -6,7 +6,7 @@ import (
"testing"
"github.com/nihei9/vartan/grammar"
- spec "github.com/nihei9/vartan/spec/grammar"
+ "github.com/nihei9/vartan/spec/grammar/parser"
)
func termNode(kind string, text string, children ...*Node) *Node {
@@ -765,7 +765,7 @@ bar: 'bar';
for i, tt := range tests {
t.Run(fmt.Sprintf("#%v", i), func(t *testing.T) {
- ast, err := spec.Parse(strings.NewReader(tt.specSrc))
+ ast, err := parser.Parse(strings.NewReader(tt.specSrc))
if err != nil {
t.Fatal(err)
}
@@ -773,12 +773,7 @@ bar: 'bar';
b := grammar.GrammarBuilder{
AST: ast,
}
- g, err := b.Build()
- if err != nil {
- t.Fatal(err)
- }
-
- cg, _, err := grammar.Compile(g)
+ cg, _, err := b.Build()
if err != nil {
t.Fatal(err)
}
diff --git a/driver/semantic_action.go b/driver/parser/semantic_action.go
index 7e5a773..f709d4f 100644
--- a/driver/semantic_action.go
+++ b/driver/parser/semantic_action.go
@@ -1,4 +1,4 @@
-package driver
+package parser
import (
"encoding/json"
diff --git a/driver/semantic_action_test.go b/driver/parser/semantic_action_test.go
index 3d5f711..c98a12f 100644
--- a/driver/semantic_action_test.go
+++ b/driver/parser/semantic_action_test.go
@@ -1,4 +1,4 @@
-package driver
+package parser
import (
"fmt"
@@ -7,6 +7,7 @@ import (
"github.com/nihei9/vartan/grammar"
spec "github.com/nihei9/vartan/spec/grammar"
+ "github.com/nihei9/vartan/spec/grammar/parser"
)
type testSemAct struct {
@@ -15,7 +16,7 @@ type testSemAct struct {
}
func (a *testSemAct) Shift(tok VToken, recovered bool) {
- t := a.gram.ParsingTable.Terminals[tok.TerminalID()]
+ t := a.gram.Syntactic.Terminals[tok.TerminalID()]
if recovered {
a.actLog = append(a.actLog, fmt.Sprintf("shift/%v/recovered", t))
} else {
@@ -24,8 +25,8 @@ func (a *testSemAct) Shift(tok VToken, recovered bool) {
}
func (a *testSemAct) Reduce(prodNum int, recovered bool) {
- lhsSym := a.gram.ParsingTable.LHSSymbols[prodNum]
- lhsText := a.gram.ParsingTable.NonTerminals[lhsSym]
+ lhsSym := a.gram.Syntactic.LHSSymbols[prodNum]
+ lhsText := a.gram.Syntactic.NonTerminals[lhsSym]
if recovered {
a.actLog = append(a.actLog, fmt.Sprintf("reduce/%v/recovered", lhsText))
} else {
@@ -181,7 +182,7 @@ char
}
for _, tt := range tests {
t.Run(tt.caption, func(t *testing.T) {
- ast, err := spec.Parse(strings.NewReader(tt.specSrc))
+ ast, err := parser.Parse(strings.NewReader(tt.specSrc))
if err != nil {
t.Fatal(err)
}
@@ -189,12 +190,7 @@ char
b := grammar.GrammarBuilder{
AST: ast,
}
- g, err := b.Build()
- if err != nil {
- t.Fatal(err)
- }
-
- gram, _, err := grammar.Compile(g)
+ gram, _, err := b.Build()
if err != nil {
t.Fatal(err)
}
diff --git a/driver/spec.go b/driver/parser/spec.go
index cf3c7b0..1d57bae 100644
--- a/driver/spec.go
+++ b/driver/parser/spec.go
@@ -1,4 +1,4 @@
-package driver
+package parser
import spec "github.com/nihei9/vartan/spec/grammar"
@@ -13,59 +13,59 @@ func NewGrammar(g *spec.CompiledGrammar) *grammarImpl {
}
func (g *grammarImpl) InitialState() int {
- return g.g.ParsingTable.InitialState
+ return g.g.Syntactic.InitialState
}
func (g *grammarImpl) StartProduction() int {
- return g.g.ParsingTable.StartProduction
+ return g.g.Syntactic.StartProduction
}
func (g *grammarImpl) RecoverProduction(prod int) bool {
- return g.g.ParsingTable.RecoverProductions[prod] != 0
+ return g.g.Syntactic.RecoverProductions[prod] != 0
}
func (g *grammarImpl) Action(state int, terminal int) int {
- return g.g.ParsingTable.Action[state*g.g.ParsingTable.TerminalCount+terminal]
+ return g.g.Syntactic.Action[state*g.g.Syntactic.TerminalCount+terminal]
}
func (g *grammarImpl) GoTo(state int, lhs int) int {
- return g.g.ParsingTable.GoTo[state*g.g.ParsingTable.NonTerminalCount+lhs]
+ return g.g.Syntactic.GoTo[state*g.g.Syntactic.NonTerminalCount+lhs]
}
func (g *grammarImpl) AlternativeSymbolCount(prod int) int {
- return g.g.ParsingTable.AlternativeSymbolCounts[prod]
+ return g.g.Syntactic.AlternativeSymbolCounts[prod]
}
func (g *grammarImpl) TerminalCount() int {
- return g.g.ParsingTable.TerminalCount
+ return g.g.Syntactic.TerminalCount
}
func (g *grammarImpl) SkipTerminal(terminal int) bool {
- return g.g.ParsingTable.TerminalSkip[terminal] == 1
+ return g.g.Syntactic.TerminalSkip[terminal] == 1
}
func (g *grammarImpl) ErrorTrapperState(state int) bool {
- return g.g.ParsingTable.ErrorTrapperStates[state] != 0
+ return g.g.Syntactic.ErrorTrapperStates[state] != 0
}
func (g *grammarImpl) NonTerminal(nonTerminal int) string {
- return g.g.ParsingTable.NonTerminals[nonTerminal]
+ return g.g.Syntactic.NonTerminals[nonTerminal]
}
func (g *grammarImpl) LHS(prod int) int {
- return g.g.ParsingTable.LHSSymbols[prod]
+ return g.g.Syntactic.LHSSymbols[prod]
}
func (g *grammarImpl) EOF() int {
- return g.g.ParsingTable.EOFSymbol
+ return g.g.Syntactic.EOFSymbol
}
func (g *grammarImpl) Error() int {
- return g.g.ParsingTable.ErrorSymbol
+ return g.g.Syntactic.ErrorSymbol
}
func (g *grammarImpl) Terminal(terminal int) string {
- return g.g.ParsingTable.Terminals[terminal]
+ return g.g.Syntactic.Terminals[terminal]
}
func (g *grammarImpl) ASTAction(prod int) []int {
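
Note: grammarImpl stores the ACTION and GOTO tables as flat, row-major slices, so only the container name changes here; the index arithmetic is untouched. A self-contained sketch of that layout with made-up numbers:

package main

import "fmt"

func main() {
	// A toy ACTION table for 2 states over 3 terminals, flattened
	// row-major exactly as grammarImpl.Action reads it: the entry for
	// (state, terminal) lives at state*terminalCount+terminal.
	terminalCount := 3
	action := []int{
		0, 5, -2, // state 0
		3, 0, 0, // state 1
	}
	state, terminal := 1, 0
	fmt.Println(action[state*terminalCount+terminal]) // prints 3
}
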
diff --git a/driver/syntax_error_test.go b/driver/parser/syntax_error_test.go
index 683e355..71175be 100644
--- a/driver/syntax_error_test.go
+++ b/driver/parser/syntax_error_test.go
@@ -1,4 +1,4 @@
-package driver
+package parser
import (
"fmt"
@@ -7,7 +7,7 @@ import (
"testing"
"github.com/nihei9/vartan/grammar"
- spec "github.com/nihei9/vartan/spec/grammar"
+ "github.com/nihei9/vartan/spec/grammar/parser"
)
func TestParserWithSyntaxErrors(t *testing.T) {
@@ -123,7 +123,7 @@ c
}
for i, tt := range tests {
t.Run(fmt.Sprintf("#%v", i), func(t *testing.T) {
- ast, err := spec.Parse(strings.NewReader(tt.specSrc))
+ ast, err := parser.Parse(strings.NewReader(tt.specSrc))
if err != nil {
t.Fatal(err)
}
@@ -131,12 +131,7 @@ c
b := grammar.GrammarBuilder{
AST: ast,
}
- g, err := b.Build()
- if err != nil {
- t.Fatal(err)
- }
-
- gram, _, err := grammar.Compile(g)
+ gram, _, err := b.Build()
if err != nil {
t.Fatal(err)
}
@@ -253,7 +248,7 @@ foo
}
for i, tt := range tests {
t.Run(fmt.Sprintf("#%v", i), func(t *testing.T) {
- ast, err := spec.Parse(strings.NewReader(tt.specSrc))
+ ast, err := parser.Parse(strings.NewReader(tt.specSrc))
if err != nil {
t.Fatal(err)
}
@@ -261,12 +256,7 @@ foo
b := grammar.GrammarBuilder{
AST: ast,
}
- g, err := b.Build()
- if err != nil {
- t.Fatal(err)
- }
-
- gram, _, err := grammar.Compile(g)
+ gram, _, err := b.Build()
if err != nil {
t.Fatal(err)
}
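
Note: apart from the package move and the consolidated Build call, these tests exercise the same recovery machinery as before. A rough sketch of inspecting syntax errors after a parse; NewParser's argument order, the SyntaxErrors accessor, and the Row/Col fields are all assumptions based on the surrounding tests, not confirmed by this diff:

package example

import (
	"fmt"
	"strings"

	"github.com/nihei9/vartan/driver/parser"
	spec "github.com/nihei9/vartan/spec/grammar"
)

// reportSyntaxErrors parses src and prints the positions of any syntax
// errors the parser recovered from.
func reportSyntaxErrors(gram *spec.CompiledGrammar, src string) error {
	toks, err := parser.NewTokenStream(gram, strings.NewReader(src))
	if err != nil {
		return err
	}
	p, err := parser.NewParser(toks, parser.NewGrammar(gram))
	if err != nil {
		return err
	}
	if err := p.Parse(); err != nil {
		return err
	}
	for _, synErr := range p.SyntaxErrors() {
		fmt.Printf("%v:%v: syntax error\n", synErr.Row, synErr.Col)
	}
	return nil
}
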
diff --git a/driver/template.go b/driver/parser/template.go
index 321b2dd..96eb71f 100644
--- a/driver/template.go
+++ b/driver/parser/template.go
@@ -1,4 +1,4 @@
-package driver
+package parser
import (
"bytes"
@@ -49,12 +49,12 @@ func GenParser(cgram *spec.CompiledGrammar, pkgName string) ([]byte, error) {
var b strings.Builder
err = t.Execute(&b, map[string]interface{}{
- "initialState": cgram.ParsingTable.InitialState,
- "startProduction": cgram.ParsingTable.StartProduction,
- "terminalCount": cgram.ParsingTable.TerminalCount,
- "nonTerminalCount": cgram.ParsingTable.NonTerminalCount,
- "eofSymbol": cgram.ParsingTable.EOFSymbol,
- "errorSymbol": cgram.ParsingTable.ErrorSymbol,
+ "initialState": cgram.Syntactic.InitialState,
+ "startProduction": cgram.Syntactic.StartProduction,
+ "terminalCount": cgram.Syntactic.TerminalCount,
+ "nonTerminalCount": cgram.Syntactic.NonTerminalCount,
+ "eofSymbol": cgram.Syntactic.EOFSymbol,
+ "errorSymbol": cgram.Syntactic.ErrorSymbol,
})
if err != nil {
return nil, err
@@ -233,7 +233,7 @@ func genGrammarTemplateFuncs(cgram *spec.CompiledGrammar) template.FuncMap {
var b strings.Builder
fmt.Fprintf(&b, "[]int{\n")
c := 1
- for _, v := range cgram.ParsingTable.RecoverProductions {
+ for _, v := range cgram.Syntactic.RecoverProductions {
fmt.Fprintf(&b, "%v, ", v)
if c == 20 {
fmt.Fprintf(&b, "\n")
@@ -252,7 +252,7 @@ func genGrammarTemplateFuncs(cgram *spec.CompiledGrammar) template.FuncMap {
var b strings.Builder
fmt.Fprintf(&b, "[]int{\n")
c := 1
- for _, v := range cgram.ParsingTable.Action {
+ for _, v := range cgram.Syntactic.Action {
fmt.Fprintf(&b, "%v, ", v)
if c == 20 {
fmt.Fprintf(&b, "\n")
@@ -271,7 +271,7 @@ func genGrammarTemplateFuncs(cgram *spec.CompiledGrammar) template.FuncMap {
var b strings.Builder
fmt.Fprintf(&b, "[]int{\n")
c := 1
- for _, v := range cgram.ParsingTable.GoTo {
+ for _, v := range cgram.Syntactic.GoTo {
fmt.Fprintf(&b, "%v, ", v)
if c == 20 {
fmt.Fprintf(&b, "\n")
@@ -290,7 +290,7 @@ func genGrammarTemplateFuncs(cgram *spec.CompiledGrammar) template.FuncMap {
var b strings.Builder
fmt.Fprintf(&b, "[]int{\n")
c := 1
- for _, v := range cgram.ParsingTable.AlternativeSymbolCounts {
+ for _, v := range cgram.Syntactic.AlternativeSymbolCounts {
fmt.Fprintf(&b, "%v, ", v)
if c == 20 {
fmt.Fprintf(&b, "\n")
@@ -309,7 +309,7 @@ func genGrammarTemplateFuncs(cgram *spec.CompiledGrammar) template.FuncMap {
var b strings.Builder
fmt.Fprintf(&b, "[]int{\n")
c := 1
- for _, v := range cgram.ParsingTable.ErrorTrapperStates {
+ for _, v := range cgram.Syntactic.ErrorTrapperStates {
fmt.Fprintf(&b, "%v, ", v)
if c == 20 {
fmt.Fprintf(&b, "\n")
@@ -327,7 +327,7 @@ func genGrammarTemplateFuncs(cgram *spec.CompiledGrammar) template.FuncMap {
"genNonTerminals": func() string {
var b strings.Builder
fmt.Fprintf(&b, "[]string{\n")
- for _, v := range cgram.ParsingTable.NonTerminals {
+ for _, v := range cgram.Syntactic.NonTerminals {
fmt.Fprintf(&b, "%v,\n", strconv.Quote(v))
}
fmt.Fprintf(&b, "}")
@@ -337,7 +337,7 @@ func genGrammarTemplateFuncs(cgram *spec.CompiledGrammar) template.FuncMap {
var b strings.Builder
fmt.Fprintf(&b, "[]int{\n")
c := 1
- for _, v := range cgram.ParsingTable.LHSSymbols {
+ for _, v := range cgram.Syntactic.LHSSymbols {
fmt.Fprintf(&b, "%v, ", v)
if c == 20 {
fmt.Fprintf(&b, "\n")
@@ -355,7 +355,7 @@ func genGrammarTemplateFuncs(cgram *spec.CompiledGrammar) template.FuncMap {
"genTerminals": func() string {
var b strings.Builder
fmt.Fprintf(&b, "[]string{\n")
- for _, v := range cgram.ParsingTable.Terminals {
+ for _, v := range cgram.Syntactic.Terminals {
fmt.Fprintf(&b, "%v,\n", strconv.Quote(v))
}
fmt.Fprintf(&b, "}")
@@ -365,7 +365,7 @@ func genGrammarTemplateFuncs(cgram *spec.CompiledGrammar) template.FuncMap {
var b strings.Builder
fmt.Fprintf(&b, "[]int{\n")
c := 1
- for _, v := range cgram.ParsingTable.TerminalSkip {
+ for _, v := range cgram.Syntactic.TerminalSkip {
fmt.Fprintf(&b, "%v, ", v)
if c == 20 {
fmt.Fprintf(&b, "\n")
@@ -473,7 +473,7 @@ func genLexerTemplateFuncs(cgram *spec.CompiledGrammar) template.FuncMap {
var b strings.Builder
fmt.Fprintf(&b, "[]int{\n")
c := 1
- for _, v := range cgram.LexicalSpecification.Maleeni.KindToTerminal {
+ for _, v := range cgram.Syntactic.KindToTerminal {
fmt.Fprintf(&b, "%v, ", v)
if c == 20 {
fmt.Fprintf(&b, "\n")
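
Note: GenParser and the template funcs now read every table from cgram.Syntactic, including the kind-to-terminal mapping previously under LexicalSpecification.Maleeni. Its signature is visible in the hunk above; a minimal sketch of invoking it, with a caller-chosen output path:

package example

import (
	"os"

	"github.com/nihei9/vartan/driver/parser"
	spec "github.com/nihei9/vartan/spec/grammar"
)

// writeParser renders standalone parser source for a compiled grammar
// and writes it to path; pkgName becomes the generated package clause.
func writeParser(cgram *spec.CompiledGrammar, pkgName, path string) error {
	src, err := parser.GenParser(cgram, pkgName)
	if err != nil {
		return err
	}
	return os.WriteFile(path, src, 0o644)
}
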
diff --git a/driver/token_stream.go b/driver/parser/token_stream.go
index eaf56c6..0bc9e32 100644
--- a/driver/token_stream.go
+++ b/driver/parser/token_stream.go
@@ -1,15 +1,15 @@
-package driver
+package parser
import (
"io"
- mldriver "github.com/nihei9/maleeni/driver"
+ "github.com/nihei9/vartan/driver/lexer"
spec "github.com/nihei9/vartan/spec/grammar"
)
type vToken struct {
terminalID int
- tok *mldriver.Token
+ tok *lexer.Token
}
func (t *vToken) TerminalID() int {
@@ -33,19 +33,19 @@ func (t *vToken) Position() (int, int) {
}
type tokenStream struct {
- lex *mldriver.Lexer
+ lex *lexer.Lexer
kindToTerminal []int
}
func NewTokenStream(g *spec.CompiledGrammar, src io.Reader) (TokenStream, error) {
- lex, err := mldriver.NewLexer(mldriver.NewLexSpec(g.LexicalSpecification.Maleeni.Spec), src)
+ lex, err := lexer.NewLexer(lexer.NewLexSpec(g.Lexical), src)
if err != nil {
return nil, err
}
return &tokenStream{
lex: lex,
- kindToTerminal: g.LexicalSpecification.Maleeni.KindToTerminal,
+ kindToTerminal: g.Syntactic.KindToTerminal,
}, nil
}
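
Note: token_stream.go is where the two new packages meet: the stream now wraps the in-tree lexer instead of maleeni's driver, and the kind-to-terminal mapping comes from the Syntactic tables. Wired end to end, a parse now looks roughly like this (NewParser's signature is assumed unchanged by the move):

package example

import (
	"strings"

	"github.com/nihei9/vartan/driver/parser"
	spec "github.com/nihei9/vartan/spec/grammar"
)

// parse drives the relocated driver: NewTokenStream builds the
// lexer-backed stream shown above, NewGrammar adapts the compiled
// grammar's Syntactic tables, and the parser consumes both.
func parse(cg *spec.CompiledGrammar, src string) error {
	toks, err := parser.NewTokenStream(cg, strings.NewReader(src))
	if err != nil {
		return err
	}
	p, err := parser.NewParser(toks, parser.NewGrammar(cg))
	if err != nil {
		return err
	}
	return p.Parse()
}
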