Diffstat (limited to 'driver')
-rw-r--r--   driver/lexer/lexer.go                 | 335
-rw-r--r--   driver/lexer/lexer_test.go            | 932
-rw-r--r--   driver/lexer/spec.go                  |  71
-rw-r--r--   driver/lexer/template.go              | 760
-rw-r--r--   driver/parser/conflict_test.go        | 524
-rw-r--r--   driver/parser/lac_test.go             | 120
-rw-r--r--   driver/parser/parser.go               | 416
-rw-r--r--   driver/parser/parser_test.go          | 833
-rw-r--r--   driver/parser/semantic_action.go      | 371
-rw-r--r--   driver/parser/semantic_action_test.go | 227
-rw-r--r--   driver/parser/spec.go                 |  73
-rw-r--r--   driver/parser/syntax_error_test.go    | 306
-rw-r--r--   driver/parser/template.go             | 535
-rw-r--r--   driver/parser/token_stream.go         |  65
14 files changed, 0 insertions, 5568 deletions
diff --git a/driver/lexer/lexer.go b/driver/lexer/lexer.go
deleted file mode 100644
index 3f9712e..0000000
--- a/driver/lexer/lexer.go
+++ /dev/null
@@ -1,335 +0,0 @@
-package lexer
-
-import (
- "fmt"
- "io"
-)
-
-type ModeID int
-
-func (id ModeID) Int() int {
- return int(id)
-}
-
-type StateID int
-
-func (id StateID) Int() int {
- return int(id)
-}
-
-type KindID int
-
-func (id KindID) Int() int {
- return int(id)
-}
-
-type ModeKindID int
-
-func (id ModeKindID) Int() int {
- return int(id)
-}
-
-type LexSpec interface {
- InitialMode() ModeID
- Pop(mode ModeID, modeKind ModeKindID) bool
- Push(mode ModeID, modeKind ModeKindID) (ModeID, bool)
- ModeName(mode ModeID) string
- InitialState(mode ModeID) StateID
- NextState(mode ModeID, state StateID, v int) (StateID, bool)
- Accept(mode ModeID, state StateID) (ModeKindID, bool)
- KindIDAndName(mode ModeID, modeKind ModeKindID) (KindID, string)
-}
-
-// Token represents a token.
-type Token struct {
- // ModeID is an ID of a lex mode.
- ModeID ModeID
-
- // KindID is an ID of a kind. This is unique among all modes.
- KindID KindID
-
- // ModeKindID is an ID of a lexical kind. This is unique only within a mode.
- // Note that you need to use the KindID field if you want to identify a kind across all modes.
- ModeKindID ModeKindID
-
- // BytePos is the byte position where the token appears.
- BytePos int
-
- // ByteLen is the byte length of the token.
- ByteLen int
-
- // Row is the row number where the token appears.
- Row int
-
- // Col is a column number where a token appears.
- // Note that Col is counted in code points, not bytes.
- Col int
-
- // Lexeme is the byte sequence that matched a pattern in the lexical specification.
- Lexeme []byte
-
- // EOF is true when the token represents the EOF.
- EOF bool
-
- // Invalid is true when the token is an error token.
- Invalid bool
-}
-
-type LexerOption func(l *Lexer) error
-
-// DisableModeTransition disables the active mode transition. Even if the lexical specification has push and pop
-// operations, the lexer doesn't perform them. When the lexical specification has multiple modes and this option is
-// enabled, you need to call the Lexer.PushMode and Lexer.PopMode methods to perform the mode transitions yourself.
-// You can use the Lexer.Mode method to retrieve the current lex mode.
-func DisableModeTransition() LexerOption {
- return func(l *Lexer) error {
- l.passiveModeTran = true
- return nil
- }
-}
-
-type lexerState struct {
- srcPtr int
- row int
- col int
-}
-
-type Lexer struct {
- spec LexSpec
- src []byte
- state lexerState
- lastAcceptedState lexerState
- tokBuf []*Token
- modeStack []ModeID
- passiveModeTran bool
-}
-
-// NewLexer returns a new lexer.
-func NewLexer(spec LexSpec, src io.Reader, opts ...LexerOption) (*Lexer, error) {
- b, err := io.ReadAll(src)
- if err != nil {
- return nil, err
- }
- l := &Lexer{
- spec: spec,
- src: b,
- state: lexerState{
- srcPtr: 0,
- row: 0,
- col: 0,
- },
- lastAcceptedState: lexerState{
- srcPtr: 0,
- row: 0,
- col: 0,
- },
- modeStack: []ModeID{
- spec.InitialMode(),
- },
- passiveModeTran: false,
- }
- for _, opt := range opts {
- err := opt(l)
- if err != nil {
- return nil, err
- }
- }
-
- return l, nil
-}
-
-// Next returns the next token.
-func (l *Lexer) Next() (*Token, error) {
- if len(l.tokBuf) > 0 {
- tok := l.tokBuf[0]
- l.tokBuf = l.tokBuf[1:]
- return tok, nil
- }
-
- tok, err := l.nextAndTransition()
- if err != nil {
- return nil, err
- }
- if !tok.Invalid {
- return tok, nil
- }
- errTok := tok
- for {
- tok, err = l.nextAndTransition()
- if err != nil {
- return nil, err
- }
- if !tok.Invalid {
- break
- }
- errTok.ByteLen += tok.ByteLen
- errTok.Lexeme = append(errTok.Lexeme, tok.Lexeme...)
- }
- l.tokBuf = append(l.tokBuf, tok)
-
- return errTok, nil
-}
-
-func (l *Lexer) nextAndTransition() (*Token, error) {
- tok, err := l.next()
- if err != nil {
- return nil, err
- }
- if tok.EOF || tok.Invalid {
- return tok, nil
- }
- if l.passiveModeTran {
- return tok, nil
- }
- mode := l.Mode()
- if l.spec.Pop(mode, tok.ModeKindID) {
- err := l.PopMode()
- if err != nil {
- return nil, err
- }
- }
- if mode, ok := l.spec.Push(mode, tok.ModeKindID); ok {
- l.PushMode(mode)
- }
- // The length of the mode stack must be checked after both the pop and push operations because a pop and a push can
- // occur at the same time. When the stack holds just one element and it is popped, the stack becomes empty temporarily.
- // However, since a push may be performed immediately afterward, the lexer allows the stack to be temporarily empty here.
- if len(l.modeStack) == 0 {
- return nil, fmt.Errorf("a mode stack must have at least one element")
- }
- return tok, nil
-}
-
-func (l *Lexer) next() (*Token, error) {
- mode := l.Mode()
- state := l.spec.InitialState(mode)
- buf := []byte{}
- startPos := l.state.srcPtr
- row := l.state.row
- col := l.state.col
- var tok *Token
- for {
- v, eof := l.read()
- if eof {
- if tok != nil {
- l.revert()
- return tok, nil
- }
- // When `buf` contains unaccepted data and the lexer reads EOF, it treats the buffered data as an invalid token.
- if len(buf) > 0 {
- return &Token{
- ModeID: mode,
- ModeKindID: 0,
- BytePos: startPos,
- ByteLen: l.state.srcPtr - startPos,
- Lexeme: buf,
- Row: row,
- Col: col,
- Invalid: true,
- }, nil
- }
- return &Token{
- ModeID: mode,
- ModeKindID: 0,
- BytePos: startPos,
- Row: row,
- Col: col,
- EOF: true,
- }, nil
- }
- buf = append(buf, v)
- nextState, ok := l.spec.NextState(mode, state, int(v))
- if !ok {
- if tok != nil {
- l.revert()
- return tok, nil
- }
- return &Token{
- ModeID: mode,
- ModeKindID: 0,
- BytePos: startPos,
- ByteLen: l.state.srcPtr - startPos,
- Lexeme: buf,
- Row: row,
- Col: col,
- Invalid: true,
- }, nil
- }
- state = nextState
- if modeKindID, ok := l.spec.Accept(mode, state); ok {
- kindID, _ := l.spec.KindIDAndName(mode, modeKindID)
- tok = &Token{
- ModeID: mode,
- KindID: kindID,
- ModeKindID: modeKindID,
- BytePos: startPos,
- ByteLen: l.state.srcPtr - startPos,
- Lexeme: buf,
- Row: row,
- Col: col,
- }
- l.accept()
- }
- }
-}
-
-// Mode returns the current lex mode.
-func (l *Lexer) Mode() ModeID {
- return l.modeStack[len(l.modeStack)-1]
-}
-
-// PushMode adds a lex mode onto the mode stack.
-func (l *Lexer) PushMode(mode ModeID) {
- l.modeStack = append(l.modeStack, mode)
-}
-
-// PopMode removes a lex mode from the top of the mode stack.
-func (l *Lexer) PopMode() error {
- sLen := len(l.modeStack)
- if sLen == 0 {
- return fmt.Errorf("cannot pop a lex mode from a lex mode stack any more")
- }
- l.modeStack = l.modeStack[:sLen-1]
- return nil
-}
-
-func (l *Lexer) read() (byte, bool) {
- if l.state.srcPtr >= len(l.src) {
- return 0, true
- }
-
- b := l.src[l.state.srcPtr]
- l.state.srcPtr++
-
- // Count the token position.
- // The driver treats LF as the end of a line and counts columns in code points, not bytes.
- // To count in code points, we refer to the First Byte column in Table 3-6.
- //
- // Reference:
- // - [Table 3-6] https://www.unicode.org/versions/Unicode13.0.0/ch03.pdf > Table 3-6. UTF-8 Bit Distribution
- if b < 128 {
- // 0x0A is LF.
- if b == 0x0A {
- l.state.row++
- l.state.col = 0
- } else {
- l.state.col++
- }
- } else if b>>5 == 6 || b>>4 == 14 || b>>3 == 30 {
- l.state.col++
- }
-
- return b, false
-}
-
-// accept saves the current state.
-func (l *Lexer) accept() {
- l.lastAcceptedState = l.state
-}
-
-// revert reverts the lexer state to the last accepted state.
-//
-// We must not call this function consecutively.
-func (l *Lexer) revert() {
- l.state = l.lastAcceptedState
-}
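
For orientation, here is a minimal sketch of how this driver was consumed. The import path and the argument-free NewLexSpec() constructor are assumptions (that constructor is what template.go below generates); the Lexer API itself (NewLexer, Next, the EOF and Invalid flags, code-point-based Row/Col) is the one defined in the file above.

package main

import (
	"fmt"
	"strings"

	lexer "example.com/driver/lexer" // hypothetical import path for the package above
)

func main() {
	// NewLexSpec() is assumed to be the argument-free constructor emitted by
	// template.go (see below); it returns a value satisfying lexer.LexSpec.
	l, err := lexer.NewLexer(lexer.NewLexSpec(), strings.NewReader("foo 123"))
	if err != nil {
		panic(err)
	}
	for {
		tok, err := l.Next()
		if err != nil {
			panic(err)
		}
		if tok.EOF {
			break
		}
		if tok.Invalid {
			// Next() coalesces consecutive invalid bytes into a single error token.
			fmt.Printf("invalid token %q at row %d, col %d\n", tok.Lexeme, tok.Row, tok.Col)
			continue
		}
		fmt.Printf("kind %d: %q\n", tok.KindID, tok.Lexeme)
	}
}
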
diff --git a/driver/lexer/lexer_test.go b/driver/lexer/lexer_test.go
deleted file mode 100644
index d32b087..0000000
--- a/driver/lexer/lexer_test.go
+++ /dev/null
@@ -1,932 +0,0 @@
-package lexer
-
-import (
- "bytes"
- "fmt"
- "strings"
- "testing"
-
- "grammar/lexical"
- spec "spec/grammar"
-)
-
-func newLexEntry(modes []string, kind string, pattern string, push string, pop bool) *lexical.LexEntry {
- ms := []spec.LexModeName{}
- for _, m := range modes {
- ms = append(ms, spec.LexModeName(m))
- }
- return &lexical.LexEntry{
- Kind: spec.LexKindName(kind),
- Pattern: pattern,
- Modes: ms,
- Push: spec.LexModeName(push),
- Pop: pop,
- }
-}
-
-func newLexEntryDefaultNOP(kind string, pattern string) *lexical.LexEntry {
- return &lexical.LexEntry{
- Kind: spec.LexKindName(kind),
- Pattern: pattern,
- Modes: []spec.LexModeName{
- spec.LexModeNameDefault,
- },
- }
-}
-
-func newLexEntryFragment(kind string, pattern string) *lexical.LexEntry {
- return &lexical.LexEntry{
- Kind: spec.LexKindName(kind),
- Pattern: pattern,
- Fragment: true,
- }
-}
-
-func newToken(modeID ModeID, kindID KindID, modeKindID ModeKindID, lexeme []byte) *Token {
- return &Token{
- ModeID: modeID,
- KindID: kindID,
- ModeKindID: modeKindID,
- Lexeme: lexeme,
- }
-}
-
-func newTokenDefault(kindID int, modeKindID int, lexeme []byte) *Token {
- return newToken(
- ModeID(spec.LexModeIDDefault.Int()),
- KindID(spec.LexKindID(kindID).Int()),
- ModeKindID(spec.LexModeKindID(modeKindID).Int()),
- lexeme,
- )
-}
-
-func newEOFToken(modeID ModeID, modeName string) *Token {
- return &Token{
- ModeID: modeID,
- ModeKindID: 0,
- EOF: true,
- }
-}
-
-func newEOFTokenDefault() *Token {
- return newEOFToken(ModeID(spec.LexModeIDDefault.Int()), spec.LexModeNameDefault.String())
-}
-
-func newInvalidTokenDefault(lexeme []byte) *Token {
- return &Token{
- ModeID: ModeID(spec.LexModeIDDefault.Int()),
- ModeKindID: 0,
- Lexeme: lexeme,
- Invalid: true,
- }
-}
-
-func withPos(tok *Token, bytePos int, byteLen int, row int, col int) *Token {
- tok.BytePos = bytePos
- tok.ByteLen = byteLen
- tok.Row = row
- tok.Col = col
- return tok
-}
-
-func TestLexer_Next(t *testing.T) {
- test := []struct {
- lspec *lexical.LexSpec
- src string
- tokens []*Token
- passiveModeTran bool
- tran func(l *Lexer, tok *Token) error
- }{
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- newLexEntryDefaultNOP("t1", "(a|b)*abb"),
- newLexEntryDefaultNOP("t2", " +"),
- },
- },
- src: "abb aabb aaabb babb bbabb abbbabb",
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte("abb")), 0, 3, 0, 0),
- withPos(newTokenDefault(2, 2, []byte(" ")), 3, 1, 0, 3),
- withPos(newTokenDefault(1, 1, []byte("aabb")), 4, 4, 0, 4),
- withPos(newTokenDefault(2, 2, []byte(" ")), 8, 1, 0, 8),
- withPos(newTokenDefault(1, 1, []byte("aaabb")), 9, 5, 0, 9),
- withPos(newTokenDefault(2, 2, []byte(" ")), 14, 1, 0, 14),
- withPos(newTokenDefault(1, 1, []byte("babb")), 15, 4, 0, 15),
- withPos(newTokenDefault(2, 2, []byte(" ")), 19, 1, 0, 19),
- withPos(newTokenDefault(1, 1, []byte("bbabb")), 20, 5, 0, 20),
- withPos(newTokenDefault(2, 2, []byte(" ")), 25, 1, 0, 25),
- withPos(newTokenDefault(1, 1, []byte("abbbabb")), 26, 7, 0, 26),
- withPos(newEOFTokenDefault(), 33, 0, 0, 33),
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- newLexEntryDefaultNOP("t1", "b?a+"),
- newLexEntryDefaultNOP("t2", "(ab)?(cd)+"),
- newLexEntryDefaultNOP("t3", " +"),
- },
- },
- src: "ba baaa a aaa abcd abcdcdcd cd cdcdcd",
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte("ba")), 0, 2, 0, 0),
- withPos(newTokenDefault(3, 3, []byte(" ")), 2, 1, 0, 2),
- withPos(newTokenDefault(1, 1, []byte("baaa")), 3, 4, 0, 3),
- withPos(newTokenDefault(3, 3, []byte(" ")), 7, 1, 0, 7),
- withPos(newTokenDefault(1, 1, []byte("a")), 8, 1, 0, 8),
- withPos(newTokenDefault(3, 3, []byte(" ")), 9, 1, 0, 9),
- withPos(newTokenDefault(1, 1, []byte("aaa")), 10, 3, 0, 10),
- withPos(newTokenDefault(3, 3, []byte(" ")), 13, 1, 0, 13),
- withPos(newTokenDefault(2, 2, []byte("abcd")), 14, 4, 0, 14),
- withPos(newTokenDefault(3, 3, []byte(" ")), 18, 1, 0, 18),
- withPos(newTokenDefault(2, 2, []byte("abcdcdcd")), 19, 8, 0, 19),
- withPos(newTokenDefault(3, 3, []byte(" ")), 27, 1, 0, 27),
- withPos(newTokenDefault(2, 2, []byte("cd")), 28, 2, 0, 28),
- withPos(newTokenDefault(3, 3, []byte(" ")), 30, 1, 0, 30),
- withPos(newTokenDefault(2, 2, []byte("cdcdcd")), 31, 6, 0, 31),
- withPos(newEOFTokenDefault(), 37, 0, 0, 37),
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- newLexEntryDefaultNOP("t1", "."),
- },
- },
- src: string([]byte{
- 0x00,
- 0x7f,
- 0xc2, 0x80,
- 0xdf, 0xbf,
- 0xe1, 0x80, 0x80,
- 0xec, 0xbf, 0xbf,
- 0xed, 0x80, 0x80,
- 0xed, 0x9f, 0xbf,
- 0xee, 0x80, 0x80,
- 0xef, 0xbf, 0xbf,
- 0xf0, 0x90, 0x80, 0x80,
- 0xf0, 0xbf, 0xbf, 0xbf,
- 0xf1, 0x80, 0x80, 0x80,
- 0xf3, 0xbf, 0xbf, 0xbf,
- 0xf4, 0x80, 0x80, 0x80,
- 0xf4, 0x8f, 0xbf, 0xbf,
- }),
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte{0x00}), 0, 1, 0, 0),
- withPos(newTokenDefault(1, 1, []byte{0x7f}), 1, 1, 0, 1),
- withPos(newTokenDefault(1, 1, []byte{0xc2, 0x80}), 2, 2, 0, 2),
- withPos(newTokenDefault(1, 1, []byte{0xdf, 0xbf}), 4, 2, 0, 3),
- withPos(newTokenDefault(1, 1, []byte{0xe1, 0x80, 0x80}), 6, 3, 0, 4),
- withPos(newTokenDefault(1, 1, []byte{0xec, 0xbf, 0xbf}), 9, 3, 0, 5),
- withPos(newTokenDefault(1, 1, []byte{0xed, 0x80, 0x80}), 12, 3, 0, 6),
- withPos(newTokenDefault(1, 1, []byte{0xed, 0x9f, 0xbf}), 15, 3, 0, 7),
- withPos(newTokenDefault(1, 1, []byte{0xee, 0x80, 0x80}), 18, 3, 0, 8),
- withPos(newTokenDefault(1, 1, []byte{0xef, 0xbf, 0xbf}), 21, 3, 0, 9),
- withPos(newTokenDefault(1, 1, []byte{0xf0, 0x90, 0x80, 0x80}), 24, 4, 0, 10),
- withPos(newTokenDefault(1, 1, []byte{0xf0, 0xbf, 0xbf, 0xbf}), 28, 4, 0, 11),
- withPos(newTokenDefault(1, 1, []byte{0xf1, 0x80, 0x80, 0x80}), 32, 4, 0, 12),
- withPos(newTokenDefault(1, 1, []byte{0xf3, 0xbf, 0xbf, 0xbf}), 36, 4, 0, 13),
- withPos(newTokenDefault(1, 1, []byte{0xf4, 0x80, 0x80, 0x80}), 40, 4, 0, 14),
- withPos(newTokenDefault(1, 1, []byte{0xf4, 0x8f, 0xbf, 0xbf}), 44, 4, 0, 15),
- withPos(newEOFTokenDefault(), 48, 0, 0, 16),
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- newLexEntryDefaultNOP("t1", "[ab.*+?|()[\\]]"),
- },
- },
- src: "ab.*+?|()[]",
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte("a")), 0, 1, 0, 0),
- withPos(newTokenDefault(1, 1, []byte("b")), 1, 1, 0, 1),
- withPos(newTokenDefault(1, 1, []byte(".")), 2, 1, 0, 2),
- withPos(newTokenDefault(1, 1, []byte("*")), 3, 1, 0, 3),
- withPos(newTokenDefault(1, 1, []byte("+")), 4, 1, 0, 4),
- withPos(newTokenDefault(1, 1, []byte("?")), 5, 1, 0, 5),
- withPos(newTokenDefault(1, 1, []byte("|")), 6, 1, 0, 6),
- withPos(newTokenDefault(1, 1, []byte("(")), 7, 1, 0, 7),
- withPos(newTokenDefault(1, 1, []byte(")")), 8, 1, 0, 8),
- withPos(newTokenDefault(1, 1, []byte("[")), 9, 1, 0, 9),
- withPos(newTokenDefault(1, 1, []byte("]")), 10, 1, 0, 10),
- withPos(newEOFTokenDefault(), 11, 0, 0, 11),
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- // all 1-byte characters except the null character (U+0000)
- //
- // NOTE:
- // vartan cannot handle the null character in patterns because lexical.lexer,
- // specifically read() and restore(), treats the null character as meaning that a symbol doesn't exist.
- // If a pattern needs the null character, use the code point expression \u{0000}.
- newLexEntryDefaultNOP("char_1_byte", "[\x01-\x7f]"),
- },
- },
- src: string([]byte{
- 0x01,
- 0x02,
- 0x7e,
- 0x7f,
- }),
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte{0x01}), 0, 1, 0, 0),
- withPos(newTokenDefault(1, 1, []byte{0x02}), 1, 1, 0, 1),
- withPos(newTokenDefault(1, 1, []byte{0x7e}), 2, 1, 0, 2),
- withPos(newTokenDefault(1, 1, []byte{0x7f}), 3, 1, 0, 3),
- withPos(newEOFTokenDefault(), 4, 0, 0, 4),
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- // all 2 byte characters
- newLexEntryDefaultNOP("char_2_byte", "[\xc2\x80-\xdf\xbf]"),
- },
- },
- src: string([]byte{
- 0xc2, 0x80,
- 0xc2, 0x81,
- 0xdf, 0xbe,
- 0xdf, 0xbf,
- }),
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte{0xc2, 0x80}), 0, 2, 0, 0),
- withPos(newTokenDefault(1, 1, []byte{0xc2, 0x81}), 2, 2, 0, 1),
- withPos(newTokenDefault(1, 1, []byte{0xdf, 0xbe}), 4, 2, 0, 2),
- withPos(newTokenDefault(1, 1, []byte{0xdf, 0xbf}), 6, 2, 0, 3),
- withPos(newEOFTokenDefault(), 8, 0, 0, 4),
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- // All bytes are the same.
- newLexEntryDefaultNOP("char_3_byte", "[\xe0\xa0\x80-\xe0\xa0\x80]"),
- },
- },
- src: string([]byte{
- 0xe0, 0xa0, 0x80,
- }),
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte{0xe0, 0xa0, 0x80}), 0, 3, 0, 0),
- withPos(newEOFTokenDefault(), 3, 0, 0, 1),
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- // The first two bytes are the same.
- newLexEntryDefaultNOP("char_3_byte", "[\xe0\xa0\x80-\xe0\xa0\xbf]"),
- },
- },
- src: string([]byte{
- 0xe0, 0xa0, 0x80,
- 0xe0, 0xa0, 0x81,
- 0xe0, 0xa0, 0xbe,
- 0xe0, 0xa0, 0xbf,
- }),
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte{0xe0, 0xa0, 0x80}), 0, 3, 0, 0),
- withPos(newTokenDefault(1, 1, []byte{0xe0, 0xa0, 0x81}), 3, 3, 0, 1),
- withPos(newTokenDefault(1, 1, []byte{0xe0, 0xa0, 0xbe}), 6, 3, 0, 2),
- withPos(newTokenDefault(1, 1, []byte{0xe0, 0xa0, 0xbf}), 9, 3, 0, 3),
- withPos(newEOFTokenDefault(), 12, 0, 0, 4),
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- // The first byte is the same.
- newLexEntryDefaultNOP("char_3_byte", "[\xe0\xa0\x80-\xe0\xbf\xbf]"),
- },
- },
- src: string([]byte{
- 0xe0, 0xa0, 0x80,
- 0xe0, 0xa0, 0x81,
- 0xe0, 0xbf, 0xbe,
- 0xe0, 0xbf, 0xbf,
- }),
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte{0xe0, 0xa0, 0x80}), 0, 3, 0, 0),
- withPos(newTokenDefault(1, 1, []byte{0xe0, 0xa0, 0x81}), 3, 3, 0, 1),
- withPos(newTokenDefault(1, 1, []byte{0xe0, 0xbf, 0xbe}), 6, 3, 0, 2),
- withPos(newTokenDefault(1, 1, []byte{0xe0, 0xbf, 0xbf}), 9, 3, 0, 3),
- withPos(newEOFTokenDefault(), 12, 0, 0, 4),
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- // all 3 byte characters
- newLexEntryDefaultNOP("char_3_byte", "[\xe0\xa0\x80-\xef\xbf\xbf]"),
- },
- },
- src: string([]byte{
- 0xe0, 0xa0, 0x80,
- 0xe0, 0xa0, 0x81,
- 0xe0, 0xbf, 0xbe,
- 0xe0, 0xbf, 0xbf,
- 0xe1, 0x80, 0x80,
- 0xe1, 0x80, 0x81,
- 0xec, 0xbf, 0xbe,
- 0xec, 0xbf, 0xbf,
- 0xed, 0x80, 0x80,
- 0xed, 0x80, 0x81,
- 0xed, 0x9f, 0xbe,
- 0xed, 0x9f, 0xbf,
- 0xee, 0x80, 0x80,
- 0xee, 0x80, 0x81,
- 0xef, 0xbf, 0xbe,
- 0xef, 0xbf, 0xbf,
- }),
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte{0xe0, 0xa0, 0x80}), 0, 3, 0, 0),
- withPos(newTokenDefault(1, 1, []byte{0xe0, 0xa0, 0x81}), 3, 3, 0, 1),
- withPos(newTokenDefault(1, 1, []byte{0xe0, 0xbf, 0xbe}), 6, 3, 0, 2),
- withPos(newTokenDefault(1, 1, []byte{0xe0, 0xbf, 0xbf}), 9, 3, 0, 3),
- withPos(newTokenDefault(1, 1, []byte{0xe1, 0x80, 0x80}), 12, 3, 0, 4),
- withPos(newTokenDefault(1, 1, []byte{0xe1, 0x80, 0x81}), 15, 3, 0, 5),
- withPos(newTokenDefault(1, 1, []byte{0xec, 0xbf, 0xbe}), 18, 3, 0, 6),
- withPos(newTokenDefault(1, 1, []byte{0xec, 0xbf, 0xbf}), 21, 3, 0, 7),
- withPos(newTokenDefault(1, 1, []byte{0xed, 0x80, 0x80}), 24, 3, 0, 8),
- withPos(newTokenDefault(1, 1, []byte{0xed, 0x80, 0x81}), 27, 3, 0, 9),
- withPos(newTokenDefault(1, 1, []byte{0xed, 0x9f, 0xbe}), 30, 3, 0, 10),
- withPos(newTokenDefault(1, 1, []byte{0xed, 0x9f, 0xbf}), 33, 3, 0, 11),
- withPos(newTokenDefault(1, 1, []byte{0xee, 0x80, 0x80}), 36, 3, 0, 12),
- withPos(newTokenDefault(1, 1, []byte{0xee, 0x80, 0x81}), 39, 3, 0, 13),
- withPos(newTokenDefault(1, 1, []byte{0xef, 0xbf, 0xbe}), 42, 3, 0, 14),
- withPos(newTokenDefault(1, 1, []byte{0xef, 0xbf, 0xbf}), 45, 3, 0, 15),
- withPos(newEOFTokenDefault(), 48, 0, 0, 16),
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- // All bytes are the same.
- newLexEntryDefaultNOP("char_4_byte", "[\xf0\x90\x80\x80-\xf0\x90\x80\x80]"),
- },
- },
- src: string([]byte{
- 0xf0, 0x90, 0x80, 0x80,
- }),
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte{0xf0, 0x90, 0x80, 0x80}), 0, 4, 0, 0),
- withPos(newEOFTokenDefault(), 4, 0, 0, 1),
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- // The first 3 bytes are the same.
- newLexEntryDefaultNOP("char_4_byte", "[\xf0\x90\x80\x80-\xf0\x90\x80\xbf]"),
- },
- },
- src: string([]byte{
- 0xf0, 0x90, 0x80, 0x80,
- 0xf0, 0x90, 0x80, 0x81,
- 0xf0, 0x90, 0x80, 0xbe,
- 0xf0, 0x90, 0x80, 0xbf,
- }),
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte{0xf0, 0x90, 0x80, 0x80}), 0, 4, 0, 0),
- withPos(newTokenDefault(1, 1, []byte{0xf0, 0x90, 0x80, 0x81}), 4, 4, 0, 1),
- withPos(newTokenDefault(1, 1, []byte{0xf0, 0x90, 0x80, 0xbe}), 8, 4, 0, 2),
- withPos(newTokenDefault(1, 1, []byte{0xf0, 0x90, 0x80, 0xbf}), 12, 4, 0, 3),
- withPos(newEOFTokenDefault(), 16, 0, 0, 4),
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- // The first 2 bytes are the same.
- newLexEntryDefaultNOP("char_4_byte", "[\xf0\x90\x80\x80-\xf0\x90\xbf\xbf]"),
- },
- },
- src: string([]byte{
- 0xf0, 0x90, 0x80, 0x80,
- 0xf0, 0x90, 0x80, 0x81,
- 0xf0, 0x90, 0xbf, 0xbe,
- 0xf0, 0x90, 0xbf, 0xbf,
- }),
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte{0xf0, 0x90, 0x80, 0x80}), 0, 4, 0, 0),
- withPos(newTokenDefault(1, 1, []byte{0xf0, 0x90, 0x80, 0x81}), 4, 4, 0, 1),
- withPos(newTokenDefault(1, 1, []byte{0xf0, 0x90, 0xbf, 0xbe}), 8, 4, 0, 2),
- withPos(newTokenDefault(1, 1, []byte{0xf0, 0x90, 0xbf, 0xbf}), 12, 4, 0, 3),
- withPos(newEOFTokenDefault(), 16, 0, 0, 4),
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- // The first byte is the same.
- newLexEntryDefaultNOP("char_4_byte", "[\xf0\x90\x80\x80-\xf0\xbf\xbf\xbf]"),
- },
- },
- src: string([]byte{
- 0xf0, 0x90, 0x80, 0x80,
- 0xf0, 0x90, 0x80, 0x81,
- 0xf0, 0xbf, 0xbf, 0xbe,
- 0xf0, 0xbf, 0xbf, 0xbf,
- }),
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte{0xf0, 0x90, 0x80, 0x80}), 0, 4, 0, 0),
- withPos(newTokenDefault(1, 1, []byte{0xf0, 0x90, 0x80, 0x81}), 4, 4, 0, 1),
- withPos(newTokenDefault(1, 1, []byte{0xf0, 0xbf, 0xbf, 0xbe}), 8, 4, 0, 2),
- withPos(newTokenDefault(1, 1, []byte{0xf0, 0xbf, 0xbf, 0xbf}), 12, 4, 0, 3),
- withPos(newEOFTokenDefault(), 16, 0, 0, 4),
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- // all 4 byte characters
- newLexEntryDefaultNOP("char_4_byte", "[\xf0\x90\x80\x80-\xf4\x8f\xbf\xbf]"),
- },
- },
- src: string([]byte{
- 0xf0, 0x90, 0x80, 0x80,
- 0xf0, 0x90, 0x80, 0x81,
- 0xf0, 0xbf, 0xbf, 0xbe,
- 0xf0, 0xbf, 0xbf, 0xbf,
- 0xf1, 0x80, 0x80, 0x80,
- 0xf1, 0x80, 0x80, 0x81,
- 0xf3, 0xbf, 0xbf, 0xbe,
- 0xf3, 0xbf, 0xbf, 0xbf,
- 0xf4, 0x80, 0x80, 0x80,
- 0xf4, 0x80, 0x80, 0x81,
- 0xf4, 0x8f, 0xbf, 0xbe,
- 0xf4, 0x8f, 0xbf, 0xbf,
- }),
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte{0xf0, 0x90, 0x80, 0x80}), 0, 4, 0, 0),
- withPos(newTokenDefault(1, 1, []byte{0xf0, 0x90, 0x80, 0x81}), 4, 4, 0, 1),
- withPos(newTokenDefault(1, 1, []byte{0xf0, 0xbf, 0xbf, 0xbe}), 8, 4, 0, 2),
- withPos(newTokenDefault(1, 1, []byte{0xf0, 0xbf, 0xbf, 0xbf}), 12, 4, 0, 3),
- withPos(newTokenDefault(1, 1, []byte{0xf1, 0x80, 0x80, 0x80}), 16, 4, 0, 4),
- withPos(newTokenDefault(1, 1, []byte{0xf1, 0x80, 0x80, 0x81}), 20, 4, 0, 5),
- withPos(newTokenDefault(1, 1, []byte{0xf3, 0xbf, 0xbf, 0xbe}), 24, 4, 0, 6),
- withPos(newTokenDefault(1, 1, []byte{0xf3, 0xbf, 0xbf, 0xbf}), 28, 4, 0, 7),
- withPos(newTokenDefault(1, 1, []byte{0xf4, 0x80, 0x80, 0x80}), 32, 4, 0, 8),
- withPos(newTokenDefault(1, 1, []byte{0xf4, 0x80, 0x80, 0x81}), 36, 4, 0, 9),
- withPos(newTokenDefault(1, 1, []byte{0xf4, 0x8f, 0xbf, 0xbe}), 40, 4, 0, 10),
- withPos(newTokenDefault(1, 1, []byte{0xf4, 0x8f, 0xbf, 0xbf}), 44, 4, 0, 11),
- withPos(newEOFTokenDefault(), 48, 0, 0, 12),
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- newLexEntryDefaultNOP("non_number", "[^0-9]+[0-9]"),
- },
- },
- src: "foo9",
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte("foo9")), 0, 4, 0, 0),
- withPos(newEOFTokenDefault(), 4, 0, 0, 4),
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- newLexEntryDefaultNOP("char_1_byte", "\\u{006E}"),
- newLexEntryDefaultNOP("char_2_byte", "\\u{03BD}"),
- newLexEntryDefaultNOP("char_3_byte", "\\u{306B}"),
- newLexEntryDefaultNOP("char_4_byte", "\\u{01F638}"),
- },
- },
- src: "nνに😸",
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte{0x6E}), 0, 1, 0, 0),
- withPos(newTokenDefault(2, 2, []byte{0xCE, 0xBD}), 1, 2, 0, 1),
- withPos(newTokenDefault(3, 3, []byte{0xE3, 0x81, 0xAB}), 3, 3, 0, 2),
- withPos(newTokenDefault(4, 4, []byte{0xF0, 0x9F, 0x98, 0xB8}), 6, 4, 0, 3),
- withPos(newEOFTokenDefault(), 10, 0, 0, 4),
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- newLexEntryDefaultNOP("code_points_alt", "[\\u{006E}\\u{03BD}\\u{306B}\\u{01F638}]"),
- },
- },
- src: "nνに😸",
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte{0x6E}), 0, 1, 0, 0),
- withPos(newTokenDefault(1, 1, []byte{0xCE, 0xBD}), 1, 2, 0, 1),
- withPos(newTokenDefault(1, 1, []byte{0xE3, 0x81, 0xAB}), 3, 3, 0, 2),
- withPos(newTokenDefault(1, 1, []byte{0xF0, 0x9F, 0x98, 0xB8}), 6, 4, 0, 3),
- withPos(newEOFTokenDefault(), 10, 0, 0, 4),
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- newLexEntryDefaultNOP("t1", "\\f{a2c}\\f{d2f}+"),
- newLexEntryFragment("a2c", "abc"),
- newLexEntryFragment("d2f", "def"),
- },
- },
- src: "abcdefdefabcdef",
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte("abcdefdef")), 0, 9, 0, 0),
- withPos(newTokenDefault(1, 1, []byte("abcdef")), 9, 6, 0, 9),
- withPos(newEOFTokenDefault(), 15, 0, 0, 15),
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- newLexEntryDefaultNOP("t1", "(\\f{a2c}|\\f{d2f})+"),
- newLexEntryFragment("a2c", "abc"),
- newLexEntryFragment("d2f", "def"),
- },
- },
- src: "abcdefdefabc",
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte("abcdefdefabc")), 0, 12, 0, 0),
- withPos(newEOFTokenDefault(), 12, 0, 0, 12),
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- newLexEntryDefaultNOP("t1", "\\f{a2c_or_d2f}+"),
- newLexEntryFragment("a2c_or_d2f", "\\f{a2c}|\\f{d2f}"),
- newLexEntryFragment("a2c", "abc"),
- newLexEntryFragment("d2f", "def"),
- },
- },
- src: "abcdefdefabc",
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte("abcdefdefabc")), 0, 12, 0, 0),
- withPos(newEOFTokenDefault(), 12, 0, 0, 12),
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- newLexEntryDefaultNOP("white_space", ` *`),
- newLexEntry([]string{"default"}, "string_open", `"`, "string", false),
- newLexEntry([]string{"string"}, "escape_sequence", `\\[n"\\]`, "", false),
- newLexEntry([]string{"string"}, "char_sequence", `[^"\\]*`, "", false),
- newLexEntry([]string{"string"}, "string_close", `"`, "", true),
- },
- },
- src: `"" "Hello world.\n\"Hello world.\""`,
- tokens: []*Token{
- withPos(newToken(1, 2, 2, []byte(`"`)), 0, 1, 0, 0),
- withPos(newToken(2, 5, 3, []byte(`"`)), 1, 1, 0, 1),
- withPos(newToken(1, 1, 1, []byte(` `)), 2, 1, 0, 2),
- withPos(newToken(1, 2, 2, []byte(`"`)), 3, 1, 0, 3),
- withPos(newToken(2, 4, 2, []byte(`Hello world.`)), 4, 12, 0, 4),
- withPos(newToken(2, 3, 1, []byte(`\n`)), 16, 2, 0, 16),
- withPos(newToken(2, 3, 1, []byte(`\"`)), 18, 2, 0, 18),
- withPos(newToken(2, 4, 2, []byte(`Hello world.`)), 20, 12, 0, 20),
- withPos(newToken(2, 3, 1, []byte(`\"`)), 32, 2, 0, 32),
- withPos(newToken(2, 5, 3, []byte(`"`)), 34, 1, 0, 34),
- withPos(newEOFTokenDefault(), 35, 0, 0, 35),
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- // `white_space` is enabled in multiple modes.
- newLexEntry([]string{"default", "state_a", "state_b"}, "white_space", ` *`, "", false),
- newLexEntry([]string{"default"}, "char_a", `a`, "state_a", false),
- newLexEntry([]string{"state_a"}, "char_b", `b`, "state_b", false),
- newLexEntry([]string{"state_a"}, "back_from_a", `<`, "", true),
- newLexEntry([]string{"state_b"}, "back_from_b", `<`, "", true),
- },
- },
- src: ` a b < < `,
- tokens: []*Token{
- withPos(newToken(1, 1, 1, []byte(` `)), 0, 1, 0, 0),
- withPos(newToken(1, 2, 2, []byte(`a`)), 1, 1, 0, 1),
- withPos(newToken(2, 1, 1, []byte(` `)), 2, 1, 0, 2),
- withPos(newToken(2, 3, 2, []byte(`b`)), 3, 1, 0, 3),
- withPos(newToken(3, 1, 1, []byte(` `)), 4, 1, 0, 4),
- withPos(newToken(3, 5, 2, []byte(`<`)), 5, 1, 0, 5),
- withPos(newToken(2, 1, 1, []byte(` `)), 6, 1, 0, 6),
- withPos(newToken(2, 4, 3, []byte(`<`)), 7, 1, 0, 7),
- withPos(newToken(1, 1, 1, []byte(` `)), 8, 1, 0, 8),
- withPos(newEOFTokenDefault(), 9, 0, 0, 9),
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- newLexEntry([]string{"default", "mode_1", "mode_2"}, "white_space", ` *`, "", false),
- newLexEntry([]string{"default"}, "char", `.`, "", false),
- newLexEntry([]string{"default"}, "push_1", `-> 1`, "", false),
- newLexEntry([]string{"mode_1"}, "push_2", `-> 2`, "", false),
- newLexEntry([]string{"mode_1"}, "pop_1", `<-`, "", false),
- newLexEntry([]string{"mode_2"}, "pop_2", `<-`, "", false),
- },
- },
- src: `-> 1 -> 2 <- <- a`,
- tokens: []*Token{
- withPos(newToken(1, 3, 3, []byte(`-> 1`)), 0, 4, 0, 0),
- withPos(newToken(2, 1, 1, []byte(` `)), 4, 1, 0, 4),
- withPos(newToken(2, 4, 2, []byte(`-> 2`)), 5, 4, 0, 5),
- withPos(newToken(3, 1, 1, []byte(` `)), 9, 1, 0, 9),
- withPos(newToken(3, 6, 2, []byte(`<-`)), 10, 2, 0, 10),
- withPos(newToken(2, 1, 1, []byte(` `)), 12, 1, 0, 12),
- withPos(newToken(2, 5, 3, []byte(`<-`)), 13, 2, 0, 13),
- withPos(newToken(1, 1, 1, []byte(` `)), 15, 1, 0, 15),
- withPos(newToken(1, 2, 2, []byte(`a`)), 16, 1, 0, 16),
- withPos(newEOFTokenDefault(), 17, 0, 0, 17),
- },
- passiveModeTran: true,
- tran: func(l *Lexer, tok *Token) error {
- switch l.spec.ModeName(l.Mode()) {
- case "default":
- switch tok.KindID {
- case 3: // push_1
- l.PushMode(2)
- }
- case "mode_1":
- switch tok.KindID {
- case 4: // push_2
- l.PushMode(3)
- case 5: // pop_1
- return l.PopMode()
- }
- case "mode_2":
- switch tok.KindID {
- case 6: // pop_2
- return l.PopMode()
- }
- }
- return nil
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- newLexEntry([]string{"default", "mode_1", "mode_2"}, "white_space", ` *`, "", false),
- newLexEntry([]string{"default"}, "char", `.`, "", false),
- newLexEntry([]string{"default"}, "push_1", `-> 1`, "mode_1", false),
- newLexEntry([]string{"mode_1"}, "push_2", `-> 2`, "", false),
- newLexEntry([]string{"mode_1"}, "pop_1", `<-`, "", false),
- newLexEntry([]string{"mode_2"}, "pop_2", `<-`, "", true),
- },
- },
- src: `-> 1 -> 2 <- <- a`,
- tokens: []*Token{
- withPos(newToken(1, 3, 3, []byte(`-> 1`)), 0, 4, 0, 0),
- withPos(newToken(2, 1, 1, []byte(` `)), 4, 1, 0, 4),
- withPos(newToken(2, 4, 2, []byte(`-> 2`)), 5, 4, 0, 5),
- withPos(newToken(3, 1, 1, []byte(` `)), 9, 1, 0, 9),
- withPos(newToken(3, 6, 2, []byte(`<-`)), 10, 2, 0, 10),
- withPos(newToken(2, 1, 1, []byte(` `)), 12, 1, 0, 12),
- withPos(newToken(2, 5, 3, []byte(`<-`)), 13, 2, 0, 13),
- withPos(newToken(1, 1, 1, []byte(` `)), 15, 1, 0, 15),
- withPos(newToken(1, 2, 2, []byte(`a`)), 16, 1, 0, 16),
- withPos(newEOFTokenDefault(), 17, 0, 0, 17),
- },
- // Active mode transition and an external transition function can be used together.
- passiveModeTran: false,
- tran: func(l *Lexer, tok *Token) error {
- switch l.spec.ModeName(l.Mode()) {
- case "mode_1":
- switch tok.KindID {
- case 4: // push_2
- l.PushMode(3)
- case 5: // pop_1
- return l.PopMode()
- }
- }
- return nil
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- newLexEntryDefaultNOP("dot", spec.EscapePattern(`.`)),
- newLexEntryDefaultNOP("star", spec.EscapePattern(`*`)),
- newLexEntryDefaultNOP("plus", spec.EscapePattern(`+`)),
- newLexEntryDefaultNOP("question", spec.EscapePattern(`?`)),
- newLexEntryDefaultNOP("vbar", spec.EscapePattern(`|`)),
- newLexEntryDefaultNOP("lparen", spec.EscapePattern(`(`)),
- newLexEntryDefaultNOP("rparen", spec.EscapePattern(`)`)),
- newLexEntryDefaultNOP("lbrace", spec.EscapePattern(`[`)),
- newLexEntryDefaultNOP("backslash", spec.EscapePattern(`\`)),
- },
- },
- src: `.*+?|()[\`,
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte(`.`)), 0, 1, 0, 0),
- withPos(newTokenDefault(2, 2, []byte(`*`)), 1, 1, 0, 1),
- withPos(newTokenDefault(3, 3, []byte(`+`)), 2, 1, 0, 2),
- withPos(newTokenDefault(4, 4, []byte(`?`)), 3, 1, 0, 3),
- withPos(newTokenDefault(5, 5, []byte(`|`)), 4, 1, 0, 4),
- withPos(newTokenDefault(6, 6, []byte(`(`)), 5, 1, 0, 5),
- withPos(newTokenDefault(7, 7, []byte(`)`)), 6, 1, 0, 6),
- withPos(newTokenDefault(8, 8, []byte(`[`)), 7, 1, 0, 7),
- withPos(newTokenDefault(9, 9, []byte(`\`)), 8, 1, 0, 8),
- withPos(newEOFTokenDefault(), 9, 0, 0, 9),
- },
- },
- // Character properties are available in a bracket expression.
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- newLexEntryDefaultNOP("letter", `[\p{Letter}]+`),
- newLexEntryDefaultNOP("non_letter", `[^\p{Letter}]+`),
- },
- },
- src: `foo123`,
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte(`foo`)), 0, 3, 0, 0),
- withPos(newTokenDefault(2, 2, []byte(`123`)), 3, 3, 0, 3),
- withPos(newEOFTokenDefault(), 6, 0, 0, 6),
- },
- },
- // The driver can continue lexical analysis even after it detects an invalid token.
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- newLexEntryDefaultNOP("lower", `[a-z]+`),
- },
- },
- src: `foo123bar`,
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte(`foo`)), 0, 3, 0, 0),
- withPos(newInvalidTokenDefault([]byte(`123`)), 3, 3, 0, 3),
- withPos(newTokenDefault(1, 1, []byte(`bar`)), 6, 3, 0, 6),
- withPos(newEOFTokenDefault(), 9, 0, 0, 9),
- },
- },
- // The driver can detect an invalid token immediately preceding an EOF.
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- newLexEntryDefaultNOP("lower", `[a-z]+`),
- },
- },
- src: `foo123`,
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte(`foo`)), 0, 3, 0, 0),
- withPos(newInvalidTokenDefault([]byte(`123`)), 3, 3, 0, 3),
- withPos(newEOFTokenDefault(), 6, 0, 0, 6),
- },
- },
- }
- for i, tt := range test {
- for compLv := lexical.CompressionLevelMin; compLv <= lexical.CompressionLevelMax; compLv++ {
- t.Run(fmt.Sprintf("#%v-%v", i, compLv), func(t *testing.T) {
- clspec, err, cerrs := lexical.Compile(tt.lspec, compLv)
- if err != nil {
- for _, cerr := range cerrs {
- t.Logf("%#v", cerr)
- }
- t.Fatalf("unexpected error: %v", err)
- }
- opts := []LexerOption{}
- if tt.passiveModeTran {
- opts = append(opts, DisableModeTransition())
- }
- lexer, err := NewLexer(NewLexSpec(clspec), strings.NewReader(tt.src), opts...)
- if err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
- for _, eTok := range tt.tokens {
- tok, err := lexer.Next()
- if err != nil {
- t.Log(err)
- break
- }
- testToken(t, eTok, tok)
-
- if tok.EOF {
- break
- }
-
- if tt.tran != nil {
- err := tt.tran(lexer, tok)
- if err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
- }
- }
- })
- }
- }
-}
-
-func TestLexer_Next_WithPosition(t *testing.T) {
- lspec := &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- newLexEntryDefaultNOP("newline", `\u{000A}+`),
- newLexEntryDefaultNOP("any", `.`),
- },
- }
-
- clspec, err, _ := lexical.Compile(lspec, lexical.CompressionLevelMax)
- if err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
-
- src := string([]byte{
- 0x00,
- 0x7F,
- 0x0A,
-
- 0xC2, 0x80,
- 0xDF, 0xBF,
- 0x0A,
-
- 0xE0, 0xA0, 0x80,
- 0xE0, 0xBF, 0xBF,
- 0xE1, 0x80, 0x80,
- 0xEC, 0xBF, 0xBF,
- 0xED, 0x80, 0x80,
- 0xED, 0x9F, 0xBF,
- 0xEE, 0x80, 0x80,
- 0xEF, 0xBF, 0xBF,
- 0x0A,
-
- 0xF0, 0x90, 0x80, 0x80,
- 0xF0, 0xBF, 0xBF, 0xBF,
- 0xF1, 0x80, 0x80, 0x80,
- 0xF3, 0xBF, 0xBF, 0xBF,
- 0xF4, 0x80, 0x80, 0x80,
- 0xF4, 0x8F, 0xBF, 0xBF,
- 0x0A,
- 0x0A,
- 0x0A,
- })
-
- expected := []*Token{
- withPos(newTokenDefault(2, 2, []byte{0x00}), 0, 1, 0, 0),
- withPos(newTokenDefault(2, 2, []byte{0x7F}), 1, 1, 0, 1),
- withPos(newTokenDefault(1, 1, []byte{0x0A}), 2, 1, 0, 2),
-
- withPos(newTokenDefault(2, 2, []byte{0xC2, 0x80}), 3, 2, 1, 0),
- withPos(newTokenDefault(2, 2, []byte{0xDF, 0xBF}), 5, 2, 1, 1),
- withPos(newTokenDefault(1, 1, []byte{0x0A}), 7, 1, 1, 2),
-
- withPos(newTokenDefault(2, 2, []byte{0xE0, 0xA0, 0x80}), 8, 3, 2, 0),
- withPos(newTokenDefault(2, 2, []byte{0xE0, 0xBF, 0xBF}), 11, 3, 2, 1),
- withPos(newTokenDefault(2, 2, []byte{0xE1, 0x80, 0x80}), 14, 3, 2, 2),
- withPos(newTokenDefault(2, 2, []byte{0xEC, 0xBF, 0xBF}), 17, 3, 2, 3),
- withPos(newTokenDefault(2, 2, []byte{0xED, 0x80, 0x80}), 20, 3, 2, 4),
- withPos(newTokenDefault(2, 2, []byte{0xED, 0x9F, 0xBF}), 23, 3, 2, 5),
- withPos(newTokenDefault(2, 2, []byte{0xEE, 0x80, 0x80}), 26, 3, 2, 6),
- withPos(newTokenDefault(2, 2, []byte{0xEF, 0xBF, 0xBF}), 29, 3, 2, 7),
- withPos(newTokenDefault(1, 1, []byte{0x0A}), 32, 1, 2, 8),
-
- withPos(newTokenDefault(2, 2, []byte{0xF0, 0x90, 0x80, 0x80}), 33, 4, 3, 0),
- withPos(newTokenDefault(2, 2, []byte{0xF0, 0xBF, 0xBF, 0xBF}), 37, 4, 3, 1),
- withPos(newTokenDefault(2, 2, []byte{0xF1, 0x80, 0x80, 0x80}), 41, 4, 3, 2),
- withPos(newTokenDefault(2, 2, []byte{0xF3, 0xBF, 0xBF, 0xBF}), 45, 4, 3, 3),
- withPos(newTokenDefault(2, 2, []byte{0xF4, 0x80, 0x80, 0x80}), 49, 4, 3, 4),
- withPos(newTokenDefault(2, 2, []byte{0xF4, 0x8F, 0xBF, 0xBF}), 53, 4, 3, 5),
- // When a token contains multiple line breaks, the driver sets the token position to
- // the line number where the lexeme first appears.
- withPos(newTokenDefault(1, 1, []byte{0x0A, 0x0A, 0x0A}), 57, 3, 3, 6),
-
- withPos(newEOFTokenDefault(), 60, 0, 6, 0),
- }
-
- lexer, err := NewLexer(NewLexSpec(clspec), strings.NewReader(src))
- if err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
-
- for _, eTok := range expected {
- tok, err := lexer.Next()
- if err != nil {
- t.Fatal(err)
- }
-
- testToken(t, eTok, tok)
-
- if tok.EOF {
- break
- }
- }
-}
-
-func testToken(t *testing.T, expected, actual *Token) {
- t.Helper()
-
- if actual.ModeID != expected.ModeID ||
- actual.KindID != expected.KindID ||
- actual.ModeKindID != expected.ModeKindID ||
- !bytes.Equal(actual.Lexeme, expected.Lexeme) ||
- actual.EOF != expected.EOF ||
- actual.Invalid != expected.Invalid {
- t.Fatalf(`unexpected token; want: %+v, got: %+v`, expected, actual)
- }
-
- if actual.BytePos != expected.BytePos || actual.ByteLen != expected.ByteLen ||
- actual.Row != expected.Row || actual.Col != expected.Col {
- t.Fatalf(`unexpected token; want: %+v, got: %+v`, expected, actual)
- }
-}
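
The expectations in TestLexer_Next_WithPosition above encode the position rule from read() in lexer.go: rows advance on LF (0x0A), and columns count code points by looking only at UTF-8 lead bytes. A standalone sketch of that counting rule, with a made-up input, independent of the driver:

package main

import "fmt"

func main() {
	src := []byte("nν\nに😸") // two code points, LF, two code points

	row, col := 0, 0
	for _, b := range src {
		switch {
		case b == 0x0A: // LF ends a line
			row++
			col = 0
		case b < 0x80, b>>5 == 0x06, b>>4 == 0x0E, b>>3 == 0x1E:
			// ASCII or a UTF-8 lead byte (110xxxxx, 1110xxxx, 11110xxx)
			// starts a new code point.
			col++
		}
		// Continuation bytes (10xxxxxx) fall through without advancing col.
	}

	fmt.Println(row, col) // 1 2: one LF seen; "に😸" is two code points
}
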
diff --git a/driver/lexer/spec.go b/driver/lexer/spec.go
deleted file mode 100644
index 4b1a218..0000000
--- a/driver/lexer/spec.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package lexer
-
-import spec "spec/grammar"
-
-type lexSpec struct {
- spec *spec.LexicalSpec
-}
-
-func NewLexSpec(spec *spec.LexicalSpec) *lexSpec {
- return &lexSpec{
- spec: spec,
- }
-}
-
-func (s *lexSpec) InitialMode() ModeID {
- return ModeID(s.spec.InitialModeID.Int())
-}
-
-func (s *lexSpec) Pop(mode ModeID, modeKind ModeKindID) bool {
- return s.spec.Specs[mode].Pop[modeKind] == 1
-}
-
-func (s *lexSpec) Push(mode ModeID, modeKind ModeKindID) (ModeID, bool) {
- modeID := s.spec.Specs[mode].Push[modeKind]
- return ModeID(modeID.Int()), !modeID.IsNil()
-}
-
-func (s *lexSpec) ModeName(mode ModeID) string {
- return s.spec.ModeNames[mode].String()
-}
-
-func (s *lexSpec) InitialState(mode ModeID) StateID {
- return StateID(s.spec.Specs[mode].DFA.InitialStateID.Int())
-}
-
-func (s *lexSpec) NextState(mode ModeID, state StateID, v int) (StateID, bool) {
- switch s.spec.CompressionLevel {
- case 2:
- tran := s.spec.Specs[mode].DFA.Transition
- rowNum := tran.RowNums[state]
- d := tran.UniqueEntries.RowDisplacement[rowNum]
- if tran.UniqueEntries.Bounds[d+v] != rowNum {
- return StateID(tran.UniqueEntries.EmptyValue.Int()), false
- }
- return StateID(tran.UniqueEntries.Entries[d+v].Int()), true
- case 1:
- tran := s.spec.Specs[mode].DFA.Transition
- next := tran.UncompressedUniqueEntries[tran.RowNums[state]*tran.OriginalColCount+v]
- if next == spec.StateIDNil {
- return StateID(spec.StateIDNil.Int()), false
- }
- return StateID(next.Int()), true
- }
-
- modeSpec := s.spec.Specs[mode]
- next := modeSpec.DFA.UncompressedTransition[state.Int()*modeSpec.DFA.ColCount+v]
- if next == spec.StateIDNil {
- return StateID(spec.StateIDNil), false
- }
- return StateID(next.Int()), true
-}
-
-func (s *lexSpec) Accept(mode ModeID, state StateID) (ModeKindID, bool) {
- modeKindID := s.spec.Specs[mode].DFA.AcceptingStates[state]
- return ModeKindID(modeKindID.Int()), modeKindID != spec.LexModeKindIDNil
-}
-
-func (s *lexSpec) KindIDAndName(mode ModeID, modeKind ModeKindID) (KindID, string) {
- kindID := s.spec.KindIDs[mode][modeKind]
- return KindID(kindID.Int()), s.spec.KindNames[kindID].String()
-}
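
The compression-level-2 branch of NextState above implements a row-displacement table: every DFA row is packed into one shared entries array at offset RowDisplacement[rowNum], and Bounds records which row owns each slot, so an ownership mismatch means "no transition". A minimal, self-contained sketch of that lookup, with illustrative tables not taken from any real generated spec:

package main

import "fmt"

func main() {
	// Illustrative tables only. Row 0 owns slots 0-1, row 1 owns slots 2-4.
	displacement := []int{0, 2}     // row number -> offset into the shared arrays
	bounds := []int{0, 0, 1, 1, 1}  // slot -> row number that owns the slot
	entries := []int{3, 4, 5, 6, 7} // slot -> next state ID

	next := func(row, v int) (int, bool) {
		d := displacement[row]
		// The ownership check replaces a per-row array: a foreign slot means
		// the original row had no transition on input v.
		if d+v >= len(bounds) || bounds[d+v] != row {
			return 0, false // 0 stands in for the empty/nil state ID
		}
		return entries[d+v], true
	}

	fmt.Println(next(0, 1)) // 4 true  (slot 1 belongs to row 0)
	fmt.Println(next(1, 0)) // 5 true  (slot 2 belongs to row 1)
	fmt.Println(next(0, 3)) // 0 false (slot 3 belongs to row 1, not row 0)
}
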
diff --git a/driver/lexer/template.go b/driver/lexer/template.go
deleted file mode 100644
index c5d0778..0000000
--- a/driver/lexer/template.go
+++ /dev/null
@@ -1,760 +0,0 @@
-package lexer
-
-import (
- "bytes"
- _ "embed"
- "fmt"
- "go/ast"
- "go/format"
- "go/parser"
- "go/token"
- "strings"
- "text/template"
-
- "grammar/lexical"
- spec "spec/grammar"
-)
-
-//go:embed lexer.go
-var lexerCoreSrc string
-
-func GenLexer(lexSpec *spec.LexicalSpec, pkgName string) ([]byte, error) {
- var lexerSrc string
- {
- fset := token.NewFileSet()
- f, err := parser.ParseFile(fset, "lexer.go", lexerCoreSrc, parser.ParseComments)
- if err != nil {
- return nil, err
- }
-
- var b strings.Builder
- err = format.Node(&b, fset, f)
- if err != nil {
- return nil, err
- }
-
- lexerSrc = b.String()
- }
-
- var modeIDsSrc string
- {
- var b strings.Builder
- fmt.Fprintf(&b, "const (\n")
- for i, k := range lexSpec.ModeNames {
- if i == spec.LexModeIDNil.Int() {
- fmt.Fprintf(&b, " ModeIDNil ModeID = %v\n", i)
- continue
- }
- fmt.Fprintf(&b, " ModeID%v ModeID = %v\n", lexical.SnakeCaseToUpperCamelCase(k.String()), i)
- }
- fmt.Fprintf(&b, ")")
-
- modeIDsSrc = b.String()
- }
-
- var modeNamesSrc string
- {
- var b strings.Builder
- fmt.Fprintf(&b, "const (\n")
- for i, k := range lexSpec.ModeNames {
- if i == spec.LexModeIDNil.Int() {
- fmt.Fprintf(&b, " ModeNameNil = %#v\n", "")
- continue
- }
- fmt.Fprintf(&b, " ModeName%v = %#v\n", lexical.SnakeCaseToUpperCamelCase(k.String()), k)
- }
- fmt.Fprintf(&b, ")")
-
- modeNamesSrc = b.String()
- }
-
- var modeIDToNameSrc string
- {
- var b strings.Builder
- fmt.Fprintf(&b, `
-// ModeIDToName converts a mode ID to a name.
-func ModeIDToName(id ModeID) string {
- switch id {`)
- for i, k := range lexSpec.ModeNames {
- if i == spec.LexModeIDNil.Int() {
- fmt.Fprintf(&b, `
- case ModeIDNil:
- return ModeNameNil`)
- continue
- }
- name := lexical.SnakeCaseToUpperCamelCase(k.String())
- fmt.Fprintf(&b, `
- case ModeID%v:
- return ModeName%v`, name, name)
- }
- fmt.Fprintf(&b, `
- }
- return ""
-}
-`)
-
- modeIDToNameSrc = b.String()
- }
-
- var kindIDsSrc string
- {
- var b strings.Builder
- fmt.Fprintf(&b, "const (\n")
- for i, k := range lexSpec.KindNames {
- if i == spec.LexKindIDNil.Int() {
- fmt.Fprintf(&b, " KindIDNil KindID = %v\n", i)
- continue
- }
- fmt.Fprintf(&b, " KindID%v KindID = %v\n", lexical.SnakeCaseToUpperCamelCase(k.String()), i)
- }
- fmt.Fprintf(&b, ")")
-
- kindIDsSrc = b.String()
- }
-
- var kindNamesSrc string
- {
- var b strings.Builder
- fmt.Fprintf(&b, "const (\n")
- fmt.Fprintf(&b, " KindNameNil = %#v\n", "")
- for _, k := range lexSpec.KindNames[1:] {
- fmt.Fprintf(&b, " KindName%v = %#v\n", lexical.SnakeCaseToUpperCamelCase(k.String()), k)
- }
- fmt.Fprintf(&b, ")")
-
- kindNamesSrc = b.String()
- }
-
- var kindIDToNameSrc string
- {
- var b strings.Builder
- fmt.Fprintf(&b, `
-// KindIDToName converts a kind ID to a name.
-func KindIDToName(id KindID) string {
- switch id {`)
- for i, k := range lexSpec.KindNames {
- if i == spec.LexKindIDNil.Int() {
- fmt.Fprintf(&b, `
- case KindIDNil:
- return KindNameNil`)
- continue
- }
- name := lexical.SnakeCaseToUpperCamelCase(k.String())
- fmt.Fprintf(&b, `
- case KindID%v:
- return KindName%v`, name, name)
- }
- fmt.Fprintf(&b, `
- }
- return ""
-}
-`)
-
- kindIDToNameSrc = b.String()
- }
-
- var specSrc string
- {
- t, err := template.New("").Funcs(genTemplateFuncs(lexSpec)).Parse(lexSpecTemplate)
- if err != nil {
- return nil, err
- }
-
- var b strings.Builder
- err = t.Execute(&b, map[string]interface{}{
- "initialModeID": "ModeID" + lexical.SnakeCaseToUpperCamelCase(lexSpec.ModeNames[lexSpec.InitialModeID].String()),
- "modeIDNil": "ModeIDNil",
- "modeKindIDNil": spec.LexModeKindIDNil,
- "stateIDNil": spec.StateIDNil,
- "compressionLevel": lexSpec.CompressionLevel,
- })
- if err != nil {
- return nil, err
- }
-
- specSrc = b.String()
- }
-
- var src string
- {
- tmpl := `// Code generated by vartan-go. DO NOT EDIT.
-{{ .lexerSrc }}
-
-{{ .modeIDsSrc }}
-
-{{ .modeNamesSrc }}
-
-{{ .modeIDToNameSrc }}
-
-{{ .kindIDsSrc }}
-
-{{ .kindNamesSrc }}
-
-{{ .kindIDToNameSrc }}
-
-{{ .specSrc }}
-`
-
- t, err := template.New("").Parse(tmpl)
- if err != nil {
- return nil, err
- }
-
- var b strings.Builder
- err = t.Execute(&b, map[string]string{
- "lexerSrc": lexerSrc,
- "modeIDsSrc": modeIDsSrc,
- "modeNamesSrc": modeNamesSrc,
- "modeIDToNameSrc": modeIDToNameSrc,
- "kindIDsSrc": kindIDsSrc,
- "kindNamesSrc": kindNamesSrc,
- "kindIDToNameSrc": kindIDToNameSrc,
- "specSrc": specSrc,
- })
- if err != nil {
- return nil, err
- }
-
- src = b.String()
- }
-
- fset := token.NewFileSet()
- f, err := parser.ParseFile(fset, "", src, parser.ParseComments)
- if err != nil {
- return nil, err
- }
-
- f.Name = ast.NewIdent(pkgName)
-
- var b bytes.Buffer
- err = format.Node(&b, fset, f)
- if err != nil {
- return nil, err
- }
-
- return b.Bytes(), nil
-}
-
-const lexSpecTemplate = `
-type lexSpec struct {
- pop [][]bool
- push [][]ModeID
- modeNames []string
- initialStates []StateID
- acceptances [][]ModeKindID
- kindIDs [][]KindID
- kindNames []string
- initialModeID ModeID
- modeIDNil ModeID
- modeKindIDNil ModeKindID
- stateIDNil StateID
-
- rowNums [][]int
- rowDisplacements [][]int
- bounds [][]int
- entries [][]StateID
- originalColCounts []int
-}
-
-func NewLexSpec() *lexSpec {
- return &lexSpec{
- pop: {{ genPopTable }},
- push: {{ genPushTable }},
- modeNames: {{ genModeNameTable }},
- initialStates: {{ genInitialStateTable }},
- acceptances: {{ genAcceptTable }},
- kindIDs: {{ genKindIDTable }},
- kindNames: {{ genKindNameTable }},
- initialModeID: {{ .initialModeID }},
- modeIDNil: {{ .modeIDNil }},
- modeKindIDNil: {{ .modeKindIDNil }},
- stateIDNil: {{ .stateIDNil }},
-
- rowNums: {{ genRowNums }},
- rowDisplacements: {{ genRowDisplacements }},
- bounds: {{ genBounds }},
- entries: {{ genEntries }},
- originalColCounts: {{ genOriginalColCounts }},
- }
-}
-
-func (s *lexSpec) InitialMode() ModeID {
- return s.initialModeID
-}
-
-func (s *lexSpec) Pop(mode ModeID, modeKind ModeKindID) bool {
- return s.pop[mode][modeKind]
-}
-
-func (s *lexSpec) Push(mode ModeID, modeKind ModeKindID) (ModeID, bool) {
- id := s.push[mode][modeKind]
- return id, id != s.modeIDNil
-}
-
-func (s *lexSpec) ModeName(mode ModeID) string {
- return s.modeNames[mode]
-}
-
-func (s *lexSpec) InitialState(mode ModeID) StateID {
- return s.initialStates[mode]
-}
-
-func (s *lexSpec) NextState(mode ModeID, state StateID, v int) (StateID, bool) {
-{{ if eq .compressionLevel 2 -}}
- rowNum := s.rowNums[mode][state]
- d := s.rowDisplacements[mode][rowNum]
- if s.bounds[mode][d+v] != rowNum {
- return s.stateIDNil, false
- }
- return s.entries[mode][d+v], true
-{{ else if eq .compressionLevel 1 -}}
- rowNum := s.rowNums[mode][state]
- colCount := s.originalColCounts[mode]
- next := s.entries[mode][rowNum*colCount+v]
- if next == s.stateIDNil {
- return s.stateIDNil, false
- }
- return next, true
-{{ else -}}
- colCount := s.originalColCounts[mode]
- next := s.entries[mode][int(state)*colCount+v]
- if next == s.stateIDNil {
- return s.stateIDNil, false
- }
- return next, true
-{{ end -}}
-}
-
-func (s *lexSpec) Accept(mode ModeID, state StateID) (ModeKindID, bool) {
- id := s.acceptances[mode][state]
- return id, id != s.modeKindIDNil
-}
-
-func (s *lexSpec) KindIDAndName(mode ModeID, modeKind ModeKindID) (KindID, string) {
- id := s.kindIDs[mode][modeKind]
- return id, s.kindNames[id]
-}
-`
-
-func genTemplateFuncs(lexSpec *spec.LexicalSpec) template.FuncMap {
- fns := template.FuncMap{
- "genPopTable": func() string {
- var b strings.Builder
- fmt.Fprintf(&b, "[][]bool{\n")
- for i, s := range lexSpec.Specs {
- if i == spec.LexModeIDNil.Int() {
- fmt.Fprintf(&b, "nil,\n")
- continue
- }
-
- c := 1
- fmt.Fprintf(&b, "{\n")
- for _, v := range s.Pop {
- fmt.Fprintf(&b, "%v, ", v != 0)
-
- if c == 20 {
- fmt.Fprintf(&b, "\n")
- c = 1
- } else {
- c++
- }
- }
- if c > 1 {
- fmt.Fprintf(&b, "\n")
- }
- fmt.Fprintf(&b, "},\n")
- }
- fmt.Fprintf(&b, "}")
- return b.String()
- },
- "genPushTable": func() string {
- var b strings.Builder
- fmt.Fprintf(&b, "[][]ModeID{\n")
- for i, s := range lexSpec.Specs {
- if i == spec.LexModeIDNil.Int() {
- fmt.Fprintf(&b, "nil,\n")
- continue
- }
-
- c := 1
- fmt.Fprintf(&b, "{\n")
- for _, v := range s.Push {
- fmt.Fprintf(&b, "%v,", v)
-
- if c == 20 {
- fmt.Fprintf(&b, "\n")
- c = 1
- } else {
- c++
- }
- }
- if c > 1 {
- fmt.Fprintf(&b, "\n")
- }
- fmt.Fprintf(&b, "},\n")
- }
- fmt.Fprintf(&b, "}")
- return b.String()
- },
- "genModeNameTable": func() string {
- var b strings.Builder
- fmt.Fprintf(&b, "[]string{\n")
- for i, name := range lexSpec.ModeNames {
- if i == spec.LexModeIDNil.Int() {
- fmt.Fprintf(&b, "ModeNameNil,\n")
- continue
- }
- fmt.Fprintf(&b, "ModeName%v,\n", lexical.SnakeCaseToUpperCamelCase(name.String()))
- }
- fmt.Fprintf(&b, "}")
- return b.String()
- },
- "genInitialStateTable": func() string {
- var b strings.Builder
- fmt.Fprintf(&b, "[]StateID{\n")
- for i, s := range lexSpec.Specs {
- if i == spec.LexModeIDNil.Int() {
- fmt.Fprintf(&b, "%v,\n", spec.StateIDNil)
- continue
- }
-
- fmt.Fprintf(&b, "%v,\n", s.DFA.InitialStateID)
- }
- fmt.Fprintf(&b, "}")
- return b.String()
- },
- "genAcceptTable": func() string {
- var b strings.Builder
- fmt.Fprintf(&b, "[][]ModeKindID{\n")
- for i, s := range lexSpec.Specs {
- if i == spec.LexModeIDNil.Int() {
- fmt.Fprintf(&b, "nil,\n")
- continue
- }
-
- c := 1
- fmt.Fprintf(&b, "{\n")
- for _, v := range s.DFA.AcceptingStates {
- fmt.Fprintf(&b, "%v,", v)
-
- if c == 20 {
- fmt.Fprintf(&b, "\n")
- c = 1
- } else {
- c++
- }
- }
- if c > 1 {
- fmt.Fprintf(&b, "\n")
- }
- fmt.Fprintf(&b, "},\n")
- }
- fmt.Fprintf(&b, "}")
- return b.String()
- },
- "genKindIDTable": func() string {
- var b strings.Builder
- fmt.Fprintf(&b, "[][]KindID{\n")
- for i, ids := range lexSpec.KindIDs {
- if i == spec.LexModeIDNil.Int() {
- fmt.Fprintf(&b, "nil,\n")
- continue
- }
-
- fmt.Fprintf(&b, "{\n")
- for j, id := range ids {
- if j == spec.LexModeKindIDNil.Int() {
- fmt.Fprintf(&b, "KindIDNil,\n")
- continue
- }
- fmt.Fprintf(&b, "KindID%v,\n", lexical.SnakeCaseToUpperCamelCase(string(lexSpec.KindNames[id].String())))
- }
- fmt.Fprintf(&b, "},\n")
- }
- fmt.Fprintf(&b, "}")
- return b.String()
- },
- "genKindNameTable": func() string {
- var b strings.Builder
- fmt.Fprintf(&b, "[]string{\n")
- for i, name := range lexSpec.KindNames {
- if i == spec.LexKindIDNil.Int() {
- fmt.Fprintf(&b, "KindNameNil,\n")
- continue
- }
- fmt.Fprintf(&b, "KindName%v,\n", lexical.SnakeCaseToUpperCamelCase(name.String()))
- }
- fmt.Fprintf(&b, "}")
- return b.String()
- },
- }
-
- switch lexSpec.CompressionLevel {
- case 2:
- fns["genRowNums"] = func() string {
- var b strings.Builder
- fmt.Fprintf(&b, "[][]int{\n")
- for i, s := range lexSpec.Specs {
- if i == spec.LexModeIDNil.Int() {
- fmt.Fprintf(&b, "nil,\n")
- continue
- }
-
- c := 1
- fmt.Fprintf(&b, "{\n")
- for _, v := range s.DFA.Transition.RowNums {
- fmt.Fprintf(&b, "%v,", v)
-
- if c == 20 {
- fmt.Fprintf(&b, "\n")
- c = 1
- } else {
- c++
- }
- }
- if c > 1 {
- fmt.Fprintf(&b, "\n")
- }
- fmt.Fprintf(&b, "},\n")
- }
- fmt.Fprintf(&b, "}")
- return b.String()
- }
-
- fns["genRowDisplacements"] = func() string {
- var b strings.Builder
- fmt.Fprintf(&b, "[][]int{\n")
- for i, s := range lexSpec.Specs {
- if i == spec.LexModeIDNil.Int() {
- fmt.Fprintf(&b, "nil,\n")
- continue
- }
-
- c := 1
- fmt.Fprintf(&b, "{\n")
- for _, d := range s.DFA.Transition.UniqueEntries.RowDisplacement {
- fmt.Fprintf(&b, "%v,", d)
-
- if c == 20 {
- fmt.Fprintf(&b, "\n")
- c = 1
- } else {
- c++
- }
- }
- if c > 1 {
- fmt.Fprintf(&b, "\n")
- }
- fmt.Fprintf(&b, "},\n")
- }
- fmt.Fprintf(&b, "}")
- return b.String()
- }
-
- fns["genBounds"] = func() string {
- var b strings.Builder
- fmt.Fprintf(&b, "[][]int{\n")
- for i, s := range lexSpec.Specs {
- if i == spec.LexModeIDNil.Int() {
- fmt.Fprintf(&b, "nil,\n")
- continue
- }
-
- c := 1
- fmt.Fprintf(&b, "{\n")
- for _, v := range s.DFA.Transition.UniqueEntries.Bounds {
- fmt.Fprintf(&b, "%v,", v)
-
- if c == 20 {
- fmt.Fprintf(&b, "\n")
- c = 1
- } else {
- c++
- }
- }
- if c > 1 {
- fmt.Fprintf(&b, "\n")
- }
- fmt.Fprintf(&b, "},\n")
- }
- fmt.Fprintf(&b, "}")
- return b.String()
- }
-
- fns["genEntries"] = func() string {
- var b strings.Builder
- fmt.Fprintf(&b, "[][]StateID{\n")
- for i, s := range lexSpec.Specs {
- if i == spec.LexModeIDNil.Int() {
- fmt.Fprintf(&b, "nil,\n")
- continue
- }
-
- c := 1
- fmt.Fprintf(&b, "{\n")
- for _, v := range s.DFA.Transition.UniqueEntries.Entries {
- fmt.Fprintf(&b, "%v,", v)
-
- if c == 20 {
- fmt.Fprintf(&b, "\n")
- c = 1
- } else {
- c++
- }
- }
- if c > 1 {
- fmt.Fprintf(&b, "\n")
- }
- fmt.Fprintf(&b, "},\n")
- }
- fmt.Fprintf(&b, "}")
- return b.String()
- }
-
- fns["genOriginalColCounts"] = func() string {
- return "nil"
- }
- case 1:
- fns["genRowNums"] = func() string {
- var b strings.Builder
- fmt.Fprintf(&b, "[][]int{\n")
- for i, s := range lexSpec.Specs {
- if i == spec.LexModeIDNil.Int() {
- fmt.Fprintf(&b, "nil,\n")
- continue
- }
-
- c := 1
- fmt.Fprintf(&b, "{\n")
- for _, v := range s.DFA.Transition.RowNums {
- fmt.Fprintf(&b, "%v,", v)
-
- if c == 20 {
- fmt.Fprintf(&b, "\n")
- c = 1
- } else {
- c++
- }
- }
- if c > 1 {
- fmt.Fprintf(&b, "\n")
- }
- fmt.Fprintf(&b, "},\n")
- }
- fmt.Fprintf(&b, "}")
- return b.String()
- }
-
- fns["genRowDisplacements"] = func() string {
- return "nil"
- }
-
- fns["genBounds"] = func() string {
- return "nil"
- }
-
- fns["genEntries"] = func() string {
- var b strings.Builder
- fmt.Fprintf(&b, "[][]StateID{\n")
- for i, s := range lexSpec.Specs {
- if i == spec.LexModeIDNil.Int() {
- fmt.Fprintf(&b, "nil,\n")
- continue
- }
-
- c := 1
- fmt.Fprintf(&b, "{\n")
- for _, v := range s.DFA.Transition.UncompressedUniqueEntries {
- fmt.Fprintf(&b, "%v,", v)
-
- if c == 20 {
- fmt.Fprintf(&b, "\n")
- c = 1
- } else {
- c++
- }
- }
- if c > 1 {
- fmt.Fprintf(&b, "\n")
- }
- fmt.Fprintf(&b, "},\n")
- }
- fmt.Fprintf(&b, "}")
- return b.String()
- }
-
- fns["genOriginalColCounts"] = func() string {
- var b strings.Builder
- fmt.Fprintf(&b, "[]int{\n")
- for i, s := range lexSpec.Specs {
- if i == spec.LexModeIDNil.Int() {
- fmt.Fprintf(&b, "0,\n")
- continue
- }
-
- fmt.Fprintf(&b, "%v,\n", s.DFA.Transition.OriginalColCount)
- }
- fmt.Fprintf(&b, "}")
- return b.String()
- }
- default:
- fns["genRowNums"] = func() string {
- return "nil"
- }
-
- fns["genRowDisplacements"] = func() string {
- return "nil"
- }
-
- fns["genBounds"] = func() string {
- return "nil"
- }
-
- fns["genEntries"] = func() string {
- var b strings.Builder
- fmt.Fprintf(&b, "[][]StateID{\n")
- for i, s := range lexSpec.Specs {
- if i == spec.LexModeIDNil.Int() {
- fmt.Fprintf(&b, "nil,\n")
- continue
- }
-
- c := 1
- fmt.Fprintf(&b, "{\n")
- for _, v := range s.DFA.UncompressedTransition {
- fmt.Fprintf(&b, "%v,", v)
-
- if c == 20 {
- fmt.Fprintf(&b, "\n")
- c = 1
- } else {
- c++
- }
- }
- if c > 1 {
- fmt.Fprintf(&b, "\n")
- }
- fmt.Fprintf(&b, "},\n")
- }
- fmt.Fprintf(&b, "}")
- return b.String()
- }
-
- fns["genOriginalColCounts"] = func() string {
- var b strings.Builder
- fmt.Fprintf(&b, "[]int{\n")
- for i, s := range lexSpec.Specs {
- if i == spec.LexModeIDNil.Int() {
- fmt.Fprintf(&b, "0,\n")
- continue
- }
-
- fmt.Fprintf(&b, "%v,\n", s.DFA.ColCount)
- }
- fmt.Fprintf(&b, "}")
- return b.String()
- }
- }
-
- return fns
-}
diff --git a/driver/parser/conflict_test.go b/driver/parser/conflict_test.go
deleted file mode 100644
index 1a75483..0000000
--- a/driver/parser/conflict_test.go
+++ /dev/null
@@ -1,524 +0,0 @@
-package parser
-
-import (
- "strings"
- "testing"
-
- "grammar"
- "spec/grammar/parser"
-)
-
-func TestParserWithConflicts(t *testing.T) {
- tests := []struct {
- caption string
- specSrc string
- src string
- cst *Node
- }{
- {
- caption: "when a shift/reduce conflict occurred, we prioritize the shift action",
- specSrc: `
-#name test;
-
-expr
- : expr assign expr
- | id
- ;
-
-id: "[A-Za-z0-9_]+";
-assign: '=';
-`,
- src: `foo=bar=baz`,
- cst: nonTermNode("expr",
- nonTermNode("expr",
- termNode("id", "foo"),
- ),
- termNode("assign", "="),
- nonTermNode("expr",
- nonTermNode("expr",
- termNode("id", "bar"),
- ),
- termNode("assign", "="),
- nonTermNode("expr",
- termNode("id", "baz"),
- ),
- ),
- ),
- },
- {
- caption: "when a reduce/reduce conflict occurred, we prioritize the production defined earlier in the grammar",
- specSrc: `
-#name test;
-
-s
- : a
- | b
- ;
-a
- : id
- ;
-b
- : id
- ;
-
-id: "[A-Za-z0-9_]+";
-`,
- src: `foo`,
- cst: nonTermNode("s",
- nonTermNode("a",
- termNode("id", "foo"),
- ),
- ),
- },
- {
- caption: "left associativities defined earlier in the grammar have higher precedence",
- specSrc: `
-#name test;
-
-#prec (
- #left mul
- #left add
-);
-
-expr
- : expr add expr
- | expr mul expr
- | id
- ;
-
-id: "[A-Za-z0-9_]+";
-add: '+';
-mul: '*';
-`,
- src: `a+b*c*d+e`,
- cst: nonTermNode("expr",
- nonTermNode("expr",
- nonTermNode("expr",
- termNode("id", "a"),
- ),
- termNode("add", "+"),
- nonTermNode("expr",
- nonTermNode("expr",
- nonTermNode("expr",
- termNode("id", "b"),
- ),
- termNode("mul", "*"),
- nonTermNode("expr",
- termNode("id", "c"),
- ),
- ),
- termNode("mul", "*"),
- nonTermNode("expr",
- termNode("id", "d"),
- ),
- ),
- ),
- termNode("add", "+"),
- nonTermNode("expr",
- termNode("id", "e"),
- ),
- ),
- },
- {
- caption: "left associativities defined in the same line have the same precedence",
- specSrc: `
-#name test;
-
-#prec (
- #left add sub
-);
-
-expr
- : expr add expr
- | expr sub expr
- | id
- ;
-
-id: "[A-Za-z0-9_]+";
-add: '+';
-sub: '-';
-`,
- src: `a-b+c+d-e`,
- cst: nonTermNode("expr",
- nonTermNode("expr",
- nonTermNode("expr",
- nonTermNode("expr",
- nonTermNode("expr",
- termNode("id", "a"),
- ),
- termNode("sub", "-"),
- nonTermNode("expr",
- termNode("id", "b"),
- ),
- ),
- termNode("add", "+"),
- nonTermNode("expr",
- termNode("id", "c"),
- ),
- ),
- termNode("add", "+"),
- nonTermNode("expr",
- termNode("id", "d"),
- ),
- ),
- termNode("sub", "-"),
- nonTermNode("expr",
- termNode("id", "e"),
- ),
- ),
- },
- {
- caption: "right associativities defined earlier in the grammar have higher precedence",
- specSrc: `
-#name test;
-
-#prec (
- #right r1
- #right r2
-);
-
-expr
- : expr r2 expr
- | expr r1 expr
- | id
- ;
-
-whitespaces #skip
- : "[\u{0009}\u{0020}]+";
-r1
- : 'r1';
-r2
- : 'r2';
-id
- : "[A-Za-z0-9_]+";
-`,
- src: `a r2 b r1 c r1 d r2 e`,
- cst: nonTermNode("expr",
- nonTermNode("expr",
- termNode("id", "a"),
- ),
- termNode("r2", "r2"),
- nonTermNode("expr",
- nonTermNode("expr",
- nonTermNode("expr",
- termNode("id", "b"),
- ),
- termNode("r1", "r1"),
- nonTermNode("expr",
- nonTermNode("expr",
- termNode("id", "c"),
- ),
- termNode("r1", "r1"),
- nonTermNode("expr",
- termNode("id", "d"),
- ),
- ),
- ),
- termNode("r2", "r2"),
- nonTermNode("expr",
- termNode("id", "e"),
- ),
- ),
- ),
- },
- {
- caption: "right associativities defined in the same line have the same precedence",
- specSrc: `
-#name test;
-
-#prec (
- #right r1 r2
-);
-
-expr
- : expr r2 expr
- | expr r1 expr
- | id
- ;
-
-whitespaces #skip
- : "[\u{0009}\u{0020}]+";
-r1
- : 'r1';
-r2
- : 'r2';
-id
- : "[A-Za-z0-9_]+";
-`,
- src: `a r2 b r1 c r1 d r2 e`,
- cst: nonTermNode("expr",
- nonTermNode("expr",
- termNode("id", "a"),
- ),
- termNode("r2", "r2"),
- nonTermNode("expr",
- nonTermNode("expr",
- termNode("id", "b"),
- ),
- termNode("r1", "r1"),
- nonTermNode("expr",
- nonTermNode("expr",
- termNode("id", "c"),
- ),
- termNode("r1", "r1"),
- nonTermNode("expr",
- nonTermNode("expr",
- termNode("id", "d"),
- ),
- termNode("r2", "r2"),
- nonTermNode("expr",
- termNode("id", "e"),
- ),
- ),
- ),
- ),
- ),
- },
- {
- caption: "terminal symbols with an #assign directive defined earlier in the grammar have higher precedence",
- specSrc: `
-#name test;
-
-#prec (
- #assign a1
- #assign a2
-);
-
-expr
- : expr a2 expr
- | expr a1 expr
- | id
- ;
-
-whitespaces #skip
- : "[\u{0009}\u{0020}]+";
-a1
- : 'a1';
-a2
- : 'a2';
-id
- : "[A-Za-z0-9_]+";
-`,
- src: `a a2 b a1 c a1 d a2 e`,
- cst: nonTermNode("expr",
- nonTermNode("expr",
- termNode("id", "a"),
- ),
- termNode("a2", "a2"),
- nonTermNode("expr",
- nonTermNode("expr",
- nonTermNode("expr",
- termNode("id", "b"),
- ),
- termNode("a1", "a1"),
- nonTermNode("expr",
- nonTermNode("expr",
- termNode("id", "c"),
- ),
- termNode("a1", "a1"),
- nonTermNode("expr",
- termNode("id", "d"),
- ),
- ),
- ),
- termNode("a2", "a2"),
- nonTermNode("expr",
- termNode("id", "e"),
- ),
- ),
- ),
- },
- {
- caption: "terminal symbols with an #assign directive defined in the same line have the same precedence",
- specSrc: `
-#name test;
-
-#prec (
- #assign a1 a2
-);
-
-expr
- : expr a2 expr
- | expr a1 expr
- | id
- ;
-
-whitespaces #skip
- : "[\u{0009}\u{0020}]+";
-a1
- : 'a1';
-a2
- : 'a2';
-id
- : "[A-Za-z0-9_]+";
-`,
- src: `a a2 b a1 c a1 d a2 e`,
- cst: nonTermNode("expr",
- nonTermNode("expr",
- termNode("id", "a"),
- ),
- termNode("a2", "a2"),
- nonTermNode("expr",
- nonTermNode("expr",
- termNode("id", "b"),
- ),
- termNode("a1", "a1"),
- nonTermNode("expr",
- nonTermNode("expr",
- termNode("id", "c"),
- ),
- termNode("a1", "a1"),
- nonTermNode("expr",
- nonTermNode("expr",
- termNode("id", "d"),
- ),
- termNode("a2", "a2"),
- nonTermNode("expr",
- termNode("id", "e"),
- ),
- ),
- ),
- ),
- ),
- },
- {
- caption: "#left, #right, and #assign can be mixed",
- specSrc: `
-#name test;
-
-#prec (
- #left mul div
- #left add sub
- #assign else
- #assign then
- #right assign
-);
-
-expr
- : expr add expr
- | expr sub expr
- | expr mul expr
- | expr div expr
- | expr assign expr
- | if expr then expr
- | if expr then expr else expr
- | id
- ;
-
-ws #skip: "[\u{0009}\u{0020}]+";
-if: 'if';
-then: 'then';
-else: 'else';
-id: "[A-Za-z0-9_]+";
-add: '+';
-sub: '-';
-mul: '*';
-div: '/';
-assign: '=';
-`,
- src: `x = y = a + b * c - d / e + if f then if g then h else i`,
- cst: nonTermNode(
- "expr",
- nonTermNode("expr",
- termNode("id", "x"),
- ),
- termNode("assign", "="),
- nonTermNode("expr",
- nonTermNode("expr",
- termNode("id", "y"),
- ),
- termNode("assign", "="),
- nonTermNode("expr",
- nonTermNode("expr",
- nonTermNode("expr",
- nonTermNode("expr",
- termNode("id", "a"),
- ),
- termNode("add", "+"),
- nonTermNode("expr",
- nonTermNode("expr",
- termNode("id", "b"),
- ),
- termNode("mul", "*"),
- nonTermNode("expr",
- termNode("id", "c"),
- ),
- ),
- ),
- termNode("sub", "-"),
- nonTermNode("expr",
- nonTermNode("expr",
- termNode("id", "d"),
- ),
- termNode("div", "/"),
- nonTermNode("expr",
- termNode("id", "e"),
- ),
- ),
- ),
- termNode("add", "+"),
- nonTermNode("expr",
- termNode("if", "if"),
- nonTermNode("expr",
- termNode("id", "f"),
- ),
- termNode("then", "then"),
- nonTermNode("expr",
- termNode("if", "if"),
- nonTermNode("expr",
- termNode("id", "g"),
- ),
- termNode("then", "then"),
- nonTermNode("expr",
- termNode("id", "h"),
- ),
- termNode("else", "else"),
- nonTermNode("expr",
- termNode("id", "i"),
- ),
- ),
- ),
- ),
- ),
- ),
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.caption, func(t *testing.T) {
- ast, err := parser.Parse(strings.NewReader(tt.specSrc))
- if err != nil {
- t.Fatal(err)
- }
-
- b := grammar.GrammarBuilder{
- AST: ast,
- }
- cg, _, err := b.Build()
- if err != nil {
- t.Fatal(err)
- }
-
- toks, err := NewTokenStream(cg, strings.NewReader(tt.src))
- if err != nil {
- t.Fatal(err)
- }
-
- gram := NewGrammar(cg)
- tb := NewDefaultSyntaxTreeBuilder()
- p, err := NewParser(toks, gram, SemanticAction(NewCSTActionSet(gram, tb)))
- if err != nil {
- t.Fatal(err)
- }
-
- err = p.Parse()
- if err != nil {
- t.Fatal(err)
- }
-
- if tt.cst != nil {
- testTree(t, tb.Tree(), tt.cst)
- }
- })
- }
-}
diff --git a/driver/parser/lac_test.go b/driver/parser/lac_test.go
deleted file mode 100644
index ed9a0b8..0000000
--- a/driver/parser/lac_test.go
+++ /dev/null
@@ -1,120 +0,0 @@
-package parser
-
-import (
- "strings"
- "testing"
-
- "grammar"
- "spec/grammar/parser"
-)
-
-func TestParserWithLAC(t *testing.T) {
- specSrc := `
-#name test;
-
-s
- : t t
- ;
-t
- : c t
- | d
- ;
-
-c: 'c';
-d: 'd';
-`
-
- src := `ccd`
-
- actLogWithLAC := []string{
- "shift/c",
- "shift/c",
- "shift/d",
- "miss",
- }
-
- actLogWithoutLAC := []string{
- "shift/c",
- "shift/c",
- "shift/d",
- "reduce/t",
- "reduce/t",
- "reduce/t",
- "miss",
- }
-
- ast, err := parser.Parse(strings.NewReader(specSrc))
- if err != nil {
- t.Fatal(err)
- }
-
- b := grammar.GrammarBuilder{
- AST: ast,
- }
- gram, _, err := b.Build()
- if err != nil {
- t.Fatal(err)
- }
-
- t.Run("LAC is enabled", func(t *testing.T) {
- semAct := &testSemAct{
- gram: gram,
- }
-
- toks, err := NewTokenStream(gram, strings.NewReader(src))
- if err != nil {
- t.Fatal(err)
- }
-
- p, err := NewParser(toks, NewGrammar(gram), SemanticAction(semAct))
- if err != nil {
- t.Fatal(err)
- }
-
- err = p.Parse()
- if err != nil {
- t.Fatal(err)
- }
-
- if len(semAct.actLog) != len(actLogWithLAC) {
- t.Fatalf("unexpected action log; want: %+v, got: %+v", actLogWithLAC, semAct.actLog)
- }
-
- for i, e := range actLogWithLAC {
- if semAct.actLog[i] != e {
- t.Fatalf("unexpected action log; want: %+v, got: %+v", actLogWithLAC, semAct.actLog)
- }
- }
- })
-
- t.Run("LAC is disabled", func(t *testing.T) {
- semAct := &testSemAct{
- gram: gram,
- }
-
- toks, err := NewTokenStream(gram, strings.NewReader(src))
- if err != nil {
- t.Fatal(err)
- }
-
- p, err := NewParser(toks, NewGrammar(gram), SemanticAction(semAct), DisableLAC())
- if err != nil {
- t.Fatal(err)
- }
-
- err = p.Parse()
- if err != nil {
- t.Fatal(err)
- }
-
- if len(semAct.actLog) != len(actLogWithoutLAC) {
- t.Fatalf("unexpected action log; want: %+v, got: %+v", actLogWithoutLAC, semAct.actLog)
- }
-
- for i, e := range actLogWithoutLAC {
- if semAct.actLog[i] != e {
- t.Fatalf("unexpected action log; want: %+v, got: %+v", actLogWithoutLAC, semAct.actLog)
- }
- }
- })
-}
diff --git a/driver/parser/parser.go b/driver/parser/parser.go
deleted file mode 100644
index 2eaa678..0000000
--- a/driver/parser/parser.go
+++ /dev/null
@@ -1,416 +0,0 @@
-package parser
-
-import (
- "fmt"
-)
-
-type Grammar interface {
- // InitialState returns the initial state of a parser.
- InitialState() int
-
- // StartProduction returns the start production of the grammar.
- StartProduction() int
-
- // Action returns an ACTION entry corresponding to a (state, terminal symbol) pair.
- Action(state int, terminal int) int
-
- // GoTo returns a GOTO entry corresponding to a (state, non-terminal symbol) pair.
- GoTo(state int, lhs int) int
-
- // ErrorTrapperState returns true when a state can shift the error symbol.
- ErrorTrapperState(state int) bool
-
- // LHS returns the LHS symbol of a production.
- LHS(prod int) int
-
- // AlternativeSymbolCount returns the number of symbols in the RHS of a production.
- AlternativeSymbolCount(prod int) int
-
- // RecoverProduction returns true when a production has the recover directive.
- RecoverProduction(prod int) bool
-
- // NonTerminal returns a string representation of a non-terminal symbol.
- NonTerminal(nonTerminal int) string
-
- // TerminalCount returns the number of terminal symbols in the grammar.
- TerminalCount() int
-
- // SkipTerminal returns true when a terminal symbol must be skipped during syntax analysis.
- SkipTerminal(terminal int) bool
-
- // EOF returns the EOF symbol.
- EOF() int
-
- // Error returns the error symbol.
- Error() int
-
- // Terminal returns a string representation of a terminal symbol.
- Terminal(terminal int) string
-
- // ASTAction returns the AST action entries of a production.
- ASTAction(prod int) []int
-}
-
-type VToken interface {
- // TerminalID returns a terminal ID.
- TerminalID() int
-
- // Lexeme returns a lexeme.
- Lexeme() []byte
-
- // EOF returns true when a token represents EOF.
- EOF() bool
-
- // Invalid returns true when a token is invalid.
- Invalid() bool
-
- // BytePosition returns a (position, length) pair.
- // `position` is a byte position where a token appears and `length` is a length in bytes.
- BytePosition() (int, int)
-
- // Position returns (row, column) pair.
- Position() (int, int)
-}
-
-type TokenStream interface {
- Next() (VToken, error)
-}
-
-type SyntaxError struct {
- Row int
- Col int
- Message string
- Token VToken
- ExpectedTerminals []string
-}
-
-type ParserOption func(p *Parser) error
-
-// DisableLAC disables LAC (lookahead correction). LAC is enabled by default.
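-//
-// A minimal usage sketch (assuming `toks` and `gram` were built with
-// NewTokenStream and NewGrammar, as in this package's tests):
-//
-//	p, err := NewParser(toks, gram, DisableLAC())
-//	if err != nil {
-//		// handle the error
-//	}
-//	err = p.Parse()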
-func DisableLAC() ParserOption {
- return func(p *Parser) error {
- p.disableLAC = true
- return nil
- }
-}
-
-func SemanticAction(semAct SemanticActionSet) ParserOption {
- return func(p *Parser) error {
- p.semAct = semAct
- return nil
- }
-}
-
-type Parser struct {
- toks TokenStream
- gram Grammar
- stateStack *stateStack
- semAct SemanticActionSet
- disableLAC bool
- onError bool
- shiftCount int
- synErrs []*SyntaxError
-}
-
-func NewParser(toks TokenStream, gram Grammar, opts ...ParserOption) (*Parser, error) {
- p := &Parser{
- toks: toks,
- gram: gram,
- stateStack: &stateStack{},
- }
-
- for _, opt := range opts {
- err := opt(p)
- if err != nil {
- return nil, err
- }
- }
-
- return p, nil
-}
-
-func (p *Parser) Parse() error {
- p.stateStack.push(p.gram.InitialState())
- tok, err := p.nextToken()
- if err != nil {
- return err
- }
-
-ACTION_LOOP:
- for {
- act := p.lookupAction(tok)
-
- switch {
- case act < 0: // Shift
- nextState := act * -1
-
- recovered := false
- if p.onError {
- p.shiftCount++
-
- // When the parser has performed three shifts since entering the error state, it recovers from the error state.
- if p.shiftCount >= 3 {
- p.onError = false
- p.shiftCount = 0
- recovered = true
- }
- }
-
- p.shift(nextState)
-
- if p.semAct != nil {
- p.semAct.Shift(tok, recovered)
- }
-
- tok, err = p.nextToken()
- if err != nil {
- return err
- }
- case act > 0: // Reduce
- prodNum := act
-
- recovered := false
- if p.onError && p.gram.RecoverProduction(prodNum) {
- p.onError = false
- p.shiftCount = 0
- recovered = true
- }
-
- accepted := p.reduce(prodNum)
- if accepted {
- if p.semAct != nil {
- p.semAct.Accept()
- }
-
- return nil
- }
-
- if p.semAct != nil {
- p.semAct.Reduce(prodNum, recovered)
- }
- default: // Error
- if p.onError {
- tok, err = p.nextToken()
- if err != nil {
- return err
- }
- if tok.EOF() {
- if p.semAct != nil {
- p.semAct.MissError(tok)
- }
-
- return nil
- }
-
- continue ACTION_LOOP
- }
-
- row, col := tok.Position()
- p.synErrs = append(p.synErrs, &SyntaxError{
- Row: row,
- Col: col,
- Message: "unexpected token",
- Token: tok,
- ExpectedTerminals: p.searchLookahead(p.stateStack.top()),
- })
-
- count, ok := p.trapError()
- if !ok {
- if p.semAct != nil {
- p.semAct.MissError(tok)
- }
-
- return nil
- }
-
- p.onError = true
- p.shiftCount = 0
-
- act, err := p.lookupActionOnError()
- if err != nil {
- return err
- }
-
- p.shift(act * -1)
-
- if p.semAct != nil {
- p.semAct.TrapAndShiftError(tok, count)
- }
- }
- }
-}
-
-// validateLookahead validates whether `term` is a valid lookahead in the current context. When `term` is valid,
-// this method returns `true`.
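-// This implements LAC (lookahead correction): reductions triggered by `term` are
-// simulated on a copy of the state stack, so the parser can report a syntax error
-// before performing any reduction when `term` could never be shifted afterwards.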
-func (p *Parser) validateLookahead(term int) bool {
- p.stateStack.enableExploratoryMode()
- defer p.stateStack.disableExploratoryMode()
-
- for {
- act := p.gram.Action(p.stateStack.topExploratorily(), term)
-
- switch {
- case act < 0: // Shift
- return true
- case act > 0: // Reduce
- prodNum := act
-
- lhs := p.gram.LHS(prodNum)
- if lhs == p.gram.LHS(p.gram.StartProduction()) {
- return true
- }
- n := p.gram.AlternativeSymbolCount(prodNum)
- p.stateStack.popExploratorily(n)
- state := p.gram.GoTo(p.stateStack.topExploratorily(), lhs)
- p.stateStack.pushExploratorily(state)
- default: // Error
- return false
- }
- }
-}
-
-func (p *Parser) nextToken() (VToken, error) {
- for {
- // We don't have to check whether the token is invalid because the kind ID of the invalid token is 0,
- // and the parsing table doesn't have an entry corresponding to the kind ID 0. Thus we can detect
- // a syntax error because the parser cannot find an entry corresponding to the invalid token.
- tok, err := p.toks.Next()
- if err != nil {
- return nil, err
- }
-
- if p.gram.SkipTerminal(tok.TerminalID()) {
- continue
- }
-
- return tok, nil
- }
-}
-
-func (p *Parser) tokenToTerminal(tok VToken) int {
- if tok.EOF() {
- return p.gram.EOF()
- }
-
- return tok.TerminalID()
-}
-
-func (p *Parser) lookupAction(tok VToken) int {
- if !p.disableLAC {
- term := p.tokenToTerminal(tok)
- if !p.validateLookahead(term) {
- return 0
- }
- }
-
- return p.gram.Action(p.stateStack.top(), p.tokenToTerminal(tok))
-}
-
-func (p *Parser) lookupActionOnError() (int, error) {
- act := p.gram.Action(p.stateStack.top(), p.gram.Error())
- if act >= 0 {
- return 0, fmt.Errorf("an entry must be a shift action by the error symbol; entry: %v, state: %v, symbol: %v", act, p.stateStack.top(), p.gram.Terminal(p.gram.Error()))
- }
-
- return act, nil
-}
-
-func (p *Parser) shift(nextState int) {
- p.stateStack.push(nextState)
-}
-
-func (p *Parser) reduce(prodNum int) bool {
- lhs := p.gram.LHS(prodNum)
- if lhs == p.gram.LHS(p.gram.StartProduction()) {
- return true
- }
- n := p.gram.AlternativeSymbolCount(prodNum)
- p.stateStack.pop(n)
- nextState := p.gram.GoTo(p.stateStack.top(), lhs)
- p.stateStack.push(nextState)
- return false
-}
-
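-// trapError pops states off the stack until it finds a state that can shift the
-// error symbol, and returns the number of popped frames. It reports false when
-// no such state exists on the stack.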
-func (p *Parser) trapError() (int, bool) {
- count := 0
- for {
- if p.gram.ErrorTrapperState(p.stateStack.top()) {
- return count, true
- }
-
- if p.stateStack.top() != p.gram.InitialState() {
- p.stateStack.pop(1)
- count++
- } else {
- return 0, false
- }
- }
-}
-
-func (p *Parser) SyntaxErrors() []*SyntaxError {
- return p.synErrs
-}
-
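-// searchLookahead collects the names of the terminal symbols acceptable in the
-// current state. The parser reports them as ExpectedTerminals in a SyntaxError.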
-func (p *Parser) searchLookahead(state int) []string {
- kinds := []string{}
- termCount := p.gram.TerminalCount()
- for term := 0; term < termCount; term++ {
- if p.disableLAC {
- if p.gram.Action(p.stateStack.top(), term) == 0 {
- continue
- }
- } else {
- if !p.validateLookahead(term) {
- continue
- }
- }
-
- // We don't add the error symbol to the look-ahead symbols because users cannot input the error symbol
- // intentionally.
- if term == p.gram.Error() {
- continue
- }
-
- kinds = append(kinds, p.gram.Terminal(term))
- }
-
- return kinds
-}
-
-type stateStack struct {
- items []int
- itemsExp []int
-}
-
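-// The exploratory mode operates on a copy of the state stack. LAC uses it to
-// simulate reductions without mutating the actual parser state.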
-func (s *stateStack) enableExploratoryMode() {
- s.itemsExp = make([]int, len(s.items))
- copy(s.itemsExp, s.items)
-}
-
-func (s *stateStack) disableExploratoryMode() {
- s.itemsExp = nil
-}
-
-func (s *stateStack) top() int {
- return s.items[len(s.items)-1]
-}
-
-func (s *stateStack) topExploratorily() int {
- return s.itemsExp[len(s.itemsExp)-1]
-}
-
-func (s *stateStack) push(state int) {
- s.items = append(s.items, state)
-}
-
-func (s *stateStack) pushExploratorily(state int) {
- s.itemsExp = append(s.itemsExp, state)
-}
-
-func (s *stateStack) pop(n int) {
- s.items = s.items[:len(s.items)-n]
-}
-
-func (s *stateStack) popExploratorily(n int) {
- s.itemsExp = s.itemsExp[:len(s.itemsExp)-n]
-}
diff --git a/driver/parser/parser_test.go b/driver/parser/parser_test.go
deleted file mode 100644
index cdfd922..0000000
--- a/driver/parser/parser_test.go
+++ /dev/null
@@ -1,833 +0,0 @@
-package parser
-
-import (
- "fmt"
- "strings"
- "testing"
-
- "grammar"
- "spec/grammar/parser"
-)
-
-func termNode(kind string, text string, children ...*Node) *Node {
- return &Node{
- Type: NodeTypeTerminal,
- KindName: kind,
- Text: text,
- Children: children,
- }
-}
-
-func errorNode() *Node {
- return &Node{
- Type: NodeTypeError,
- KindName: "error",
- }
-}
-
-func nonTermNode(kind string, children ...*Node) *Node {
- return &Node{
- Type: NodeTypeNonTerminal,
- KindName: kind,
- Children: children,
- }
-}
-
-func TestParser_Parse(t *testing.T) {
- tests := []struct {
- specSrc string
- src string
- synErr bool
- cst *Node
- ast *Node
- }{
- {
- specSrc: `
-#name test;
-
-expr
- : expr add term
- | term
- ;
-term
- : term mul factor
- | factor
- ;
-factor
- : l_paren expr r_paren
- | id
- ;
-
-add
- : '+';
-mul
- : '*';
-l_paren
- : '(';
-r_paren
- : ')';
-id
- : "[A-Za-z_][0-9A-Za-z_]*";
-`,
- src: `(a+(b+c))*d+e`,
- cst: nonTermNode("expr",
- nonTermNode("expr",
- nonTermNode("term",
- nonTermNode("term",
- nonTermNode("factor",
- termNode("l_paren", "("),
- nonTermNode("expr",
- nonTermNode("expr",
- nonTermNode("term",
- nonTermNode("factor",
- termNode("id", "a"),
- ),
- ),
- ),
- termNode("add", "+"),
- nonTermNode("term",
- nonTermNode("factor",
- termNode("l_paren", "("),
- nonTermNode("expr",
- nonTermNode("expr",
- nonTermNode("term",
- nonTermNode("factor",
- termNode("id", "b"),
- ),
- ),
- ),
- termNode("add", "+"),
- nonTermNode("term",
- nonTermNode("factor",
- termNode("id", "c"),
- ),
- ),
- ),
- termNode("r_paren", ")"),
- ),
- ),
- ),
- termNode("r_paren", ")"),
- ),
- ),
- termNode("mul", "*"),
- nonTermNode("factor",
- termNode("id", "d"),
- ),
- ),
- ),
- termNode("add", "+"),
- nonTermNode("term",
- nonTermNode("factor",
- termNode("id", "e"),
- ),
- ),
- ),
- },
- // Fragments (\f{}), code point expressions (\u{}), and character property expressions (\p{}) are
- // not allowed in string literals.
- {
- specSrc: `
-#name test;
-
-s
- : a b c
- ;
-
-a
- : '\f{foo}';
-b
- : '\u{0000}';
-c
- : '\p{gc=Letter}';
-`,
- src: `\f{foo}\u{0000}\p{gc=Letter}`,
- cst: nonTermNode("s",
- termNode("a", `\f{foo}`),
- termNode("b", `\u{0000}`),
- termNode("c", `\p{gc=Letter}`),
- ),
- },
- // The driver can reduce productions that have an empty alternative and can generate a CST (and AST) node.
- {
- specSrc: `
-#name test;
-
-s
- : foo bar
- ;
-foo
- :
- ;
-bar
- : bar_text
- |
- ;
-bar_text: "bar";
-`,
- src: ``,
- cst: nonTermNode("s",
- nonTermNode("foo"),
- nonTermNode("bar"),
- ),
- },
- // The driver can reduce productions that have an empty alternative and can generate a CST (and AST) node.
- {
- specSrc: `
-#name test;
-
-s
- : foo bar
- ;
-foo
- :
- ;
-bar
- : bar_text
- |
- ;
-
-bar_text
- : "bar";
-`,
- src: `bar`,
- cst: nonTermNode("s",
- nonTermNode("foo"),
- nonTermNode("bar",
- termNode("bar_text", "bar"),
- ),
- ),
- },
- // A production can have multiple alternative productions.
- {
- specSrc: `
-#name test;
-
-#prec (
- #assign $uminus
- #left mul div
- #left add sub
-);
-
-expr
- : expr add expr
- | expr sub expr
- | expr mul expr
- | expr div expr
- | int
- | sub int #prec $uminus // This 'sub' means the unary minus symbol.
- ;
-
-int
- : "0|[1-9][0-9]*";
-add
- : '+';
-sub
- : '-';
-mul
- : '*';
-div
- : '/';
-`,
- src: `-1*-2+3-4/5`,
- ast: nonTermNode("expr",
- nonTermNode("expr",
- nonTermNode("expr",
- nonTermNode("expr",
- termNode("sub", "-"),
- termNode("int", "1"),
- ),
- termNode("mul", "*"),
- nonTermNode("expr",
- termNode("sub", "-"),
- termNode("int", "2"),
- ),
- ),
- termNode("add", "+"),
- nonTermNode("expr",
- termNode("int", "3"),
- ),
- ),
- termNode("sub", "-"),
- nonTermNode("expr",
- nonTermNode("expr",
- termNode("int", "4"),
- ),
- termNode("div", "/"),
- nonTermNode("expr",
- termNode("int", "5"),
- ),
- ),
- ),
- },
- // A lexical production can have multiple production directives.
- {
- specSrc: `
-#name test;
-
-s
- : push_a push_b pop pop
- ;
-
-push_a #mode default #push a
- : '->a';
-push_b #mode a #push b
- : '->b';
-pop #mode a b #pop
- : '<-';
-`,
- src: `->a->b<-<-`,
- ast: nonTermNode("s",
- termNode("push_a", "->a"),
- termNode("push_b", "->b"),
- termNode("pop", "<-"),
- termNode("pop", "<-"),
- ),
- },
- {
- specSrc: `
-#name test;
-
-mode_tran_seq
- : mode_tran_seq mode_tran
- | mode_tran
- ;
-mode_tran
- : push_m1
- | push_m2
- | pop_m1
- | pop_m2
- ;
-
-push_m1 #push m1
- : "->";
-push_m2 #mode m1 #push m2
- : "-->";
-pop_m1 #mode m1 #pop
- : "<-";
-pop_m2 #mode m2 #pop
- : "<--";
-whitespace #mode default m1 m2 #skip
- : "\u{0020}+";
-`,
- src: ` -> --> <-- <- `,
- },
- {
- specSrc: `
-#name test;
-
-s
- : foo bar
- ;
-
-foo
- : "foo";
-bar #mode default
- : "bar";
-`,
- src: `foobar`,
- },
- // When #push and #pop are applied to the same symbol, #pop will run first, then #push.
- {
- specSrc: `
-#name test;
-
-s
- : foo bar baz
- ;
-
-foo #push m1
- : 'foo';
-bar #mode m1 #pop #push m2
- : 'bar';
-baz #mode m2
- : 'baz';
-`,
- src: `foobarbaz`,
- ast: nonTermNode("s",
- termNode("foo", "foo"),
- termNode("bar", "bar"),
- termNode("baz", "baz"),
- ),
- },
- // When #push and #pop are applied to the same symbol, #pop will run first, then #push, even if #push appears first
- // in a definition. That is, the order in which #push and #pop appear in the grammar has nothing to do with the order in which
- // they are executed.
- {
- specSrc: `
-#name test;
-
-s
- : foo bar baz
- ;
-
-foo #push m1
- : 'foo';
-bar #mode m1 #push m2 #pop
- : 'bar';
-baz #mode m2
- : 'baz';
-`,
- src: `foobarbaz`,
- ast: nonTermNode("s",
- termNode("foo", "foo"),
- termNode("bar", "bar"),
- termNode("baz", "baz"),
- ),
- },
- // The parser can skip specified tokens.
- {
- specSrc: `
-#name test;
-
-s
- : foo bar
- ;
-
-foo
- : "foo";
-bar
- : "bar";
-white_space #skip
- : "[\u{0009}\u{0020}]+";
-`,
- src: `foo bar`,
- },
- // A grammar can contain fragments.
- {
- specSrc: `
-#name test;
-
-s
- : tagline
- ;
-tagline
- : "\f{words} IS OUT THERE.";
-fragment words
- : "[A-Za-z\u{0020}]+";
-`,
- src: `THE TRUTH IS OUT THERE.`,
- },
- // A grammar can contain ast actions.
- {
- specSrc: `
-#name test;
-
-list
- : l_bracket elems r_bracket #ast elems...
- ;
-elems
- : elems comma id #ast elems... id
- | id
- ;
-
-whitespace #skip
- : "\u{0020}+";
-l_bracket
- : '[';
-r_bracket
- : ']';
-comma
- : ',';
-id
- : "[A-Za-z]+";
-`,
- src: `[Byers, Frohike, Langly]`,
- cst: nonTermNode("list",
- termNode("x_1", "["),
- nonTermNode("elems",
- nonTermNode("elems",
- nonTermNode("elems",
- termNode("id", "Byers"),
- ),
- termNode("x_3", ","),
- termNode("id", "Frohike"),
- ),
- termNode("x_3", ","),
- termNode("id", "Langly"),
- ),
- termNode("x_2", "]"),
- ),
- ast: nonTermNode("list",
- termNode("id", "Byers"),
- termNode("id", "Frohike"),
- termNode("id", "Langly"),
- ),
- },
- // The '...' operator can expand child nodes.
- {
- specSrc: `
-#name test;
-
-s
- : a #ast a...
- ;
-a
- : a comma foo #ast a... foo
- | foo
- ;
-
-comma
- : ',';
-foo
- : 'foo';
-`,
- src: `foo,foo,foo`,
- ast: nonTermNode("s",
- termNode("foo", "foo"),
- termNode("foo", "foo"),
- termNode("foo", "foo"),
- ),
- },
- // The '...' operator can also be applied to an element that has no children.
- {
- specSrc: `
-#name test;
-
-s
- : a semi_colon #ast a...
- ;
-a
- :
- ;
-
-semi_colon
- : ';';
-`,
- src: `;`,
- ast: nonTermNode("s"),
- },
- // A label can be a parameter of an #ast directive.
- {
- specSrc: `
-#name test;
-
-#prec (
- #left add sub
-);
-
-expr
- : expr@lhs add expr@rhs #ast add lhs rhs
- | expr@lhs sub expr@rhs #ast sub lhs rhs
- | num
- ;
-
-add
- : '+';
-sub
- : '-';
-num
- : "0|[1-9][0-9]*";
-`,
- src: `1+2-3`,
- ast: nonTermNode("expr",
- termNode("sub", "-"),
- nonTermNode("expr",
- termNode("add", "+"),
- nonTermNode("expr",
- termNode("num", "1"),
- ),
- nonTermNode("expr",
- termNode("num", "2"),
- ),
- ),
- nonTermNode("expr",
- termNode("num", "3"),
- ),
- ),
- },
- // An AST can contain a symbol name, even if the symbol has a label. That is, unused labels are allowed.
- {
- specSrc: `
-#name test;
-
-s
- : foo@x semi_colon #ast foo
- ;
-
-semi_colon
- : ';';
-foo
- : 'foo';
-`,
- src: `foo;`,
- ast: nonTermNode("s",
- termNode("foo", "foo"),
- ),
- },
- // A production has the same precedence and associativity as the right-most terminal symbol.
- {
- specSrc: `
-#name test;
-
-#prec (
- #left add
-);
-
-expr
- : expr add expr // This alternative has the same precedence and associativity as 'add'.
- | int
- ;
-
-ws #skip
- : "[\u{0009}\u{0020}]+";
-int
- : "0|[1-9][0-9]*";
-add
- : '+';
-`,
- // This source is recognized as the following structure because the production `expr → expr add expr` has the same
- // precedence and associativity as the symbol 'add'.
- //
- // ((1+2)+3)
- //
- // If the symbol didn't have the precedence and left associativity, the production wouldn't have them
- // either, and this source would be recognized as the following structure.
- //
- // (1+(2+3))
- src: `1+2+3`,
- ast: nonTermNode("expr",
- nonTermNode("expr",
- nonTermNode("expr",
- termNode("int", "1"),
- ),
- termNode("add", "+"),
- nonTermNode("expr",
- termNode("int", "2"),
- ),
- ),
- termNode("add", "+"),
- nonTermNode("expr",
- termNode("int", "3"),
- ),
- ),
- },
- // The 'prec' directive can set precedence of a production.
- {
- specSrc: `
-#name test;
-
-#prec (
- #assign $uminus
- #left mul div
- #left add sub
-);
-
-expr
- : expr add expr
- | expr sub expr
- | expr mul expr
- | expr div expr
- | int
- | sub int #prec $uminus // This 'sub' means a unary minus symbol.
- ;
-
-ws #skip
- : "[\u{0009}\u{0020}]+";
-int
- : "0|[1-9][0-9]*";
-add
- : '+';
-sub
- : '-';
-mul
- : '*';
-div
- : '/';
-`,
- // This source is recognized as the following structure because the production `expr → sub int`
- // has the `#prec $uminus` directive, giving it higher precedence than the symbol `mul`.
- //
- // (((-1) * 20) / 5)
- //
- // If the production doesn't have the `#prec` directive, this source will be recognized as
- // the following structure.
- //
- // (- ((1 * 20) / 5))
- src: `-1*20/5`,
- cst: nonTermNode("expr",
- nonTermNode("expr",
- nonTermNode("expr",
- termNode("sub", "-"),
- termNode("int", "1"),
- ),
- termNode("mul", "*"),
- nonTermNode("expr",
- termNode("int", "20"),
- ),
- ),
- termNode("div", "/"),
- nonTermNode("expr",
- termNode("int", "5"),
- ),
- ),
- },
- // The grammar can contain the 'error' symbol.
- {
- specSrc: `
-#name test;
-
-s
- : id id id semi_colon
- | error semi_colon
- ;
-
-ws #skip
- : "[\u{0009}\u{0020}]+";
-semi_colon
- : ';';
-id
- : "[A-Za-z_]+";
-`,
- src: `foo bar baz ;`,
- },
- // The 'error' symbol can appear in an #ast directive.
- {
- specSrc: `
-#name test;
-
-s
- : foo semi_colon
- | error semi_colon #ast error
- ;
-
-semi_colon
- : ';';
-foo
- : 'foo';
-`,
- src: `bar;`,
- synErr: true,
- ast: nonTermNode("s",
- errorNode(),
- ),
- },
- // The 'error' symbol can have a label, and an #ast can reference it.
- {
- specSrc: `
-#name test;
-
-s
- : foo semi_colon
- | error@e semi_colon #ast e
- ;
-
-semi_colon
- : ';';
-foo
- : 'foo';
-`,
- src: `bar;`,
- synErr: true,
- ast: nonTermNode("s",
- errorNode(),
- ),
- },
- // The grammar can contain the '#recover' directive.
- {
- specSrc: `
-#name test;
-
-seq
- : seq elem
- | elem
- ;
-elem
- : id id id semi_colon
- | error semi_colon #recover
- ;
-
-ws #skip
- : "[\u{0009}\u{0020}]+";
-semi_colon
- : ';';
-id
- : "[A-Za-z_]+";
-`,
- src: `a b c ; d e f ;`,
- },
- // The same label can be used between different alternatives.
- {
- specSrc: `
-#name test;
-
-s
- : foo@x bar
- | foo@x
- ;
-
-foo: 'foo';
-bar: 'bar';
-`,
- src: `foo`,
- },
- }
-
- for i, tt := range tests {
- t.Run(fmt.Sprintf("#%v", i), func(t *testing.T) {
- ast, err := parser.Parse(strings.NewReader(tt.specSrc))
- if err != nil {
- t.Fatal(err)
- }
-
- b := grammar.GrammarBuilder{
- AST: ast,
- }
- cg, _, err := b.Build()
- if err != nil {
- t.Fatal(err)
- }
-
- toks, err := NewTokenStream(cg, strings.NewReader(tt.src))
- if err != nil {
- t.Fatal(err)
- }
-
- gram := NewGrammar(cg)
- tb := NewDefaultSyntaxTreeBuilder()
- var opt []ParserOption
- switch {
- case tt.ast != nil:
- opt = append(opt, SemanticAction(NewASTActionSet(gram, tb)))
- case tt.cst != nil:
- opt = append(opt, SemanticAction(NewCSTActionSet(gram, tb)))
- }
- p, err := NewParser(toks, gram, opt...)
- if err != nil {
- t.Fatal(err)
- }
-
- err = p.Parse()
- if err != nil {
- t.Fatal(err)
- }
-
- if !tt.synErr && len(p.SyntaxErrors()) > 0 {
- for _, synErr := range p.SyntaxErrors() {
- t.Fatalf("unexpected syntax errors occurred: %v", synErr)
- }
- }
-
- switch {
- case tt.ast != nil:
- testTree(t, tb.Tree(), tt.ast)
- case tt.cst != nil:
- testTree(t, tb.Tree(), tt.cst)
- }
- })
- }
-}
-
-func testTree(t *testing.T, node, expected *Node) {
- t.Helper()
-
- if node.Type != expected.Type || node.KindName != expected.KindName || node.Text != expected.Text {
- t.Fatalf("unexpected node; want: %+v, got: %+v", expected, node)
- }
- if len(node.Children) != len(expected.Children) {
- t.Fatalf("unexpected children; want: %v, got: %v", len(expected.Children), len(node.Children))
- }
- for i, c := range node.Children {
- testTree(t, c, expected.Children[i])
- }
-}
diff --git a/driver/parser/semantic_action.go b/driver/parser/semantic_action.go
deleted file mode 100644
index 6bb78cf..0000000
--- a/driver/parser/semantic_action.go
+++ /dev/null
@@ -1,371 +0,0 @@
-package parser
-
-import (
- "encoding/json"
- "fmt"
- "io"
- "strconv"
-)
-
-// SemanticActionSet is a set of semantic actions a parser calls.
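-//
-// A minimal implementation sketch (the type name is illustrative) that only
-// records which hooks fire:
-//
-//	type traceActions struct{ log []string }
-//
-//	func (a *traceActions) Shift(tok VToken, recovered bool)           { a.log = append(a.log, "shift") }
-//	func (a *traceActions) Reduce(prodNum int, recovered bool)         { a.log = append(a.log, "reduce") }
-//	func (a *traceActions) Accept()                                    { a.log = append(a.log, "accept") }
-//	func (a *traceActions) TrapAndShiftError(cause VToken, popped int) { a.log = append(a.log, "trap") }
-//	func (a *traceActions) MissError(cause VToken)                     { a.log = append(a.log, "miss") }
-//
-// A value like this is passed to the parser via the SemanticAction option.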
-type SemanticActionSet interface {
- // Shift runs when the parser shifts a symbol onto a state stack. `tok` is a token corresponding to the symbol.
- // When the parser recovered from an error state by shifting the token, `recovered` is true.
- Shift(tok VToken, recovered bool)
-
- // Reduce runs when the parser reduces an RHS of a production to its LHS. `prodNum` is a number of the production.
- // When the parser recovered from an error state by reducing the production, `recovered` is true.
- Reduce(prodNum int, recovered bool)
-
- // Accept runs when the parser accepts an input.
- Accept()
-
- // TrapAndShiftError runs when the parser traps a syntax error and shifts an error symbol onto the state stack.
- // `cause` is a token that caused a syntax error. `popped` is the number of frames that the parser discards
- // from the state stack.
- // Unlike the `Shift` function, this function doesn't take a token to be shifted as an argument because a token
- // corresponding to the error symbol doesn't exist.
- TrapAndShiftError(cause VToken, popped int)
-
- // MissError runs when the parser fails to trap a syntax error. `cause` is a token that caused a syntax error.
- MissError(cause VToken)
-}
-
-var _ SemanticActionSet = &SyntaxTreeActionSet{}
-
-// SyntaxTreeNode is a node of a syntax tree. A node type used in SyntaxTreeActionSet must implement SyntaxTreeNode interface.
-type SyntaxTreeNode interface {
- // ChildCount returns the child count of a node. The parser calls this method to know the number of children
- // to be expanded by an `#ast` directive with the `...` operator.
- ChildCount() int
-
- // ExpandChildren returns the children of a node. The parser calls this method to fetch the children to be
- // expanded by an `#ast` directive with the `...` operator.
- ExpandChildren() []SyntaxTreeNode
-}
-
-var _ SyntaxTreeNode = &Node{}
-
-// SyntaxTreeBuilder allows you to construct a syntax tree containing arbitrary user-defined node types.
-// The parser uses the SyntaxTreeBuilder interface as a part of semantic actions via the SyntaxTreeActionSet interface.
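-//
-// A sketch mirroring this package's tests, wiring the default builder into the
-// parser through an action set:
-//
-//	tb := NewDefaultSyntaxTreeBuilder()
-//	p, err := NewParser(toks, gram, SemanticAction(NewASTActionSet(gram, tb)))
-//	// After a successful p.Parse(), tb.Tree() returns the root *Node.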
-type SyntaxTreeBuilder interface {
- Shift(kindName string, tok VToken) SyntaxTreeNode
- ShiftError(kindName string) SyntaxTreeNode
- Reduce(kindName string, children []SyntaxTreeNode) SyntaxTreeNode
- Accept(f SyntaxTreeNode)
-}
-
-var _ SyntaxTreeBuilder = &DefaultSyntaxTreeBuilder{}
-
-// DefaultSyntaxTreeBuilder is an implementation of SyntaxTreeBuilder.
-type DefaultSyntaxTreeBuilder struct {
- tree *Node
-}
-
-// NewDefaultSyntaxTreeBuilder returns a new DefaultSyntaxTreeBuilder.
-func NewDefaultSyntaxTreeBuilder() *DefaultSyntaxTreeBuilder {
- return &DefaultSyntaxTreeBuilder{}
-}
-
-// Shift is an implementation of SyntaxTreeBuilder.Shift.
-func (b *DefaultSyntaxTreeBuilder) Shift(kindName string, tok VToken) SyntaxTreeNode {
- bytePos, byteLen := tok.BytePosition()
- row, col := tok.Position()
- return &Node{
- Type: NodeTypeTerminal,
- KindName: kindName,
- Text: string(tok.Lexeme()),
- BytePos: bytePos,
- ByteLen: byteLen,
- Row: row,
- Col: col,
- }
-}
-
-// ShiftError is an implementation of SyntaxTreeBuilder.ShiftError.
-func (b *DefaultSyntaxTreeBuilder) ShiftError(kindName string) SyntaxTreeNode {
- return &Node{
- Type: NodeTypeError,
- KindName: kindName,
- }
-}
-
-// Reduce is an implementation of SyntaxTreeBuilder.Reduce.
-func (b *DefaultSyntaxTreeBuilder) Reduce(kindName string, children []SyntaxTreeNode) SyntaxTreeNode {
- cNodes := make([]*Node, len(children))
- for i, c := range children {
- cNodes[i] = c.(*Node)
- }
- return &Node{
- Type: NodeTypeNonTerminal,
- KindName: kindName,
- Children: cNodes,
- }
-}
-
-// Accept is an implementation of SyntaxTreeBuilder.Accept.
-func (b *DefaultSyntaxTreeBuilder) Accept(f SyntaxTreeNode) {
- b.tree = f.(*Node)
-}
-
-// Tree returns a syntax tree when the parser has accepted an input. If a syntax error occurs, the return value is nil.
-func (b *DefaultSyntaxTreeBuilder) Tree() *Node {
- return b.tree
-}
-
-// SyntaxTreeActionSet is an implementation of the SemanticActionSet interface and constructs a syntax tree.
-type SyntaxTreeActionSet struct {
- gram Grammar
- builder SyntaxTreeBuilder
- semStack *semanticStack
- disableASTAction bool
-}
-
-// NewASTActionSet returns a new SyntaxTreeActionSet that constructs an AST (Abstract Syntax Tree).
-// When the grammar `gram` contains `#ast` directives, the SyntaxTreeActionSet returned by this function interprets them.
-func NewASTActionSet(gram Grammar, builder SyntaxTreeBuilder) *SyntaxTreeActionSet {
- return &SyntaxTreeActionSet{
- gram: gram,
- builder: builder,
- semStack: newSemanticStack(),
- }
-}
-
-// NewCSTActionSet returns a new SyntaxTreeActionSet that constructs a CST (Concrete Syntax Tree).
-// Even if the grammar `gram` contains `#ast` directives, the SyntaxTreeActionSet returned by this function ignores them.
-func NewCSTActionSet(gram Grammar, builder SyntaxTreeBuilder) *SyntaxTreeActionSet {
- return &SyntaxTreeActionSet{
- gram: gram,
- builder: builder,
- semStack: newSemanticStack(),
- disableASTAction: true,
- }
-}
-
-// Shift is an implementation of the SemanticActionSet.Shift method.
-func (a *SyntaxTreeActionSet) Shift(tok VToken, recovered bool) {
- term := a.tokenToTerminal(tok)
- a.semStack.push(a.builder.Shift(a.gram.Terminal(term), tok))
-}
-
-// Reduce is an implementation of the SemanticActionSet.Reduce method.
-func (a *SyntaxTreeActionSet) Reduce(prodNum int, recovered bool) {
- lhs := a.gram.LHS(prodNum)
-
- // When an alternative is empty, `n` will be 0, and `handle` will be an empty slice.
- n := a.gram.AlternativeSymbolCount(prodNum)
- handle := a.semStack.pop(n)
-
- var astAct []int
- if !a.disableASTAction {
- astAct = a.gram.ASTAction(prodNum)
- }
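- // In an AST action, a positive entry n references the n-th symbol of the
- // handle as-is, and a negative entry -n expands the children of the n-th
- // symbol in its place (the `...` operator).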
- var children []SyntaxTreeNode
- if astAct != nil {
- // Count the number of children in advance to avoid repeatedly growing the children slice.
- {
- l := 0
- for _, e := range astAct {
- if e > 0 {
- l++
- } else {
- offset := e*-1 - 1
- l += handle[offset].ChildCount()
- }
- }
-
- children = make([]SyntaxTreeNode, l)
- }
-
- p := 0
- for _, e := range astAct {
- if e > 0 {
- offset := e - 1
- children[p] = handle[offset]
- p++
- } else {
- offset := e*-1 - 1
- for _, c := range handle[offset].ExpandChildren() {
- children[p] = c
- p++
- }
- }
- }
- } else {
- // If an alternative has no AST action, the driver generates
- // a node with the same structure as the CST.
- children = handle
- }
-
- a.semStack.push(a.builder.Reduce(a.gram.NonTerminal(lhs), children))
-}
-
-// Accept is an implementation of the SemanticActionSet.Accept method.
-func (a *SyntaxTreeActionSet) Accept() {
- top := a.semStack.pop(1)
- a.builder.Accept(top[0])
-}
-
-// TrapAndShiftError is an implementation of the SemanticActionSet.TrapAndShiftError method.
-func (a *SyntaxTreeActionSet) TrapAndShiftError(cause VToken, popped int) {
- a.semStack.pop(popped)
- a.semStack.push(a.builder.ShiftError(a.gram.Terminal(a.gram.Error())))
-}
-
-// MissError is an implementation of the SemanticActionSet.MissError method.
-func (a *SyntaxTreeActionSet) MissError(cause VToken) {
-}
-
-func (a *SyntaxTreeActionSet) tokenToTerminal(tok VToken) int {
- if tok.EOF() {
- return a.gram.EOF()
- }
-
- return tok.TerminalID()
-}
-
-type semanticStack struct {
- frames []SyntaxTreeNode
-}
-
-func newSemanticStack() *semanticStack {
- return &semanticStack{
- frames: make([]SyntaxTreeNode, 0, 100),
- }
-}
-
-func (s *semanticStack) push(f SyntaxTreeNode) {
- s.frames = append(s.frames, f)
-}
-
-func (s *semanticStack) pop(n int) []SyntaxTreeNode {
- fs := s.frames[len(s.frames)-n:]
- s.frames = s.frames[:len(s.frames)-n]
-
- return fs
-}
-
-type NodeType int
-
-const (
- NodeTypeError NodeType = iota
- NodeTypeTerminal
- NodeTypeNonTerminal
-)
-
-// Node is an implementation of the SyntaxTreeNode interface.
-type Node struct {
- Type NodeType
- KindName string
- Text string
- BytePos int
- ByteLen int
- Row int
- Col int
- Children []*Node
-}
-
-func (n *Node) MarshalJSON() ([]byte, error) {
- switch n.Type {
- case NodeTypeError:
- return json.Marshal(struct {
- Type NodeType `json:"type"`
- KindName string `json:"kind_name"`
- }{
- Type: n.Type,
- KindName: n.KindName,
- })
- case NodeTypeTerminal:
- if n.KindName == "" {
- return json.Marshal(struct {
- Type NodeType `json:"type"`
- Text string `json:"text"`
- Row int `json:"row"`
- Col int `json:"col"`
- }{
- Type: n.Type,
- Text: n.Text,
- Row: n.Row,
- Col: n.Col,
- })
- }
- return json.Marshal(struct {
- Type NodeType `json:"type"`
- KindName string `json:"kind_name"`
- Text string `json:"text"`
- Row int `json:"row"`
- Col int `json:"col"`
- }{
- Type: n.Type,
- KindName: n.KindName,
- Text: n.Text,
- Row: n.Row,
- Col: n.Col,
- })
- case NodeTypeNonTerminal:
- return json.Marshal(struct {
- Type NodeType `json:"type"`
- KindName string `json:"kind_name"`
- Children []*Node `json:"children"`
- }{
- Type: n.Type,
- KindName: n.KindName,
- Children: n.Children,
- })
- default:
- return nil, fmt.Errorf("invalid node type: %v", n.Type)
- }
-}
-
-// ChildCount is an implementation of SyntaxTreeNode.ChildCount.
-func (n *Node) ChildCount() int {
- return len(n.Children)
-}
-
-// ExpandChildren is an implementation of SyntaxTreeNode.ExpandChildren.
-func (n *Node) ExpandChildren() []SyntaxTreeNode {
- fs := make([]SyntaxTreeNode, len(n.Children))
- for i, n := range n.Children {
- fs[i] = n
- }
- return fs
-}
-
-// PrintTree prints a syntax tree whose root is `node`.
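-// For example (a sketch writing the tree built by a DefaultSyntaxTreeBuilder to
-// standard output):
-//
-//	PrintTree(os.Stdout, tb.Tree())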
-func PrintTree(w io.Writer, node *Node) {
- printTree(w, node, "", "")
-}
-
-func printTree(w io.Writer, node *Node, ruledLine string, childRuledLinePrefix string) {
- if node == nil {
- return
- }
-
- switch node.Type {
- case NodeTypeError:
- fmt.Fprintf(w, "%v%v\n", ruledLine, node.KindName)
- case NodeTypeTerminal:
- fmt.Fprintf(w, "%v%v %v\n", ruledLine, node.KindName, strconv.Quote(node.Text))
- case NodeTypeNonTerminal:
- fmt.Fprintf(w, "%v%v\n", ruledLine, node.KindName)
-
- num := len(node.Children)
- for i, child := range node.Children {
- var line string
- if num > 1 && i < num-1 {
- line = "ā”œā”€ "
- } else {
- line = "└─ "
- }
-
- var prefix string
- if i >= num-1 {
- prefix = " "
- } else {
- prefix = "│ "
- }
-
- printTree(w, child, childRuledLinePrefix+line, childRuledLinePrefix+prefix)
- }
- }
-}
diff --git a/driver/parser/semantic_action_test.go b/driver/parser/semantic_action_test.go
deleted file mode 100644
index 8c224e2..0000000
--- a/driver/parser/semantic_action_test.go
+++ /dev/null
@@ -1,227 +0,0 @@
-package parser
-
-import (
- "fmt"
- "strings"
- "testing"
-
- "grammar"
- spec "spec/grammar"
- "spec/grammar/parser"
-)
-
-type testSemAct struct {
- gram *spec.CompiledGrammar
- actLog []string
-}
-
-func (a *testSemAct) Shift(tok VToken, recovered bool) {
- t := a.gram.Syntactic.Terminals[tok.TerminalID()]
- if recovered {
- a.actLog = append(a.actLog, fmt.Sprintf("shift/%v/recovered", t))
- } else {
- a.actLog = append(a.actLog, fmt.Sprintf("shift/%v", t))
- }
-}
-
-func (a *testSemAct) Reduce(prodNum int, recovered bool) {
- lhsSym := a.gram.Syntactic.LHSSymbols[prodNum]
- lhsText := a.gram.Syntactic.NonTerminals[lhsSym]
- if recovered {
- a.actLog = append(a.actLog, fmt.Sprintf("reduce/%v/recovered", lhsText))
- } else {
- a.actLog = append(a.actLog, fmt.Sprintf("reduce/%v", lhsText))
- }
-}
-
-func (a *testSemAct) Accept() {
- a.actLog = append(a.actLog, "accept")
-}
-
-func (a *testSemAct) TrapAndShiftError(cause VToken, popped int) {
- a.actLog = append(a.actLog, fmt.Sprintf("trap/%v/shift/error", popped))
-}
-
-func (a *testSemAct) MissError(cause VToken) {
- a.actLog = append(a.actLog, "miss")
-}
-
-func TestParserWithSemanticAction(t *testing.T) {
- specSrcWithErrorProd := `
-#name test;
-
-seq
- : seq elem semicolon
- | elem semicolon
- | error star star semicolon
- | error semicolon #recover
- ;
-elem
- : char char char
- ;
-
-ws #skip
- : "[\u{0009}\u{0020}]+";
-semicolon
- : ';';
-star
- : '*';
-char
- : "[a-z]";
-`
-
- specSrcWithoutErrorProd := `
-#name test;
-
-seq
- : seq elem semicolon
- | elem semicolon
- ;
-elem
- : char char char
- ;
-
-ws #skip
- : "[\u{0009}\u{0020}]+";
-semicolon
- : ';';
-char
- : "[a-z]";
-`
-
- tests := []struct {
- caption string
- specSrc string
- src string
- actLog []string
- }{
- {
- caption: "when an input contains no syntax error, the driver calls `Shift`, `Reduce`, and `Accept`.",
- specSrc: specSrcWithErrorProd,
- src: `a b c; d e f;`,
- actLog: []string{
- "shift/char",
- "shift/char",
- "shift/char",
- "reduce/elem",
- "shift/semicolon",
- "reduce/seq",
-
- "shift/char",
- "shift/char",
- "shift/char",
- "reduce/elem",
- "shift/semicolon",
- "reduce/seq",
-
- "accept",
- },
- },
- {
- caption: "when a grammar has `error` symbol, the driver calls `TrapAndShiftError`.",
- specSrc: specSrcWithErrorProd,
- src: `a; b !; c d !; e ! * *; h i j;`,
- actLog: []string{
- "shift/char",
- "trap/1/shift/error",
- "shift/semicolon",
- "reduce/seq/recovered",
-
- "shift/char",
- "trap/2/shift/error",
- "shift/semicolon",
- "reduce/seq/recovered",
-
- "shift/char",
- "shift/char",
- "trap/3/shift/error",
- "shift/semicolon",
- "reduce/seq/recovered",
-
- "shift/char",
- "trap/2/shift/error",
- "shift/star",
- "shift/star",
- // When the driver shifts three times, it recovers from an error.
- "shift/semicolon/recovered",
- "reduce/seq",
-
- "shift/char",
- "shift/char",
- "shift/char",
- "reduce/elem",
- "shift/semicolon",
- "reduce/seq",
-
- // Even if the input contains syntax errors, the driver calls `Accept` when the input is accepted
- // according to the error production.
- "accept",
- },
- },
- {
- caption: "when the input doesn't meet the error production, the driver calls `MissError`.",
- specSrc: specSrcWithErrorProd,
- src: `a !`,
- actLog: []string{
- "shift/char",
- "trap/1/shift/error",
-
- "miss",
- },
- },
- {
- caption: "when a syntax error isn't trapped, the driver calls `MissError`.",
- specSrc: specSrcWithoutErrorProd,
- src: `a !`,
- actLog: []string{
- "shift/char",
-
- "miss",
- },
- },
- }
- for _, tt := range tests {
- t.Run(tt.caption, func(t *testing.T) {
- ast, err := parser.Parse(strings.NewReader(tt.specSrc))
- if err != nil {
- t.Fatal(err)
- }
-
- b := grammar.GrammarBuilder{
- AST: ast,
- }
- gram, _, err := b.Build()
- if err != nil {
- t.Fatal(err)
- }
-
- toks, err := NewTokenStream(gram, strings.NewReader(tt.src))
- if err != nil {
- t.Fatal(err)
- }
-
- semAct := &testSemAct{
- gram: gram,
- }
- p, err := NewParser(toks, NewGrammar(gram), SemanticAction(semAct))
- if err != nil {
- t.Fatal(err)
- }
-
- err = p.Parse()
- if err != nil {
- t.Fatal(err)
- }
-
- if len(semAct.actLog) != len(tt.actLog) {
- t.Fatalf("unexpected action log; want: %+v, got: %+v", tt.actLog, semAct.actLog)
- }
-
- for i, e := range tt.actLog {
- if semAct.actLog[i] != e {
- t.Fatalf("unexpected action log; want: %+v, got: %+v", tt.actLog, semAct.actLog)
- }
- }
- })
- }
-}
diff --git a/driver/parser/spec.go b/driver/parser/spec.go
deleted file mode 100644
index b288f5f..0000000
--- a/driver/parser/spec.go
+++ /dev/null
@@ -1,73 +0,0 @@
-package parser
-
-import spec "spec/grammar"
-
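-// grammarImpl adapts a compiled grammar (spec.CompiledGrammar) to the Grammar
-// interface that the parser consumes.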
-type grammarImpl struct {
- g *spec.CompiledGrammar
-}
-
-func NewGrammar(g *spec.CompiledGrammar) *grammarImpl {
- return &grammarImpl{
- g: g,
- }
-}
-
-func (g *grammarImpl) InitialState() int {
- return g.g.Syntactic.InitialState
-}
-
-func (g *grammarImpl) StartProduction() int {
- return g.g.Syntactic.StartProduction
-}
-
-func (g *grammarImpl) RecoverProduction(prod int) bool {
- return g.g.Syntactic.RecoverProductions[prod] != 0
-}
-
-func (g *grammarImpl) Action(state int, terminal int) int {
- return g.g.Syntactic.Action[state*g.g.Syntactic.TerminalCount+terminal]
-}
-
-func (g *grammarImpl) GoTo(state int, lhs int) int {
- return g.g.Syntactic.GoTo[state*g.g.Syntactic.NonTerminalCount+lhs]
-}
-
-func (g *grammarImpl) AlternativeSymbolCount(prod int) int {
- return g.g.Syntactic.AlternativeSymbolCounts[prod]
-}
-
-func (g *grammarImpl) TerminalCount() int {
- return g.g.Syntactic.TerminalCount
-}
-
-func (g *grammarImpl) SkipTerminal(terminal int) bool {
- return g.g.Syntactic.TerminalSkip[terminal] == 1
-}
-
-func (g *grammarImpl) ErrorTrapperState(state int) bool {
- return g.g.Syntactic.ErrorTrapperStates[state] != 0
-}
-
-func (g *grammarImpl) NonTerminal(nonTerminal int) string {
- return g.g.Syntactic.NonTerminals[nonTerminal]
-}
-
-func (g *grammarImpl) LHS(prod int) int {
- return g.g.Syntactic.LHSSymbols[prod]
-}
-
-func (g *grammarImpl) EOF() int {
- return g.g.Syntactic.EOFSymbol
-}
-
-func (g *grammarImpl) Error() int {
- return g.g.Syntactic.ErrorSymbol
-}
-
-func (g *grammarImpl) Terminal(terminal int) string {
- return g.g.Syntactic.Terminals[terminal]
-}
-
-func (g *grammarImpl) ASTAction(prod int) []int {
- return g.g.ASTAction.Entries[prod]
-}
diff --git a/driver/parser/syntax_error_test.go b/driver/parser/syntax_error_test.go
deleted file mode 100644
index c508be6..0000000
--- a/driver/parser/syntax_error_test.go
+++ /dev/null
@@ -1,306 +0,0 @@
-package parser
-
-import (
- "fmt"
- "sort"
- "strings"
- "testing"
-
- "grammar"
- "spec/grammar/parser"
-)
-
-func TestParserWithSyntaxErrors(t *testing.T) {
- tests := []struct {
- caption string
- specSrc string
- src string
- synErrCount int
- }{
- {
- caption: "the parser can report a syntax error",
- specSrc: `
-#name test;
-
-s
- : foo
- ;
-
-foo
- : 'foo';
-`,
- src: `bar`,
- synErrCount: 1,
- },
- {
- caption: "when the parser reduced a production having the reduce directive, the parser will recover from an error state",
- specSrc: `
-#name test;
-
-seq
- : seq elem semi_colon
- | elem semi_colon
- | error semi_colon #recover
- ;
-elem
- : a b c
- ;
-
-ws #skip
- : "[\u{0009}\u{0020}]+";
-semi_colon
- : ';';
-a
- : 'a';
-b
- : 'b';
-c
- : 'c';
-`,
- src: `!; a!; ab!;`,
- synErrCount: 3,
- },
- {
- caption: "After the parser shifts the error symbol, symbols are ignored until a symbol the parser can perform shift appears",
- specSrc: `
-#name test;
-
-seq
- : seq elem semi_colon
- | elem semi_colon
- | error semi_colon #recover
- ;
-elem
- : a b c
- ;
-
-ws #skip
- : "[\u{0009}\u{0020}]+";
-semi_colon
- : ';';
-a
- : 'a';
-b
- : 'b';
-c
- : 'c';
-`,
- // After the parser transitions to the error state on reading the first invalid symbol ('!'),
- // the second and third invalid symbols ('!') are ignored.
- src: `! ! !; a!; ab!;`,
- synErrCount: 3,
- },
- {
- caption: "when the parser performs shift three times, the parser recovers from the error state",
- specSrc: `
-#name test;
-
-seq
- : seq elem semi_colon
- | elem semi_colon
- | error star star semi_colon
- ;
-elem
- : a b c
- ;
-
-ws #skip
- : "[\u{0009}\u{0020}]+";
-semi_colon
- : ';';
-star
- : '*';
-a
- : 'a';
-b
- : 'b';
-c
- : 'c';
-`,
- src: `!**; a!**; ab!**; abc!`,
- synErrCount: 4,
- },
- }
- for i, tt := range tests {
- t.Run(fmt.Sprintf("#%v", i), func(t *testing.T) {
- ast, err := parser.Parse(strings.NewReader(tt.specSrc))
- if err != nil {
- t.Fatal(err)
- }
-
- b := grammar.GrammarBuilder{
- AST: ast,
- }
- gram, _, err := b.Build()
- if err != nil {
- t.Fatal(err)
- }
-
- toks, err := NewTokenStream(gram, strings.NewReader(tt.src))
- if err != nil {
- t.Fatal(err)
- }
-
- p, err := NewParser(toks, NewGrammar(gram))
- if err != nil {
- t.Fatal(err)
- }
-
- err = p.Parse()
- if err != nil {
- t.Fatal(err)
- }
-
- synErrs := p.SyntaxErrors()
- if len(synErrs) != tt.synErrCount {
- t.Fatalf("unexpected syntax error; want: %v error(s), got: %v error(s)", tt.synErrCount, len(synErrs))
- }
- })
- }
-}
-
-func TestParserWithSyntaxErrorAndExpectedLookahead(t *testing.T) {
- tests := []struct {
- caption string
- specSrc string
- src string
- cause string
- expected []string
- }{
- {
- caption: "the parser reports an expected lookahead symbol",
- specSrc: `
-#name test;
-
-s
- : foo
- ;
-
-foo
- : 'foo';
-`,
- src: `bar`,
- cause: `bar`,
- expected: []string{
- "foo",
- },
- },
- {
- caption: "the parser reports expected lookahead symbols",
- specSrc: `
-#name test;
-
-s
- : foo
- | bar
- ;
-
-foo
- : 'foo';
-bar
- : 'bar';
-`,
- src: `baz`,
- cause: `baz`,
- expected: []string{
- "foo",
- "bar",
- },
- },
- {
- caption: "the parser may report the EOF as an expected lookahead symbol",
- specSrc: `
-#name test;
-
-s
- : foo
- ;
-
-foo
- : 'foo';
-`,
- src: `foobar`,
- cause: `bar`,
- expected: []string{
- "<eof>",
- },
- },
- {
- caption: "the parser may report the EOF and others as expected lookahead symbols",
- specSrc: `
-#name test;
-
-s
- : foo
- |
- ;
-
-foo
- : 'foo';
-`,
- src: `bar`,
- cause: `bar`,
- expected: []string{
- "foo",
- "<eof>",
- },
- },
- }
- for i, tt := range tests {
- t.Run(fmt.Sprintf("#%v", i), func(t *testing.T) {
- ast, err := parser.Parse(strings.NewReader(tt.specSrc))
- if err != nil {
- t.Fatal(err)
- }
-
- b := grammar.GrammarBuilder{
- AST: ast,
- }
- gram, _, err := b.Build()
- if err != nil {
- t.Fatal(err)
- }
-
- toks, err := NewTokenStream(gram, strings.NewReader(tt.src))
- if err != nil {
- t.Fatal(err)
- }
-
- p, err := NewParser(toks, NewGrammar(gram))
- if err != nil {
- t.Fatal(err)
- }
-
- err = p.Parse()
- if err != nil {
- t.Fatal(err)
- }
-
- synErrs := p.SyntaxErrors()
- if synErrs == nil {
- t.Fatalf("expected one syntax error, but it didn't occur")
- }
- if len(synErrs) != 1 {
- t.Fatalf("too many syntax errors: %v errors", len(synErrs))
- }
- synErr := synErrs[0]
- if string(synErr.Token.Lexeme()) != tt.cause {
- t.Fatalf("unexpected lexeme: want: %v, got: %v", tt.cause, string(synErr.Token.Lexeme()))
- }
- if len(synErr.ExpectedTerminals) != len(tt.expected) {
- t.Fatalf("unexpected lookahead symbols: want: %v, got: %v", tt.expected, synErr.ExpectedTerminals)
- }
- sort.Slice(tt.expected, func(i, j int) bool {
- return tt.expected[i] < tt.expected[j]
- })
- sort.Slice(synErr.ExpectedTerminals, func(i, j int) bool {
- return synErr.ExpectedTerminals[i] < synErr.ExpectedTerminals[j]
- })
- for i, e := range tt.expected {
- if synErr.ExpectedTerminals[i] != e {
- t.Errorf("unexpected lookahead symbol: want: %v, got: %v", e, synErr.ExpectedTerminals[i])
- }
- }
- })
- }
-}
diff --git a/driver/parser/template.go b/driver/parser/template.go
deleted file mode 100644
index 3e25193..0000000
--- a/driver/parser/template.go
+++ /dev/null
@@ -1,535 +0,0 @@
-package parser
-
-import (
- "bytes"
- _ "embed"
- "fmt"
- "go/ast"
- "go/format"
- "go/parser"
- "go/token"
- goToken "go/token"
- "strconv"
- "strings"
- "text/template"
-
- spec "spec/grammar"
-)
-
-//go:embed parser.go
-var parserCoreSrc string
-
-//go:embed semantic_action.go
-var semActSrc string
-
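-// GenParser generates a self-contained parser source file for the package
-// `pkgName`, combining the parser core, the compiled grammar tables, and the
-// lexer rendered from templates.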
-func GenParser(cgram *spec.CompiledGrammar, pkgName string) ([]byte, error) {
- var parserSrc string
- {
- fset := goToken.NewFileSet()
- f, err := parser.ParseFile(fset, "parser.go", parserCoreSrc, parser.ParseComments)
- if err != nil {
- return nil, err
- }
-
- var b strings.Builder
- err = format.Node(&b, fset, f)
- if err != nil {
- return nil, err
- }
-
- parserSrc = b.String()
- }
-
- var grammarSrc string
- {
- t, err := template.New("").Funcs(genGrammarTemplateFuncs(cgram)).Parse(grammarSrcTmplate)
- if err != nil {
- return nil, err
- }
-
- var b strings.Builder
- err = t.Execute(&b, map[string]interface{}{
- "initialState": cgram.Syntactic.InitialState,
- "startProduction": cgram.Syntactic.StartProduction,
- "terminalCount": cgram.Syntactic.TerminalCount,
- "nonTerminalCount": cgram.Syntactic.NonTerminalCount,
- "eofSymbol": cgram.Syntactic.EOFSymbol,
- "errorSymbol": cgram.Syntactic.ErrorSymbol,
- })
- if err != nil {
- return nil, err
- }
-
- grammarSrc = b.String()
- }
-
- var lexerSrc string
- {
- t, err := template.New("").Funcs(genLexerTemplateFuncs(cgram)).Parse(lexerSrcTmplate)
- if err != nil {
- return nil, err
- }
-
- var b strings.Builder
- err = t.Execute(&b, nil)
- if err != nil {
- return nil, err
- }
-
- lexerSrc = b.String()
- }
-
- var src string
- {
- tmpl := `// Code generated by vartan-go. DO NOT EDIT.
-{{ .parserSrc }}
-
-{{ .grammarSrc }}
-
-{{ .lexerSrc }}
-`
- t, err := template.New("").Parse(tmpl)
- if err != nil {
- return nil, err
- }
-
- var b strings.Builder
- err = t.Execute(&b, map[string]string{
- "parserSrc": parserSrc,
- "grammarSrc": grammarSrc,
- "lexerSrc": lexerSrc,
- })
- if err != nil {
- return nil, err
- }
-
- src = b.String()
- }
-
- fset := goToken.NewFileSet()
- f, err := parser.ParseFile(fset, "", src, parser.ParseComments)
- if err != nil {
- return nil, err
- }
-
- f.Name = ast.NewIdent(pkgName)
-
- // Add the "io" import that the generated token stream requires.
- for _, d := range f.Decls {
- gd, ok := d.(*ast.GenDecl)
- if !ok || gd.Tok != goToken.IMPORT {
- continue
- }
- gd.Specs = append(gd.Specs, &ast.ImportSpec{
- Path: &ast.BasicLit{
- Value: `"io"`,
- },
- })
- break
- }
-
- var b bytes.Buffer
- err = format.Node(&b, fset, f)
- if err != nil {
- return nil, err
- }
-
- return b.Bytes(), nil
-}
-
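-// grammarSrcTemplate is the text/template for the generated grammarImpl
-// type, which exposes the LR tables of a compiled grammar (action, goto,
-// AST actions, and so on) through plain accessor methods.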
-const grammarSrcTemplate = `
-type grammarImpl struct {
- recoverProductions []int
- action []int
- goTo []int
- alternativeSymbolCounts []int
- errorTrapperStates []int
- nonTerminals []string
- lhsSymbols []int
- terminals []string
- terminalSkip []int
- astActions [][]int
-}
-
-func NewGrammar() *grammarImpl {
- return &grammarImpl{
- recoverProductions: {{ genRecoverProductions }},
- action: {{ genAction }},
- goTo: {{ genGoTo }},
- alternativeSymbolCounts: {{ genAlternativeSymbolCounts }},
- errorTrapperStates: {{ genErrorTrapperStates }},
- nonTerminals: {{ genNonTerminals }},
- lhsSymbols: {{ genLHSSymbols }},
- terminals: {{ genTerminals }},
- terminalSkip: {{ genTerminalSkip }},
- astActions: {{ genASTActions }},
- }
-}
-
-func (g *grammarImpl) InitialState() int {
- return {{ .initialState }}
-}
-
-func (g *grammarImpl) StartProduction() int {
- return {{ .startProduction }}
-}
-
-func (g *grammarImpl) RecoverProduction(prod int) bool {
- return g.recoverProductions[prod] != 0
-}
-
-func (g *grammarImpl) Action(state int, terminal int) int {
- return g.action[state*{{ .terminalCount }}+terminal]
-}
-
-func (g *grammarImpl) GoTo(state int, lhs int) int {
- return g.goTo[state*{{ .nonTerminalCount }}+lhs]
-}
-
-func (g *grammarImpl) AlternativeSymbolCount(prod int) int {
- return g.alternativeSymbolCounts[prod]
-}
-
-func (g *grammarImpl) TerminalCount() int {
- return {{ .terminalCount }}
-}
-
-func (g *grammarImpl) SkipTerminal(terminal int) bool {
- return g.terminalSkip[terminal] == 1
-}
-
-func (g *grammarImpl) ErrorTrapperState(state int) bool {
- return g.errorTrapperStates[state] != 0
-}
-
-func (g *grammarImpl) NonTerminal(nonTerminal int) string {
- return g.nonTerminals[nonTerminal]
-}
-
-func (g *grammarImpl) LHS(prod int) int {
- return g.lhsSymbols[prod]
-}
-
-func (g *grammarImpl) EOF() int {
- return {{ .eofSymbol }}
-}
-
-func (g *grammarImpl) Error() int {
- return {{ .errorSymbol }}
-}
-
-func (g *grammarImpl) Terminal(terminal int) string {
- return g.terminals[terminal]
-}
-
-func (g *grammarImpl) ASTAction(prod int) []int {
- return g.astActions[prod]
-}
-`
-
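-// genGrammarTemplateFuncs returns the functions that grammarSrcTemplate uses
-// to render each table of cgram as a Go composite literal; the shared
-// writeInts, genIntSlice, and genStringSlice helpers below do the formatting.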
-func genGrammarTemplateFuncs(cgram *spec.CompiledGrammar) template.FuncMap {
- return template.FuncMap{
- "genRecoverProductions": func() string {
- return genIntSlice(cgram.Syntactic.RecoverProductions)
- },
- "genAction": func() string {
- return genIntSlice(cgram.Syntactic.Action)
- },
- "genGoTo": func() string {
- return genIntSlice(cgram.Syntactic.GoTo)
- },
- "genAlternativeSymbolCounts": func() string {
- return genIntSlice(cgram.Syntactic.AlternativeSymbolCounts)
- },
- "genErrorTrapperStates": func() string {
- return genIntSlice(cgram.Syntactic.ErrorTrapperStates)
- },
- "genNonTerminals": func() string {
- return genStringSlice(cgram.Syntactic.NonTerminals)
- },
- "genLHSSymbols": func() string {
- return genIntSlice(cgram.Syntactic.LHSSymbols)
- },
- "genTerminals": func() string {
- return genStringSlice(cgram.Syntactic.Terminals)
- },
- "genTerminalSkip": func() string {
- return genIntSlice(cgram.Syntactic.TerminalSkip)
- },
- "genASTActions": func() string {
- var b strings.Builder
- fmt.Fprintf(&b, "[][]int{\n")
- for _, entries := range cgram.ASTAction.Entries {
- if len(entries) == 0 {
- fmt.Fprintf(&b, "nil,\n")
- continue
- }
- fmt.Fprintf(&b, "{\n")
- writeInts(&b, entries)
- fmt.Fprintf(&b, "},\n")
- }
- fmt.Fprintf(&b, "}")
- return b.String()
- },
- }
-}
-
-// writeInts writes vals as comma-separated int literals, breaking the line
-// after every 20 values to keep the generated source readable.
-func writeInts(b *strings.Builder, vals []int) {
- c := 1
- for _, v := range vals {
- fmt.Fprintf(b, "%v, ", v)
- if c == 20 {
- fmt.Fprintf(b, "\n")
- c = 1
- } else {
- c++
- }
- }
- if c > 1 {
- fmt.Fprintf(b, "\n")
- }
-}
-
-// genIntSlice renders vals as a []int composite literal.
-func genIntSlice(vals []int) string {
- var b strings.Builder
- fmt.Fprintf(&b, "[]int{\n")
- writeInts(&b, vals)
- fmt.Fprintf(&b, "}")
- return b.String()
-}
-
-// genStringSlice renders vals as a []string composite literal, one quoted
-// element per line.
-func genStringSlice(vals []string) string {
- var b strings.Builder
- fmt.Fprintf(&b, "[]string{\n")
- for _, v := range vals {
- fmt.Fprintf(&b, "%v,\n", strconv.Quote(v))
- }
- fmt.Fprintf(&b, "}")
- return b.String()
-}
-
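-// lexerSrcTemplate is the text/template for the token-stream adapter that
-// wraps the generated Lexer and maps lexical kind IDs to terminal IDs.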
-const lexerSrcTemplate = `
-type vToken struct {
- terminalID int
- tok *Token
-}
-
-func (t *vToken) TerminalID() int {
- return t.terminalID
-}
-
-func (t *vToken) Lexeme() []byte {
- return t.tok.Lexeme
-}
-
-func (t *vToken) EOF() bool {
- return t.tok.EOF
-}
-
-func (t *vToken) Invalid() bool {
- return t.tok.Invalid
-}
-
-func (t *vToken) BytePosition() (int, int) {
- return t.tok.BytePos, t.tok.ByteLen
-}
-
-func (t *vToken) Position() (int, int) {
- return t.tok.Row, t.tok.Col
-}
-
-var kindToTerminal = {{ genKindToTerminal }}
-
-type tokenStream struct {
- lex *Lexer
-}
-
-func NewTokenStream(src io.Reader) (*tokenStream, error) {
- lex, err := NewLexer(NewLexSpec(), src)
- if err != nil {
- return nil, err
- }
-
- return &tokenStream{
- lex: lex,
- }, nil
-}
-
-func (t *tokenStream) Next() (VToken, error) {
- tok, err := t.lex.Next()
- if err != nil {
- return nil, err
- }
- return &vToken{
- terminalID: kindToTerminal[tok.KindID],
- tok: tok,
- }, nil
-}
-`
-
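-// genLexerTemplateFuncs returns the functions that lexerSrcTemplate uses to
-// render the kind-to-terminal mapping table.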
-func genLexerTemplateFuncs(cgram *spec.CompiledGrammar) template.FuncMap {
- return template.FuncMap{
- "genKindToTerminal": func() string {
- return genIntSlice(cgram.Syntactic.KindToTerminal)
- },
- }
-}
-
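-// GenSemanticAction generates the source code of the semantic action types
-// from the embedded semantic_action.go, renamed into pkgName and formatted
-// with go/format.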
-func GenSemanticAction(pkgName string) ([]byte, error) {
- var src string
- {
- tmpl := `// Code generated by vartan-go. DO NOT EDIT.
-{{ .semActSrc }}
-`
- t, err := template.New("").Parse(tmpl)
- if err != nil {
- return nil, err
- }
-
- var b strings.Builder
- err = t.Execute(&b, map[string]string{
- "semActSrc": semActSrc,
- })
- if err != nil {
- return nil, err
- }
-
- src = b.String()
- }
-
- fset := goToken.NewFileSet()
- f, err := parser.ParseFile(fset, "", src, parser.ParseComments)
- if err != nil {
- return nil, err
- }
-
- f.Name = ast.NewIdent(pkgName)
-
- var b bytes.Buffer
- err = format.Node(&b, fset, f)
- if err != nil {
- return nil, err
- }
-
- return b.Bytes(), nil
-}
diff --git a/driver/parser/token_stream.go b/driver/parser/token_stream.go
deleted file mode 100644
index b15e8bf..0000000
--- a/driver/parser/token_stream.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package parser
-
-import (
- "io"
-
- "driver/lexer"
- spec "spec/grammar"
-)
-
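-// vToken adapts a lexer.Token to the VToken interface that the parser
-// consumes, pairing the token with its terminal ID.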
-type vToken struct {
- terminalID int
- tok *lexer.Token
-}
-
-func (t *vToken) TerminalID() int {
- return t.terminalID
-}
-
-func (t *vToken) Lexeme() []byte {
- return t.tok.Lexeme
-}
-
-func (t *vToken) EOF() bool {
- return t.tok.EOF
-}
-
-func (t *vToken) Invalid() bool {
- return t.tok.Invalid
-}
-
-func (t *vToken) BytePosition() (int, int) {
- return t.tok.BytePos, t.tok.ByteLen
-}
-
-func (t *vToken) Position() (int, int) {
- return t.tok.Row, t.tok.Col
-}
-
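-// tokenStream converts the lexer's token sequence into parser tokens using
-// the grammar's kind-to-terminal mapping.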
-type tokenStream struct {
- lex *lexer.Lexer
- kindToTerminal []int
-}
-
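-// NewTokenStream builds a TokenStream that lexes src with the lexical part
-// of g and translates each token's kind ID into a terminal ID.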
-func NewTokenStream(g *spec.CompiledGrammar, src io.Reader) (TokenStream, error) {
- lex, err := lexer.NewLexer(lexer.NewLexSpec(g.Lexical), src)
- if err != nil {
- return nil, err
- }
-
- return &tokenStream{
- lex: lex,
- kindToTerminal: g.Syntactic.KindToTerminal,
- }, nil
-}
-
-func (l *tokenStream) Next() (VToken, error) {
- tok, err := l.lex.Next()
- if err != nil {
- return nil, err
- }
- return &vToken{
- terminalID: l.kindToTerminal[tok.KindID],
- tok: tok,
- }, nil
-}