path: root/driver/lexer
Diffstat (limited to 'driver/lexer')
-rw-r--r--  driver/lexer/lexer.go       335
-rw-r--r--  driver/lexer/lexer_test.go  932
-rw-r--r--  driver/lexer/spec.go         71
-rw-r--r--  driver/lexer/template.go    760
4 files changed, 0 insertions, 2098 deletions
diff --git a/driver/lexer/lexer.go b/driver/lexer/lexer.go
deleted file mode 100644
index 3f9712e..0000000
--- a/driver/lexer/lexer.go
+++ /dev/null
@@ -1,335 +0,0 @@
-package lexer
-
-import (
- "fmt"
- "io"
-)
-
-type ModeID int
-
-func (id ModeID) Int() int {
- return int(id)
-}
-
-type StateID int
-
-func (id StateID) Int() int {
- return int(id)
-}
-
-type KindID int
-
-func (id KindID) Int() int {
- return int(id)
-}
-
-type ModeKindID int
-
-func (id ModeKindID) Int() int {
- return int(id)
-}
-
-type LexSpec interface {
- InitialMode() ModeID
- Pop(mode ModeID, modeKind ModeKindID) bool
- Push(mode ModeID, modeKind ModeKindID) (ModeID, bool)
- ModeName(mode ModeID) string
- InitialState(mode ModeID) StateID
- NextState(mode ModeID, state StateID, v int) (StateID, bool)
- Accept(mode ModeID, state StateID) (ModeKindID, bool)
- KindIDAndName(mode ModeID, modeKind ModeKindID) (KindID, string)
-}
-
-// Token represents a token.
-type Token struct {
- // ModeID is an ID of a lex mode.
- ModeID ModeID
-
- // KindID is an ID of a kind. This is unique among all modes.
- KindID KindID
-
- // ModeKindID is an ID of a lexical kind. This is unique only within a mode.
- // Note that you need to use the KindID field if you want to identify a kind across all modes.
- ModeKindID ModeKindID
-
- // BytePos is a byte position where a token appears.
- BytePos int
-
- // ByteLen is the length of a token in bytes.
- ByteLen int
-
- // Row is a row number where a token appears.
- Row int
-
- // Col is a column number where a token appears.
- // Note that Col is counted in code points, not bytes.
- Col int
-
- // Lexeme is a byte sequence that matches a pattern in the lexical specification.
- Lexeme []byte
-
- // When this field is true, it means the token is the EOF token.
- EOF bool
-
- // When this field is true, it means the token is an error token.
- Invalid bool
-}
-
-type LexerOption func(l *Lexer) error
-
-// DisableModeTransition disables automatic mode transitions. Even if the lexical specification contains push and pop
-// operations, the lexer doesn't perform them. When the lexical specification has multiple modes and this option is
-// enabled, you need to call the Lexer.PushMode and Lexer.PopMode methods to perform mode transitions yourself. You can use
-// the Lexer.Mode method to know the current lex mode.
-func DisableModeTransition() LexerOption {
- return func(l *Lexer) error {
- l.passiveModeTran = true
- return nil
- }
-}
-
-type lexerState struct {
- srcPtr int
- row int
- col int
-}
-
-type Lexer struct {
- spec LexSpec
- src []byte
- state lexerState
- lastAcceptedState lexerState
- tokBuf []*Token
- modeStack []ModeID
- passiveModeTran bool
-}
-
-// NewLexer returns a new lexer.
-func NewLexer(spec LexSpec, src io.Reader, opts ...LexerOption) (*Lexer, error) {
- b, err := io.ReadAll(src)
- if err != nil {
- return nil, err
- }
- l := &Lexer{
- spec: spec,
- src: b,
- state: lexerState{
- srcPtr: 0,
- row: 0,
- col: 0,
- },
- lastAcceptedState: lexerState{
- srcPtr: 0,
- row: 0,
- col: 0,
- },
- modeStack: []ModeID{
- spec.InitialMode(),
- },
- passiveModeTran: false,
- }
- for _, opt := range opts {
- err := opt(l)
- if err != nil {
- return nil, err
- }
- }
-
- return l, nil
-}
-
-// Next returns the next token.
-func (l *Lexer) Next() (*Token, error) {
- if len(l.tokBuf) > 0 {
- tok := l.tokBuf[0]
- l.tokBuf = l.tokBuf[1:]
- return tok, nil
- }
-
- tok, err := l.nextAndTransition()
- if err != nil {
- return nil, err
- }
- if !tok.Invalid {
- return tok, nil
- }
- errTok := tok
- for {
- tok, err = l.nextAndTransition()
- if err != nil {
- return nil, err
- }
- if !tok.Invalid {
- break
- }
- errTok.ByteLen += tok.ByteLen
- errTok.Lexeme = append(errTok.Lexeme, tok.Lexeme...)
- }
- l.tokBuf = append(l.tokBuf, tok)
-
- return errTok, nil
-}
-
-func (l *Lexer) nextAndTransition() (*Token, error) {
- tok, err := l.next()
- if err != nil {
- return nil, err
- }
- if tok.EOF || tok.Invalid {
- return tok, nil
- }
- if l.passiveModeTran {
- return tok, nil
- }
- mode := l.Mode()
- if l.spec.Pop(mode, tok.ModeKindID) {
- err := l.PopMode()
- if err != nil {
- return nil, err
- }
- }
- if mode, ok := l.spec.Push(mode, tok.ModeKindID); ok {
- l.PushMode(mode)
- }
- // The length of the mode stack must be checked after both the pop and push operations because they can occur on the
- // same token. When the mode stack has just one element and it is popped, the stack becomes temporarily empty.
- // However, since a push operation may be performed immediately afterwards, the lexer allows the stack to be temporarily empty.
- if len(l.modeStack) == 0 {
- return nil, fmt.Errorf("a mode stack must have at least one element")
- }
- return tok, nil
-}
-
-func (l *Lexer) next() (*Token, error) {
- mode := l.Mode()
- state := l.spec.InitialState(mode)
- buf := []byte{}
- startPos := l.state.srcPtr
- row := l.state.row
- col := l.state.col
- var tok *Token
- for {
- v, eof := l.read()
- if eof {
- if tok != nil {
- l.revert()
- return tok, nil
- }
- // When `buf` still holds unaccepted data at EOF, the lexer treats the buffered data as an invalid token.
- if len(buf) > 0 {
- return &Token{
- ModeID: mode,
- ModeKindID: 0,
- BytePos: startPos,
- ByteLen: l.state.srcPtr - startPos,
- Lexeme: buf,
- Row: row,
- Col: col,
- Invalid: true,
- }, nil
- }
- return &Token{
- ModeID: mode,
- ModeKindID: 0,
- BytePos: startPos,
- Row: row,
- Col: col,
- EOF: true,
- }, nil
- }
- buf = append(buf, v)
- nextState, ok := l.spec.NextState(mode, state, int(v))
- if !ok {
- if tok != nil {
- l.revert()
- return tok, nil
- }
- return &Token{
- ModeID: mode,
- ModeKindID: 0,
- BytePos: startPos,
- ByteLen: l.state.srcPtr - startPos,
- Lexeme: buf,
- Row: row,
- Col: col,
- Invalid: true,
- }, nil
- }
- state = nextState
- if modeKindID, ok := l.spec.Accept(mode, state); ok {
- kindID, _ := l.spec.KindIDAndName(mode, modeKindID)
- tok = &Token{
- ModeID: mode,
- KindID: kindID,
- ModeKindID: modeKindID,
- BytePos: startPos,
- ByteLen: l.state.srcPtr - startPos,
- Lexeme: buf,
- Row: row,
- Col: col,
- }
- l.accept()
- }
- }
-}
-
-// Mode returns the current lex mode.
-func (l *Lexer) Mode() ModeID {
- return l.modeStack[len(l.modeStack)-1]
-}
-
-// PushMode adds a lex mode onto the mode stack.
-func (l *Lexer) PushMode(mode ModeID) {
- l.modeStack = append(l.modeStack, mode)
-}
-
-// PopMode removes a lex mode from the top of the mode stack.
-func (l *Lexer) PopMode() error {
- sLen := len(l.modeStack)
- if sLen == 0 {
- return fmt.Errorf("cannot pop a lex mode from a lex mode stack any more")
- }
- l.modeStack = l.modeStack[:sLen-1]
- return nil
-}
-
-func (l *Lexer) read() (byte, bool) {
- if l.state.srcPtr >= len(l.src) {
- return 0, true
- }
-
- b := l.src[l.state.srcPtr]
- l.state.srcPtr++
-
- // Count the token positions.
- // The driver treats LF as the end of a line and counts columns in code points, not bytes.
- // To count in code points, we refer to the First Byte column in Table 3-6.
- //
- // Reference:
- // - [Table 3-6] https://www.unicode.org/versions/Unicode13.0.0/ch03.pdf > Table 3-6. UTF-8 Bit Distribution
- if b < 128 {
- // 0x0A is LF.
- if b == 0x0A {
- l.state.row++
- l.state.col = 0
- } else {
- l.state.col++
- }
- } else if b>>5 == 6 || b>>4 == 14 || b>>3 == 30 {
- l.state.col++
- }
-
- return b, false
-}
-
-// accept saves the current state.
-func (l *Lexer) accept() {
- l.lastAcceptedState = l.state
-}
-
-// revert reverts the lexer state to the last accepted state.
-//
-// We must not call this function consecutively.
-func (l *Lexer) revert() {
- l.state = l.lastAcceptedState
-}
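
The lexer above is typically driven in a simple loop: construct it from a LexSpec implementation and a reader, then call Next until the EOF token arrives. The following is a minimal usage sketch, not part of the deleted files; it assumes a LexSpec value (for example the one returned by NewLexSpec in spec.go below) is already available.

package lexer

import (
	"fmt"
	"io"
)

// Tokenize is a usage sketch only. It drains all tokens from src using the
// given LexSpec implementation and prints them.
func Tokenize(s LexSpec, src io.Reader) error {
	lex, err := NewLexer(s, src)
	if err != nil {
		return err
	}
	for {
		tok, err := lex.Next()
		if err != nil {
			return err
		}
		if tok.EOF {
			return nil
		}
		if tok.Invalid {
			fmt.Printf("invalid token %q at row %d, col %d\n", tok.Lexeme, tok.Row, tok.Col)
			continue
		}
		_, name := s.KindIDAndName(tok.ModeID, tok.ModeKindID)
		fmt.Printf("%s: %q\n", name, tok.Lexeme)
	}
}
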
diff --git a/driver/lexer/lexer_test.go b/driver/lexer/lexer_test.go
deleted file mode 100644
index d32b087..0000000
--- a/driver/lexer/lexer_test.go
+++ /dev/null
@@ -1,932 +0,0 @@
-package lexer
-
-import (
- "bytes"
- "fmt"
- "strings"
- "testing"
-
- "grammar/lexical"
- spec "spec/grammar"
-)
-
-func newLexEntry(modes []string, kind string, pattern string, push string, pop bool) *lexical.LexEntry {
- ms := []spec.LexModeName{}
- for _, m := range modes {
- ms = append(ms, spec.LexModeName(m))
- }
- return &lexical.LexEntry{
- Kind: spec.LexKindName(kind),
- Pattern: pattern,
- Modes: ms,
- Push: spec.LexModeName(push),
- Pop: pop,
- }
-}
-
-func newLexEntryDefaultNOP(kind string, pattern string) *lexical.LexEntry {
- return &lexical.LexEntry{
- Kind: spec.LexKindName(kind),
- Pattern: pattern,
- Modes: []spec.LexModeName{
- spec.LexModeNameDefault,
- },
- }
-}
-
-func newLexEntryFragment(kind string, pattern string) *lexical.LexEntry {
- return &lexical.LexEntry{
- Kind: spec.LexKindName(kind),
- Pattern: pattern,
- Fragment: true,
- }
-}
-
-func newToken(modeID ModeID, kindID KindID, modeKindID ModeKindID, lexeme []byte) *Token {
- return &Token{
- ModeID: modeID,
- KindID: kindID,
- ModeKindID: modeKindID,
- Lexeme: lexeme,
- }
-}
-
-func newTokenDefault(kindID int, modeKindID int, lexeme []byte) *Token {
- return newToken(
- ModeID(spec.LexModeIDDefault.Int()),
- KindID(spec.LexKindID(kindID).Int()),
- ModeKindID(spec.LexModeKindID(modeKindID).Int()),
- lexeme,
- )
-}
-
-func newEOFToken(modeID ModeID, modeName string) *Token {
- return &Token{
- ModeID: modeID,
- ModeKindID: 0,
- EOF: true,
- }
-}
-
-func newEOFTokenDefault() *Token {
- return newEOFToken(ModeID(spec.LexModeIDDefault.Int()), spec.LexModeNameDefault.String())
-}
-
-func newInvalidTokenDefault(lexeme []byte) *Token {
- return &Token{
- ModeID: ModeID(spec.LexModeIDDefault.Int()),
- ModeKindID: 0,
- Lexeme: lexeme,
- Invalid: true,
- }
-}
-
-func withPos(tok *Token, bytePos int, byteLen int, row int, col int) *Token {
- tok.BytePos = bytePos
- tok.ByteLen = byteLen
- tok.Row = row
- tok.Col = col
- return tok
-}
-
-func TestLexer_Next(t *testing.T) {
- test := []struct {
- lspec *lexical.LexSpec
- src string
- tokens []*Token
- passiveModeTran bool
- tran func(l *Lexer, tok *Token) error
- }{
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- newLexEntryDefaultNOP("t1", "(a|b)*abb"),
- newLexEntryDefaultNOP("t2", " +"),
- },
- },
- src: "abb aabb aaabb babb bbabb abbbabb",
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte("abb")), 0, 3, 0, 0),
- withPos(newTokenDefault(2, 2, []byte(" ")), 3, 1, 0, 3),
- withPos(newTokenDefault(1, 1, []byte("aabb")), 4, 4, 0, 4),
- withPos(newTokenDefault(2, 2, []byte(" ")), 8, 1, 0, 8),
- withPos(newTokenDefault(1, 1, []byte("aaabb")), 9, 5, 0, 9),
- withPos(newTokenDefault(2, 2, []byte(" ")), 14, 1, 0, 14),
- withPos(newTokenDefault(1, 1, []byte("babb")), 15, 4, 0, 15),
- withPos(newTokenDefault(2, 2, []byte(" ")), 19, 1, 0, 19),
- withPos(newTokenDefault(1, 1, []byte("bbabb")), 20, 5, 0, 20),
- withPos(newTokenDefault(2, 2, []byte(" ")), 25, 1, 0, 25),
- withPos(newTokenDefault(1, 1, []byte("abbbabb")), 26, 7, 0, 26),
- withPos(newEOFTokenDefault(), 33, 0, 0, 33),
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- newLexEntryDefaultNOP("t1", "b?a+"),
- newLexEntryDefaultNOP("t2", "(ab)?(cd)+"),
- newLexEntryDefaultNOP("t3", " +"),
- },
- },
- src: "ba baaa a aaa abcd abcdcdcd cd cdcdcd",
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte("ba")), 0, 2, 0, 0),
- withPos(newTokenDefault(3, 3, []byte(" ")), 2, 1, 0, 2),
- withPos(newTokenDefault(1, 1, []byte("baaa")), 3, 4, 0, 3),
- withPos(newTokenDefault(3, 3, []byte(" ")), 7, 1, 0, 7),
- withPos(newTokenDefault(1, 1, []byte("a")), 8, 1, 0, 8),
- withPos(newTokenDefault(3, 3, []byte(" ")), 9, 1, 0, 9),
- withPos(newTokenDefault(1, 1, []byte("aaa")), 10, 3, 0, 10),
- withPos(newTokenDefault(3, 3, []byte(" ")), 13, 1, 0, 13),
- withPos(newTokenDefault(2, 2, []byte("abcd")), 14, 4, 0, 14),
- withPos(newTokenDefault(3, 3, []byte(" ")), 18, 1, 0, 18),
- withPos(newTokenDefault(2, 2, []byte("abcdcdcd")), 19, 8, 0, 19),
- withPos(newTokenDefault(3, 3, []byte(" ")), 27, 1, 0, 27),
- withPos(newTokenDefault(2, 2, []byte("cd")), 28, 2, 0, 28),
- withPos(newTokenDefault(3, 3, []byte(" ")), 30, 1, 0, 30),
- withPos(newTokenDefault(2, 2, []byte("cdcdcd")), 31, 6, 0, 31),
- withPos(newEOFTokenDefault(), 37, 0, 0, 37),
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- newLexEntryDefaultNOP("t1", "."),
- },
- },
- src: string([]byte{
- 0x00,
- 0x7f,
- 0xc2, 0x80,
- 0xdf, 0xbf,
- 0xe1, 0x80, 0x80,
- 0xec, 0xbf, 0xbf,
- 0xed, 0x80, 0x80,
- 0xed, 0x9f, 0xbf,
- 0xee, 0x80, 0x80,
- 0xef, 0xbf, 0xbf,
- 0xf0, 0x90, 0x80, 0x80,
- 0xf0, 0xbf, 0xbf, 0xbf,
- 0xf1, 0x80, 0x80, 0x80,
- 0xf3, 0xbf, 0xbf, 0xbf,
- 0xf4, 0x80, 0x80, 0x80,
- 0xf4, 0x8f, 0xbf, 0xbf,
- }),
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte{0x00}), 0, 1, 0, 0),
- withPos(newTokenDefault(1, 1, []byte{0x7f}), 1, 1, 0, 1),
- withPos(newTokenDefault(1, 1, []byte{0xc2, 0x80}), 2, 2, 0, 2),
- withPos(newTokenDefault(1, 1, []byte{0xdf, 0xbf}), 4, 2, 0, 3),
- withPos(newTokenDefault(1, 1, []byte{0xe1, 0x80, 0x80}), 6, 3, 0, 4),
- withPos(newTokenDefault(1, 1, []byte{0xec, 0xbf, 0xbf}), 9, 3, 0, 5),
- withPos(newTokenDefault(1, 1, []byte{0xed, 0x80, 0x80}), 12, 3, 0, 6),
- withPos(newTokenDefault(1, 1, []byte{0xed, 0x9f, 0xbf}), 15, 3, 0, 7),
- withPos(newTokenDefault(1, 1, []byte{0xee, 0x80, 0x80}), 18, 3, 0, 8),
- withPos(newTokenDefault(1, 1, []byte{0xef, 0xbf, 0xbf}), 21, 3, 0, 9),
- withPos(newTokenDefault(1, 1, []byte{0xf0, 0x90, 0x80, 0x80}), 24, 4, 0, 10),
- withPos(newTokenDefault(1, 1, []byte{0xf0, 0xbf, 0xbf, 0xbf}), 28, 4, 0, 11),
- withPos(newTokenDefault(1, 1, []byte{0xf1, 0x80, 0x80, 0x80}), 32, 4, 0, 12),
- withPos(newTokenDefault(1, 1, []byte{0xf3, 0xbf, 0xbf, 0xbf}), 36, 4, 0, 13),
- withPos(newTokenDefault(1, 1, []byte{0xf4, 0x80, 0x80, 0x80}), 40, 4, 0, 14),
- withPos(newTokenDefault(1, 1, []byte{0xf4, 0x8f, 0xbf, 0xbf}), 44, 4, 0, 15),
- withPos(newEOFTokenDefault(), 48, 0, 0, 16),
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- newLexEntryDefaultNOP("t1", "[ab.*+?|()[\\]]"),
- },
- },
- src: "ab.*+?|()[]",
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte("a")), 0, 1, 0, 0),
- withPos(newTokenDefault(1, 1, []byte("b")), 1, 1, 0, 1),
- withPos(newTokenDefault(1, 1, []byte(".")), 2, 1, 0, 2),
- withPos(newTokenDefault(1, 1, []byte("*")), 3, 1, 0, 3),
- withPos(newTokenDefault(1, 1, []byte("+")), 4, 1, 0, 4),
- withPos(newTokenDefault(1, 1, []byte("?")), 5, 1, 0, 5),
- withPos(newTokenDefault(1, 1, []byte("|")), 6, 1, 0, 6),
- withPos(newTokenDefault(1, 1, []byte("(")), 7, 1, 0, 7),
- withPos(newTokenDefault(1, 1, []byte(")")), 8, 1, 0, 8),
- withPos(newTokenDefault(1, 1, []byte("[")), 9, 1, 0, 9),
- withPos(newTokenDefault(1, 1, []byte("]")), 10, 1, 0, 10),
- withPos(newEOFTokenDefault(), 11, 0, 0, 11),
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- // all 1 byte characters except the null character (U+0000)
- //
- // NOTE:
- // vartan cannot handle the null character in patterns because lexical.lexer,
- // specifically read() and restore(), treats a null character as the absence of a symbol.
- // If a pattern needs a null character, use the code point expression \u{0000}.
- newLexEntryDefaultNOP("char_1_byte", "[\x01-\x7f]"),
- },
- },
- src: string([]byte{
- 0x01,
- 0x02,
- 0x7e,
- 0x7f,
- }),
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte{0x01}), 0, 1, 0, 0),
- withPos(newTokenDefault(1, 1, []byte{0x02}), 1, 1, 0, 1),
- withPos(newTokenDefault(1, 1, []byte{0x7e}), 2, 1, 0, 2),
- withPos(newTokenDefault(1, 1, []byte{0x7f}), 3, 1, 0, 3),
- withPos(newEOFTokenDefault(), 4, 0, 0, 4),
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- // all 2 byte characters
- newLexEntryDefaultNOP("char_2_byte", "[\xc2\x80-\xdf\xbf]"),
- },
- },
- src: string([]byte{
- 0xc2, 0x80,
- 0xc2, 0x81,
- 0xdf, 0xbe,
- 0xdf, 0xbf,
- }),
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte{0xc2, 0x80}), 0, 2, 0, 0),
- withPos(newTokenDefault(1, 1, []byte{0xc2, 0x81}), 2, 2, 0, 1),
- withPos(newTokenDefault(1, 1, []byte{0xdf, 0xbe}), 4, 2, 0, 2),
- withPos(newTokenDefault(1, 1, []byte{0xdf, 0xbf}), 6, 2, 0, 3),
- withPos(newEOFTokenDefault(), 8, 0, 0, 4),
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- // All bytes are the same.
- newLexEntryDefaultNOP("char_3_byte", "[\xe0\xa0\x80-\xe0\xa0\x80]"),
- },
- },
- src: string([]byte{
- 0xe0, 0xa0, 0x80,
- }),
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte{0xe0, 0xa0, 0x80}), 0, 3, 0, 0),
- withPos(newEOFTokenDefault(), 3, 0, 0, 1),
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- // The first two bytes are the same.
- newLexEntryDefaultNOP("char_3_byte", "[\xe0\xa0\x80-\xe0\xa0\xbf]"),
- },
- },
- src: string([]byte{
- 0xe0, 0xa0, 0x80,
- 0xe0, 0xa0, 0x81,
- 0xe0, 0xa0, 0xbe,
- 0xe0, 0xa0, 0xbf,
- }),
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte{0xe0, 0xa0, 0x80}), 0, 3, 0, 0),
- withPos(newTokenDefault(1, 1, []byte{0xe0, 0xa0, 0x81}), 3, 3, 0, 1),
- withPos(newTokenDefault(1, 1, []byte{0xe0, 0xa0, 0xbe}), 6, 3, 0, 2),
- withPos(newTokenDefault(1, 1, []byte{0xe0, 0xa0, 0xbf}), 9, 3, 0, 3),
- withPos(newEOFTokenDefault(), 12, 0, 0, 4),
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- // The first byte is the same.
- newLexEntryDefaultNOP("char_3_byte", "[\xe0\xa0\x80-\xe0\xbf\xbf]"),
- },
- },
- src: string([]byte{
- 0xe0, 0xa0, 0x80,
- 0xe0, 0xa0, 0x81,
- 0xe0, 0xbf, 0xbe,
- 0xe0, 0xbf, 0xbf,
- }),
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte{0xe0, 0xa0, 0x80}), 0, 3, 0, 0),
- withPos(newTokenDefault(1, 1, []byte{0xe0, 0xa0, 0x81}), 3, 3, 0, 1),
- withPos(newTokenDefault(1, 1, []byte{0xe0, 0xbf, 0xbe}), 6, 3, 0, 2),
- withPos(newTokenDefault(1, 1, []byte{0xe0, 0xbf, 0xbf}), 9, 3, 0, 3),
- withPos(newEOFTokenDefault(), 12, 0, 0, 4),
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- // all 3 byte characters
- newLexEntryDefaultNOP("char_3_byte", "[\xe0\xa0\x80-\xef\xbf\xbf]"),
- },
- },
- src: string([]byte{
- 0xe0, 0xa0, 0x80,
- 0xe0, 0xa0, 0x81,
- 0xe0, 0xbf, 0xbe,
- 0xe0, 0xbf, 0xbf,
- 0xe1, 0x80, 0x80,
- 0xe1, 0x80, 0x81,
- 0xec, 0xbf, 0xbe,
- 0xec, 0xbf, 0xbf,
- 0xed, 0x80, 0x80,
- 0xed, 0x80, 0x81,
- 0xed, 0x9f, 0xbe,
- 0xed, 0x9f, 0xbf,
- 0xee, 0x80, 0x80,
- 0xee, 0x80, 0x81,
- 0xef, 0xbf, 0xbe,
- 0xef, 0xbf, 0xbf,
- }),
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte{0xe0, 0xa0, 0x80}), 0, 3, 0, 0),
- withPos(newTokenDefault(1, 1, []byte{0xe0, 0xa0, 0x81}), 3, 3, 0, 1),
- withPos(newTokenDefault(1, 1, []byte{0xe0, 0xbf, 0xbe}), 6, 3, 0, 2),
- withPos(newTokenDefault(1, 1, []byte{0xe0, 0xbf, 0xbf}), 9, 3, 0, 3),
- withPos(newTokenDefault(1, 1, []byte{0xe1, 0x80, 0x80}), 12, 3, 0, 4),
- withPos(newTokenDefault(1, 1, []byte{0xe1, 0x80, 0x81}), 15, 3, 0, 5),
- withPos(newTokenDefault(1, 1, []byte{0xec, 0xbf, 0xbe}), 18, 3, 0, 6),
- withPos(newTokenDefault(1, 1, []byte{0xec, 0xbf, 0xbf}), 21, 3, 0, 7),
- withPos(newTokenDefault(1, 1, []byte{0xed, 0x80, 0x80}), 24, 3, 0, 8),
- withPos(newTokenDefault(1, 1, []byte{0xed, 0x80, 0x81}), 27, 3, 0, 9),
- withPos(newTokenDefault(1, 1, []byte{0xed, 0x9f, 0xbe}), 30, 3, 0, 10),
- withPos(newTokenDefault(1, 1, []byte{0xed, 0x9f, 0xbf}), 33, 3, 0, 11),
- withPos(newTokenDefault(1, 1, []byte{0xee, 0x80, 0x80}), 36, 3, 0, 12),
- withPos(newTokenDefault(1, 1, []byte{0xee, 0x80, 0x81}), 39, 3, 0, 13),
- withPos(newTokenDefault(1, 1, []byte{0xef, 0xbf, 0xbe}), 42, 3, 0, 14),
- withPos(newTokenDefault(1, 1, []byte{0xef, 0xbf, 0xbf}), 45, 3, 0, 15),
- withPos(newEOFTokenDefault(), 48, 0, 0, 16),
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- // All bytes are the same.
- newLexEntryDefaultNOP("char_4_byte", "[\xf0\x90\x80\x80-\xf0\x90\x80\x80]"),
- },
- },
- src: string([]byte{
- 0xf0, 0x90, 0x80, 0x80,
- }),
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte{0xf0, 0x90, 0x80, 0x80}), 0, 4, 0, 0),
- withPos(newEOFTokenDefault(), 4, 0, 0, 1),
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- // The first 3 bytes are the same.
- newLexEntryDefaultNOP("char_4_byte", "[\xf0\x90\x80\x80-\xf0\x90\x80\xbf]"),
- },
- },
- src: string([]byte{
- 0xf0, 0x90, 0x80, 0x80,
- 0xf0, 0x90, 0x80, 0x81,
- 0xf0, 0x90, 0x80, 0xbe,
- 0xf0, 0x90, 0x80, 0xbf,
- }),
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte{0xf0, 0x90, 0x80, 0x80}), 0, 4, 0, 0),
- withPos(newTokenDefault(1, 1, []byte{0xf0, 0x90, 0x80, 0x81}), 4, 4, 0, 1),
- withPos(newTokenDefault(1, 1, []byte{0xf0, 0x90, 0x80, 0xbe}), 8, 4, 0, 2),
- withPos(newTokenDefault(1, 1, []byte{0xf0, 0x90, 0x80, 0xbf}), 12, 4, 0, 3),
- withPos(newEOFTokenDefault(), 16, 0, 0, 4),
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- // The first 2 bytes are the same.
- newLexEntryDefaultNOP("char_4_byte", "[\xf0\x90\x80\x80-\xf0\x90\xbf\xbf]"),
- },
- },
- src: string([]byte{
- 0xf0, 0x90, 0x80, 0x80,
- 0xf0, 0x90, 0x80, 0x81,
- 0xf0, 0x90, 0xbf, 0xbe,
- 0xf0, 0x90, 0xbf, 0xbf,
- }),
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte{0xf0, 0x90, 0x80, 0x80}), 0, 4, 0, 0),
- withPos(newTokenDefault(1, 1, []byte{0xf0, 0x90, 0x80, 0x81}), 4, 4, 0, 1),
- withPos(newTokenDefault(1, 1, []byte{0xf0, 0x90, 0xbf, 0xbe}), 8, 4, 0, 2),
- withPos(newTokenDefault(1, 1, []byte{0xf0, 0x90, 0xbf, 0xbf}), 12, 4, 0, 3),
- withPos(newEOFTokenDefault(), 16, 0, 0, 4),
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- // The first byte is the same.
- newLexEntryDefaultNOP("char_4_byte", "[\xf0\x90\x80\x80-\xf0\xbf\xbf\xbf]"),
- },
- },
- src: string([]byte{
- 0xf0, 0x90, 0x80, 0x80,
- 0xf0, 0x90, 0x80, 0x81,
- 0xf0, 0xbf, 0xbf, 0xbe,
- 0xf0, 0xbf, 0xbf, 0xbf,
- }),
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte{0xf0, 0x90, 0x80, 0x80}), 0, 4, 0, 0),
- withPos(newTokenDefault(1, 1, []byte{0xf0, 0x90, 0x80, 0x81}), 4, 4, 0, 1),
- withPos(newTokenDefault(1, 1, []byte{0xf0, 0xbf, 0xbf, 0xbe}), 8, 4, 0, 2),
- withPos(newTokenDefault(1, 1, []byte{0xf0, 0xbf, 0xbf, 0xbf}), 12, 4, 0, 3),
- withPos(newEOFTokenDefault(), 16, 0, 0, 4),
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- // all 4 byte characters
- newLexEntryDefaultNOP("char_4_byte", "[\xf0\x90\x80\x80-\xf4\x8f\xbf\xbf]"),
- },
- },
- src: string([]byte{
- 0xf0, 0x90, 0x80, 0x80,
- 0xf0, 0x90, 0x80, 0x81,
- 0xf0, 0xbf, 0xbf, 0xbe,
- 0xf0, 0xbf, 0xbf, 0xbf,
- 0xf1, 0x80, 0x80, 0x80,
- 0xf1, 0x80, 0x80, 0x81,
- 0xf3, 0xbf, 0xbf, 0xbe,
- 0xf3, 0xbf, 0xbf, 0xbf,
- 0xf4, 0x80, 0x80, 0x80,
- 0xf4, 0x80, 0x80, 0x81,
- 0xf4, 0x8f, 0xbf, 0xbe,
- 0xf4, 0x8f, 0xbf, 0xbf,
- }),
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte{0xf0, 0x90, 0x80, 0x80}), 0, 4, 0, 0),
- withPos(newTokenDefault(1, 1, []byte{0xf0, 0x90, 0x80, 0x81}), 4, 4, 0, 1),
- withPos(newTokenDefault(1, 1, []byte{0xf0, 0xbf, 0xbf, 0xbe}), 8, 4, 0, 2),
- withPos(newTokenDefault(1, 1, []byte{0xf0, 0xbf, 0xbf, 0xbf}), 12, 4, 0, 3),
- withPos(newTokenDefault(1, 1, []byte{0xf1, 0x80, 0x80, 0x80}), 16, 4, 0, 4),
- withPos(newTokenDefault(1, 1, []byte{0xf1, 0x80, 0x80, 0x81}), 20, 4, 0, 5),
- withPos(newTokenDefault(1, 1, []byte{0xf3, 0xbf, 0xbf, 0xbe}), 24, 4, 0, 6),
- withPos(newTokenDefault(1, 1, []byte{0xf3, 0xbf, 0xbf, 0xbf}), 28, 4, 0, 7),
- withPos(newTokenDefault(1, 1, []byte{0xf4, 0x80, 0x80, 0x80}), 32, 4, 0, 8),
- withPos(newTokenDefault(1, 1, []byte{0xf4, 0x80, 0x80, 0x81}), 36, 4, 0, 9),
- withPos(newTokenDefault(1, 1, []byte{0xf4, 0x8f, 0xbf, 0xbe}), 40, 4, 0, 10),
- withPos(newTokenDefault(1, 1, []byte{0xf4, 0x8f, 0xbf, 0xbf}), 44, 4, 0, 11),
- withPos(newEOFTokenDefault(), 48, 0, 0, 12),
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- newLexEntryDefaultNOP("non_number", "[^0-9]+[0-9]"),
- },
- },
- src: "foo9",
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte("foo9")), 0, 4, 0, 0),
- withPos(newEOFTokenDefault(), 4, 0, 0, 4),
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- newLexEntryDefaultNOP("char_1_byte", "\\u{006E}"),
- newLexEntryDefaultNOP("char_2_byte", "\\u{03BD}"),
- newLexEntryDefaultNOP("char_3_byte", "\\u{306B}"),
- newLexEntryDefaultNOP("char_4_byte", "\\u{01F638}"),
- },
- },
- src: "nνに😸",
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte{0x6E}), 0, 1, 0, 0),
- withPos(newTokenDefault(2, 2, []byte{0xCE, 0xBD}), 1, 2, 0, 1),
- withPos(newTokenDefault(3, 3, []byte{0xE3, 0x81, 0xAB}), 3, 3, 0, 2),
- withPos(newTokenDefault(4, 4, []byte{0xF0, 0x9F, 0x98, 0xB8}), 6, 4, 0, 3),
- withPos(newEOFTokenDefault(), 10, 0, 0, 4),
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- newLexEntryDefaultNOP("code_points_alt", "[\\u{006E}\\u{03BD}\\u{306B}\\u{01F638}]"),
- },
- },
- src: "nνに😸",
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte{0x6E}), 0, 1, 0, 0),
- withPos(newTokenDefault(1, 1, []byte{0xCE, 0xBD}), 1, 2, 0, 1),
- withPos(newTokenDefault(1, 1, []byte{0xE3, 0x81, 0xAB}), 3, 3, 0, 2),
- withPos(newTokenDefault(1, 1, []byte{0xF0, 0x9F, 0x98, 0xB8}), 6, 4, 0, 3),
- withPos(newEOFTokenDefault(), 10, 0, 0, 4),
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- newLexEntryDefaultNOP("t1", "\\f{a2c}\\f{d2f}+"),
- newLexEntryFragment("a2c", "abc"),
- newLexEntryFragment("d2f", "def"),
- },
- },
- src: "abcdefdefabcdef",
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte("abcdefdef")), 0, 9, 0, 0),
- withPos(newTokenDefault(1, 1, []byte("abcdef")), 9, 6, 0, 9),
- withPos(newEOFTokenDefault(), 15, 0, 0, 15),
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- newLexEntryDefaultNOP("t1", "(\\f{a2c}|\\f{d2f})+"),
- newLexEntryFragment("a2c", "abc"),
- newLexEntryFragment("d2f", "def"),
- },
- },
- src: "abcdefdefabc",
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte("abcdefdefabc")), 0, 12, 0, 0),
- withPos(newEOFTokenDefault(), 12, 0, 0, 12),
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- newLexEntryDefaultNOP("t1", "\\f{a2c_or_d2f}+"),
- newLexEntryFragment("a2c_or_d2f", "\\f{a2c}|\\f{d2f}"),
- newLexEntryFragment("a2c", "abc"),
- newLexEntryFragment("d2f", "def"),
- },
- },
- src: "abcdefdefabc",
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte("abcdefdefabc")), 0, 12, 0, 0),
- withPos(newEOFTokenDefault(), 12, 0, 0, 12),
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- newLexEntryDefaultNOP("white_space", ` *`),
- newLexEntry([]string{"default"}, "string_open", `"`, "string", false),
- newLexEntry([]string{"string"}, "escape_sequence", `\\[n"\\]`, "", false),
- newLexEntry([]string{"string"}, "char_sequence", `[^"\\]*`, "", false),
- newLexEntry([]string{"string"}, "string_close", `"`, "", true),
- },
- },
- src: `"" "Hello world.\n\"Hello world.\""`,
- tokens: []*Token{
- withPos(newToken(1, 2, 2, []byte(`"`)), 0, 1, 0, 0),
- withPos(newToken(2, 5, 3, []byte(`"`)), 1, 1, 0, 1),
- withPos(newToken(1, 1, 1, []byte(` `)), 2, 1, 0, 2),
- withPos(newToken(1, 2, 2, []byte(`"`)), 3, 1, 0, 3),
- withPos(newToken(2, 4, 2, []byte(`Hello world.`)), 4, 12, 0, 4),
- withPos(newToken(2, 3, 1, []byte(`\n`)), 16, 2, 0, 16),
- withPos(newToken(2, 3, 1, []byte(`\"`)), 18, 2, 0, 18),
- withPos(newToken(2, 4, 2, []byte(`Hello world.`)), 20, 12, 0, 20),
- withPos(newToken(2, 3, 1, []byte(`\"`)), 32, 2, 0, 32),
- withPos(newToken(2, 5, 3, []byte(`"`)), 34, 1, 0, 34),
- withPos(newEOFTokenDefault(), 35, 0, 0, 35),
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- // `white_space` is enabled in multiple modes.
- newLexEntry([]string{"default", "state_a", "state_b"}, "white_space", ` *`, "", false),
- newLexEntry([]string{"default"}, "char_a", `a`, "state_a", false),
- newLexEntry([]string{"state_a"}, "char_b", `b`, "state_b", false),
- newLexEntry([]string{"state_a"}, "back_from_a", `<`, "", true),
- newLexEntry([]string{"state_b"}, "back_from_b", `<`, "", true),
- },
- },
- src: ` a b < < `,
- tokens: []*Token{
- withPos(newToken(1, 1, 1, []byte(` `)), 0, 1, 0, 0),
- withPos(newToken(1, 2, 2, []byte(`a`)), 1, 1, 0, 1),
- withPos(newToken(2, 1, 1, []byte(` `)), 2, 1, 0, 2),
- withPos(newToken(2, 3, 2, []byte(`b`)), 3, 1, 0, 3),
- withPos(newToken(3, 1, 1, []byte(` `)), 4, 1, 0, 4),
- withPos(newToken(3, 5, 2, []byte(`<`)), 5, 1, 0, 5),
- withPos(newToken(2, 1, 1, []byte(` `)), 6, 1, 0, 6),
- withPos(newToken(2, 4, 3, []byte(`<`)), 7, 1, 0, 7),
- withPos(newToken(1, 1, 1, []byte(` `)), 8, 1, 0, 8),
- withPos(newEOFTokenDefault(), 9, 0, 0, 9),
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- newLexEntry([]string{"default", "mode_1", "mode_2"}, "white_space", ` *`, "", false),
- newLexEntry([]string{"default"}, "char", `.`, "", false),
- newLexEntry([]string{"default"}, "push_1", `-> 1`, "", false),
- newLexEntry([]string{"mode_1"}, "push_2", `-> 2`, "", false),
- newLexEntry([]string{"mode_1"}, "pop_1", `<-`, "", false),
- newLexEntry([]string{"mode_2"}, "pop_2", `<-`, "", false),
- },
- },
- src: `-> 1 -> 2 <- <- a`,
- tokens: []*Token{
- withPos(newToken(1, 3, 3, []byte(`-> 1`)), 0, 4, 0, 0),
- withPos(newToken(2, 1, 1, []byte(` `)), 4, 1, 0, 4),
- withPos(newToken(2, 4, 2, []byte(`-> 2`)), 5, 4, 0, 5),
- withPos(newToken(3, 1, 1, []byte(` `)), 9, 1, 0, 9),
- withPos(newToken(3, 6, 2, []byte(`<-`)), 10, 2, 0, 10),
- withPos(newToken(2, 1, 1, []byte(` `)), 12, 1, 0, 12),
- withPos(newToken(2, 5, 3, []byte(`<-`)), 13, 2, 0, 13),
- withPos(newToken(1, 1, 1, []byte(` `)), 15, 1, 0, 15),
- withPos(newToken(1, 2, 2, []byte(`a`)), 16, 1, 0, 16),
- withPos(newEOFTokenDefault(), 17, 0, 0, 17),
- },
- passiveModeTran: true,
- tran: func(l *Lexer, tok *Token) error {
- switch l.spec.ModeName(l.Mode()) {
- case "default":
- switch tok.KindID {
- case 3: // push_1
- l.PushMode(2)
- }
- case "mode_1":
- switch tok.KindID {
- case 4: // push_2
- l.PushMode(3)
- case 5: // pop_1
- return l.PopMode()
- }
- case "mode_2":
- switch tok.KindID {
- case 6: // pop_2
- return l.PopMode()
- }
- }
- return nil
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- newLexEntry([]string{"default", "mode_1", "mode_2"}, "white_space", ` *`, "", false),
- newLexEntry([]string{"default"}, "char", `.`, "", false),
- newLexEntry([]string{"default"}, "push_1", `-> 1`, "mode_1", false),
- newLexEntry([]string{"mode_1"}, "push_2", `-> 2`, "", false),
- newLexEntry([]string{"mode_1"}, "pop_1", `<-`, "", false),
- newLexEntry([]string{"mode_2"}, "pop_2", `<-`, "", true),
- },
- },
- src: `-> 1 -> 2 <- <- a`,
- tokens: []*Token{
- withPos(newToken(1, 3, 3, []byte(`-> 1`)), 0, 4, 0, 0),
- withPos(newToken(2, 1, 1, []byte(` `)), 4, 1, 0, 4),
- withPos(newToken(2, 4, 2, []byte(`-> 2`)), 5, 4, 0, 5),
- withPos(newToken(3, 1, 1, []byte(` `)), 9, 1, 0, 9),
- withPos(newToken(3, 6, 2, []byte(`<-`)), 10, 2, 0, 10),
- withPos(newToken(2, 1, 1, []byte(` `)), 12, 1, 0, 12),
- withPos(newToken(2, 5, 3, []byte(`<-`)), 13, 2, 0, 13),
- withPos(newToken(1, 1, 1, []byte(` `)), 15, 1, 0, 15),
- withPos(newToken(1, 2, 2, []byte(`a`)), 16, 1, 0, 16),
- withPos(newEOFTokenDefault(), 17, 0, 0, 17),
- },
- // The active mode transition and an external transition function can be used together.
- passiveModeTran: false,
- tran: func(l *Lexer, tok *Token) error {
- switch l.spec.ModeName(l.Mode()) {
- case "mode_1":
- switch tok.KindID {
- case 4: // push_2
- l.PushMode(3)
- case 5: // pop_1
- return l.PopMode()
- }
- }
- return nil
- },
- },
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- newLexEntryDefaultNOP("dot", spec.EscapePattern(`.`)),
- newLexEntryDefaultNOP("star", spec.EscapePattern(`*`)),
- newLexEntryDefaultNOP("plus", spec.EscapePattern(`+`)),
- newLexEntryDefaultNOP("question", spec.EscapePattern(`?`)),
- newLexEntryDefaultNOP("vbar", spec.EscapePattern(`|`)),
- newLexEntryDefaultNOP("lparen", spec.EscapePattern(`(`)),
- newLexEntryDefaultNOP("rparen", spec.EscapePattern(`)`)),
- newLexEntryDefaultNOP("lbrace", spec.EscapePattern(`[`)),
- newLexEntryDefaultNOP("backslash", spec.EscapePattern(`\`)),
- },
- },
- src: `.*+?|()[\`,
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte(`.`)), 0, 1, 0, 0),
- withPos(newTokenDefault(2, 2, []byte(`*`)), 1, 1, 0, 1),
- withPos(newTokenDefault(3, 3, []byte(`+`)), 2, 1, 0, 2),
- withPos(newTokenDefault(4, 4, []byte(`?`)), 3, 1, 0, 3),
- withPos(newTokenDefault(5, 5, []byte(`|`)), 4, 1, 0, 4),
- withPos(newTokenDefault(6, 6, []byte(`(`)), 5, 1, 0, 5),
- withPos(newTokenDefault(7, 7, []byte(`)`)), 6, 1, 0, 6),
- withPos(newTokenDefault(8, 8, []byte(`[`)), 7, 1, 0, 7),
- withPos(newTokenDefault(9, 9, []byte(`\`)), 8, 1, 0, 8),
- withPos(newEOFTokenDefault(), 9, 0, 0, 9),
- },
- },
- // Character properties are available in a bracket expression.
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- newLexEntryDefaultNOP("letter", `[\p{Letter}]+`),
- newLexEntryDefaultNOP("non_letter", `[^\p{Letter}]+`),
- },
- },
- src: `foo123`,
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte(`foo`)), 0, 3, 0, 0),
- withPos(newTokenDefault(2, 2, []byte(`123`)), 3, 3, 0, 3),
- withPos(newEOFTokenDefault(), 6, 0, 0, 6),
- },
- },
- // The driver can continue lexical analysis even after it detects an invalid token.
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- newLexEntryDefaultNOP("lower", `[a-z]+`),
- },
- },
- src: `foo123bar`,
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte(`foo`)), 0, 3, 0, 0),
- withPos(newInvalidTokenDefault([]byte(`123`)), 3, 3, 0, 3),
- withPos(newTokenDefault(1, 1, []byte(`bar`)), 6, 3, 0, 6),
- withPos(newEOFTokenDefault(), 9, 0, 0, 9),
- },
- },
- // The driver can detect an invalid token immediately preceding an EOF.
- {
- lspec: &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- newLexEntryDefaultNOP("lower", `[a-z]+`),
- },
- },
- src: `foo123`,
- tokens: []*Token{
- withPos(newTokenDefault(1, 1, []byte(`foo`)), 0, 3, 0, 0),
- withPos(newInvalidTokenDefault([]byte(`123`)), 3, 3, 0, 3),
- withPos(newEOFTokenDefault(), 6, 0, 0, 6),
- },
- },
- }
- for i, tt := range test {
- for compLv := lexical.CompressionLevelMin; compLv <= lexical.CompressionLevelMax; compLv++ {
- t.Run(fmt.Sprintf("#%v-%v", i, compLv), func(t *testing.T) {
- clspec, err, cerrs := lexical.Compile(tt.lspec, compLv)
- if err != nil {
- for _, cerr := range cerrs {
- t.Logf("%#v", cerr)
- }
- t.Fatalf("unexpected error: %v", err)
- }
- opts := []LexerOption{}
- if tt.passiveModeTran {
- opts = append(opts, DisableModeTransition())
- }
- lexer, err := NewLexer(NewLexSpec(clspec), strings.NewReader(tt.src), opts...)
- if err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
- for _, eTok := range tt.tokens {
- tok, err := lexer.Next()
- if err != nil {
- t.Log(err)
- break
- }
- testToken(t, eTok, tok)
-
- if tok.EOF {
- break
- }
-
- if tt.tran != nil {
- err := tt.tran(lexer, tok)
- if err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
- }
- }
- })
- }
- }
-}
-
-func TestLexer_Next_WithPosition(t *testing.T) {
- lspec := &lexical.LexSpec{
- Entries: []*lexical.LexEntry{
- newLexEntryDefaultNOP("newline", `\u{000A}+`),
- newLexEntryDefaultNOP("any", `.`),
- },
- }
-
- clspec, err, _ := lexical.Compile(lspec, lexical.CompressionLevelMax)
- if err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
-
- src := string([]byte{
- 0x00,
- 0x7F,
- 0x0A,
-
- 0xC2, 0x80,
- 0xDF, 0xBF,
- 0x0A,
-
- 0xE0, 0xA0, 0x80,
- 0xE0, 0xBF, 0xBF,
- 0xE1, 0x80, 0x80,
- 0xEC, 0xBF, 0xBF,
- 0xED, 0x80, 0x80,
- 0xED, 0x9F, 0xBF,
- 0xEE, 0x80, 0x80,
- 0xEF, 0xBF, 0xBF,
- 0x0A,
-
- 0xF0, 0x90, 0x80, 0x80,
- 0xF0, 0xBF, 0xBF, 0xBF,
- 0xF1, 0x80, 0x80, 0x80,
- 0xF3, 0xBF, 0xBF, 0xBF,
- 0xF4, 0x80, 0x80, 0x80,
- 0xF4, 0x8F, 0xBF, 0xBF,
- 0x0A,
- 0x0A,
- 0x0A,
- })
-
- expected := []*Token{
- withPos(newTokenDefault(2, 2, []byte{0x00}), 0, 1, 0, 0),
- withPos(newTokenDefault(2, 2, []byte{0x7F}), 1, 1, 0, 1),
- withPos(newTokenDefault(1, 1, []byte{0x0A}), 2, 1, 0, 2),
-
- withPos(newTokenDefault(2, 2, []byte{0xC2, 0x80}), 3, 2, 1, 0),
- withPos(newTokenDefault(2, 2, []byte{0xDF, 0xBF}), 5, 2, 1, 1),
- withPos(newTokenDefault(1, 1, []byte{0x0A}), 7, 1, 1, 2),
-
- withPos(newTokenDefault(2, 2, []byte{0xE0, 0xA0, 0x80}), 8, 3, 2, 0),
- withPos(newTokenDefault(2, 2, []byte{0xE0, 0xBF, 0xBF}), 11, 3, 2, 1),
- withPos(newTokenDefault(2, 2, []byte{0xE1, 0x80, 0x80}), 14, 3, 2, 2),
- withPos(newTokenDefault(2, 2, []byte{0xEC, 0xBF, 0xBF}), 17, 3, 2, 3),
- withPos(newTokenDefault(2, 2, []byte{0xED, 0x80, 0x80}), 20, 3, 2, 4),
- withPos(newTokenDefault(2, 2, []byte{0xED, 0x9F, 0xBF}), 23, 3, 2, 5),
- withPos(newTokenDefault(2, 2, []byte{0xEE, 0x80, 0x80}), 26, 3, 2, 6),
- withPos(newTokenDefault(2, 2, []byte{0xEF, 0xBF, 0xBF}), 29, 3, 2, 7),
- withPos(newTokenDefault(1, 1, []byte{0x0A}), 32, 1, 2, 8),
-
- withPos(newTokenDefault(2, 2, []byte{0xF0, 0x90, 0x80, 0x80}), 33, 4, 3, 0),
- withPos(newTokenDefault(2, 2, []byte{0xF0, 0xBF, 0xBF, 0xBF}), 37, 4, 3, 1),
- withPos(newTokenDefault(2, 2, []byte{0xF1, 0x80, 0x80, 0x80}), 41, 4, 3, 2),
- withPos(newTokenDefault(2, 2, []byte{0xF3, 0xBF, 0xBF, 0xBF}), 45, 4, 3, 3),
- withPos(newTokenDefault(2, 2, []byte{0xF4, 0x80, 0x80, 0x80}), 49, 4, 3, 4),
- withPos(newTokenDefault(2, 2, []byte{0xF4, 0x8F, 0xBF, 0xBF}), 53, 4, 3, 5),
- // When a token contains multiple line breaks, the driver sets the token position to
- // the row where the lexeme first appears.
- withPos(newTokenDefault(1, 1, []byte{0x0A, 0x0A, 0x0A}), 57, 3, 3, 6),
-
- withPos(newEOFTokenDefault(), 60, 0, 6, 0),
- }
-
- lexer, err := NewLexer(NewLexSpec(clspec), strings.NewReader(src))
- if err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
-
- for _, eTok := range expected {
- tok, err := lexer.Next()
- if err != nil {
- t.Fatal(err)
- }
-
- testToken(t, eTok, tok)
-
- if tok.EOF {
- break
- }
- }
-}
-
-func testToken(t *testing.T, expected, actual *Token) {
- t.Helper()
-
- if actual.ModeID != expected.ModeID ||
- actual.KindID != expected.KindID ||
- actual.ModeKindID != expected.ModeKindID ||
- !bytes.Equal(actual.Lexeme, expected.Lexeme) ||
- actual.EOF != expected.EOF ||
- actual.Invalid != expected.Invalid {
- t.Fatalf(`unexpected token; want: %+v, got: %+v`, expected, actual)
- }
-
- if actual.BytePos != expected.BytePos || actual.ByteLen != expected.ByteLen ||
- actual.Row != expected.Row || actual.Col != expected.Col {
- t.Fatalf(`unexpected token; want: %+v, got: %+v`, expected, actual)
- }
-}
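
The passiveModeTran cases above drive mode transitions by hand. A minimal sketch of that pattern outside the tests, assuming hypothetical kind IDs and a hypothetical target mode for the push and pop tokens:

package lexer

import "io"

// lexWithManualModes is a sketch of using DisableModeTransition together with
// PushMode and PopMode. The kind IDs (3 and 5) and the target mode (2) are
// hypothetical and depend on the compiled lexical specification.
func lexWithManualModes(s LexSpec, src io.Reader) error {
	lex, err := NewLexer(s, src, DisableModeTransition())
	if err != nil {
		return err
	}
	for {
		tok, err := lex.Next()
		if err != nil {
			return err
		}
		if tok.EOF || tok.Invalid {
			return nil
		}
		// Decide the transition from the current mode and the token kind.
		switch s.ModeName(lex.Mode()) {
		case "default":
			if tok.KindID == 3 { // hypothetical push_1 kind
				lex.PushMode(2)
			}
		case "mode_1":
			if tok.KindID == 5 { // hypothetical pop_1 kind
				if err := lex.PopMode(); err != nil {
					return err
				}
			}
		}
	}
}
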
diff --git a/driver/lexer/spec.go b/driver/lexer/spec.go
deleted file mode 100644
index 4b1a218..0000000
--- a/driver/lexer/spec.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package lexer
-
-import spec "spec/grammar"
-
-type lexSpec struct {
- spec *spec.LexicalSpec
-}
-
-func NewLexSpec(spec *spec.LexicalSpec) *lexSpec {
- return &lexSpec{
- spec: spec,
- }
-}
-
-func (s *lexSpec) InitialMode() ModeID {
- return ModeID(s.spec.InitialModeID.Int())
-}
-
-func (s *lexSpec) Pop(mode ModeID, modeKind ModeKindID) bool {
- return s.spec.Specs[mode].Pop[modeKind] == 1
-}
-
-func (s *lexSpec) Push(mode ModeID, modeKind ModeKindID) (ModeID, bool) {
- modeID := s.spec.Specs[mode].Push[modeKind]
- return ModeID(modeID.Int()), !modeID.IsNil()
-}
-
-func (s *lexSpec) ModeName(mode ModeID) string {
- return s.spec.ModeNames[mode].String()
-}
-
-func (s *lexSpec) InitialState(mode ModeID) StateID {
- return StateID(s.spec.Specs[mode].DFA.InitialStateID.Int())
-}
-
-func (s *lexSpec) NextState(mode ModeID, state StateID, v int) (StateID, bool) {
- switch s.spec.CompressionLevel {
- case 2:
- tran := s.spec.Specs[mode].DFA.Transition
- rowNum := tran.RowNums[state]
- d := tran.UniqueEntries.RowDisplacement[rowNum]
- if tran.UniqueEntries.Bounds[d+v] != rowNum {
- return StateID(tran.UniqueEntries.EmptyValue.Int()), false
- }
- return StateID(tran.UniqueEntries.Entries[d+v].Int()), true
- case 1:
- tran := s.spec.Specs[mode].DFA.Transition
- next := tran.UncompressedUniqueEntries[tran.RowNums[state]*tran.OriginalColCount+v]
- if next == spec.StateIDNil {
- return StateID(spec.StateIDNil.Int()), false
- }
- return StateID(next.Int()), true
- }
-
- modeSpec := s.spec.Specs[mode]
- next := modeSpec.DFA.UncompressedTransition[state.Int()*modeSpec.DFA.ColCount+v]
- if next == spec.StateIDNil {
- return StateID(spec.StateIDNil), false
- }
- return StateID(next.Int()), true
-}
-
-func (s *lexSpec) Accept(mode ModeID, state StateID) (ModeKindID, bool) {
- modeKindID := s.spec.Specs[mode].DFA.AcceptingStates[state]
- return ModeKindID(modeKindID.Int()), modeKindID != spec.LexModeKindIDNil
-}
-
-func (s *lexSpec) KindIDAndName(mode ModeID, modeKind ModeKindID) (KindID, string) {
- kindID := s.spec.KindIDs[mode][modeKind]
- return KindID(kindID.Int()), s.spec.KindNames[kindID].String()
-}
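
At compression level 2, NextState resolves a transition through a single shared entries array: each row is offset by a displacement, and a bounds array records which row owns each slot so that collisions fall back to the nil state. The following is a toy sketch of that lookup with made-up tables, not data from a real compiled spec.

package lexer

// displacedLookup is a sketch of the table lookup performed by NextState at
// compression level 2. The tables below are toy data, not a compiled DFA.
func displacedLookup(state StateID, v int) (StateID, bool) {
	rowNums := []int{0, 1}             // state -> row number
	rowDisplacement := []int{0, 2}     // row number -> offset into bounds/entries
	bounds := []int{0, 0, 1, 1, 1, -1} // which row owns each slot
	entries := []StateID{1, 0, 1, 0, 1, 0}
	stateIDNil := StateID(0)

	rowNum := rowNums[state]
	d := rowDisplacement[rowNum]
	if d+v >= len(bounds) || bounds[d+v] != rowNum {
		return stateIDNil, false
	}
	return entries[d+v], true
}
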
diff --git a/driver/lexer/template.go b/driver/lexer/template.go
deleted file mode 100644
index c5d0778..0000000
--- a/driver/lexer/template.go
+++ /dev/null
@@ -1,760 +0,0 @@
-package lexer
-
-import (
- "bytes"
- _ "embed"
- "fmt"
- "go/ast"
- "go/format"
- "go/parser"
- "go/token"
- "strings"
- "text/template"
-
- "grammar/lexical"
- spec "spec/grammar"
-)
-
-//go:embed lexer.go
-var lexerCoreSrc string
-
-func GenLexer(lexSpec *spec.LexicalSpec, pkgName string) ([]byte, error) {
- var lexerSrc string
- {
- fset := token.NewFileSet()
- f, err := parser.ParseFile(fset, "lexer.go", lexerCoreSrc, parser.ParseComments)
- if err != nil {
- return nil, err
- }
-
- var b strings.Builder
- err = format.Node(&b, fset, f)
- if err != nil {
- return nil, err
- }
-
- lexerSrc = b.String()
- }
-
- var modeIDsSrc string
- {
- var b strings.Builder
- fmt.Fprintf(&b, "const (\n")
- for i, k := range lexSpec.ModeNames {
- if i == spec.LexModeIDNil.Int() {
- fmt.Fprintf(&b, " ModeIDNil ModeID = %v\n", i)
- continue
- }
- fmt.Fprintf(&b, " ModeID%v ModeID = %v\n", lexical.SnakeCaseToUpperCamelCase(k.String()), i)
- }
- fmt.Fprintf(&b, ")")
-
- modeIDsSrc = b.String()
- }
-
- var modeNamesSrc string
- {
- var b strings.Builder
- fmt.Fprintf(&b, "const (\n")
- for i, k := range lexSpec.ModeNames {
- if i == spec.LexModeIDNil.Int() {
- fmt.Fprintf(&b, " ModeNameNil = %#v\n", "")
- continue
- }
- fmt.Fprintf(&b, " ModeName%v = %#v\n", lexical.SnakeCaseToUpperCamelCase(k.String()), k)
- }
- fmt.Fprintf(&b, ")")
-
- modeNamesSrc = b.String()
- }
-
- var modeIDToNameSrc string
- {
- var b strings.Builder
- fmt.Fprintf(&b, `
-// ModeIDToName converts a mode ID to a name.
-func ModeIDToName(id ModeID) string {
- switch id {`)
- for i, k := range lexSpec.ModeNames {
- if i == spec.LexModeIDNil.Int() {
- fmt.Fprintf(&b, `
- case ModeIDNil:
- return ModeNameNil`)
- continue
- }
- name := lexical.SnakeCaseToUpperCamelCase(k.String())
- fmt.Fprintf(&b, `
- case ModeID%v:
- return ModeName%v`, name, name)
- }
- fmt.Fprintf(&b, `
- }
- return ""
-}
-`)
-
- modeIDToNameSrc = b.String()
- }
-
- var kindIDsSrc string
- {
- var b strings.Builder
- fmt.Fprintf(&b, "const (\n")
- for i, k := range lexSpec.KindNames {
- if i == spec.LexKindIDNil.Int() {
- fmt.Fprintf(&b, " KindIDNil KindID = %v\n", i)
- continue
- }
- fmt.Fprintf(&b, " KindID%v KindID = %v\n", lexical.SnakeCaseToUpperCamelCase(k.String()), i)
- }
- fmt.Fprintf(&b, ")")
-
- kindIDsSrc = b.String()
- }
-
- var kindNamesSrc string
- {
- var b strings.Builder
- fmt.Fprintf(&b, "const (\n")
- fmt.Fprintf(&b, " KindNameNil = %#v\n", "")
- for _, k := range lexSpec.KindNames[1:] {
- fmt.Fprintf(&b, " KindName%v = %#v\n", lexical.SnakeCaseToUpperCamelCase(k.String()), k)
- }
- fmt.Fprintf(&b, ")")
-
- kindNamesSrc = b.String()
- }
-
- var kindIDToNameSrc string
- {
- var b strings.Builder
- fmt.Fprintf(&b, `
-// KindIDToName converts a kind ID to a name.
-func KindIDToName(id KindID) string {
- switch id {`)
- for i, k := range lexSpec.KindNames {
- if i == spec.LexKindIDNil.Int() {
- fmt.Fprintf(&b, `
- case KindIDNil:
- return KindNameNil`)
- continue
- }
- name := lexical.SnakeCaseToUpperCamelCase(k.String())
- fmt.Fprintf(&b, `
- case KindID%v:
- return KindName%v`, name, name)
- }
- fmt.Fprintf(&b, `
- }
- return ""
-}
-`)
-
- kindIDToNameSrc = b.String()
- }
-
- var specSrc string
- {
- t, err := template.New("").Funcs(genTemplateFuncs(lexSpec)).Parse(lexSpecTemplate)
- if err != nil {
- return nil, err
- }
-
- var b strings.Builder
- err = t.Execute(&b, map[string]interface{}{
- "initialModeID": "ModeID" + lexical.SnakeCaseToUpperCamelCase(lexSpec.ModeNames[lexSpec.InitialModeID].String()),
- "modeIDNil": "ModeIDNil",
- "modeKindIDNil": spec.LexModeKindIDNil,
- "stateIDNil": spec.StateIDNil,
- "compressionLevel": lexSpec.CompressionLevel,
- })
- if err != nil {
- return nil, err
- }
-
- specSrc = b.String()
- }
-
- var src string
- {
- tmpl := `// Code generated by vartan-go. DO NOT EDIT.
-{{ .lexerSrc }}
-
-{{ .modeIDsSrc }}
-
-{{ .modeNamesSrc }}
-
-{{ .modeIDToNameSrc }}
-
-{{ .kindIDsSrc }}
-
-{{ .kindNamesSrc }}
-
-{{ .kindIDToNameSrc }}
-
-{{ .specSrc }}
-`
-
- t, err := template.New("").Parse(tmpl)
- if err != nil {
- return nil, err
- }
-
- var b strings.Builder
- err = t.Execute(&b, map[string]string{
- "lexerSrc": lexerSrc,
- "modeIDsSrc": modeIDsSrc,
- "modeNamesSrc": modeNamesSrc,
- "modeIDToNameSrc": modeIDToNameSrc,
- "kindIDsSrc": kindIDsSrc,
- "kindNamesSrc": kindNamesSrc,
- "kindIDToNameSrc": kindIDToNameSrc,
- "specSrc": specSrc,
- })
- if err != nil {
- return nil, err
- }
-
- src = b.String()
- }
-
- fset := token.NewFileSet()
- f, err := parser.ParseFile(fset, "", src, parser.ParseComments)
- if err != nil {
- return nil, err
- }
-
- f.Name = ast.NewIdent(pkgName)
-
- var b bytes.Buffer
- err = format.Node(&b, fset, f)
- if err != nil {
- return nil, err
- }
-
- return b.Bytes(), nil
-}
-
-const lexSpecTemplate = `
-type lexSpec struct {
- pop [][]bool
- push [][]ModeID
- modeNames []string
- initialStates []StateID
- acceptances [][]ModeKindID
- kindIDs [][]KindID
- kindNames []string
- initialModeID ModeID
- modeIDNil ModeID
- modeKindIDNil ModeKindID
- stateIDNil StateID
-
- rowNums [][]int
- rowDisplacements [][]int
- bounds [][]int
- entries [][]StateID
- originalColCounts []int
-}
-
-func NewLexSpec() *lexSpec {
- return &lexSpec{
- pop: {{ genPopTable }},
- push: {{ genPushTable }},
- modeNames: {{ genModeNameTable }},
- initialStates: {{ genInitialStateTable }},
- acceptances: {{ genAcceptTable }},
- kindIDs: {{ genKindIDTable }},
- kindNames: {{ genKindNameTable }},
- initialModeID: {{ .initialModeID }},
- modeIDNil: {{ .modeIDNil }},
- modeKindIDNil: {{ .modeKindIDNil }},
- stateIDNil: {{ .stateIDNil }},
-
- rowNums: {{ genRowNums }},
- rowDisplacements: {{ genRowDisplacements }},
- bounds: {{ genBounds }},
- entries: {{ genEntries }},
- originalColCounts: {{ genOriginalColCounts }},
- }
-}
-
-func (s *lexSpec) InitialMode() ModeID {
- return s.initialModeID
-}
-
-func (s *lexSpec) Pop(mode ModeID, modeKind ModeKindID) bool {
- return s.pop[mode][modeKind]
-}
-
-func (s *lexSpec) Push(mode ModeID, modeKind ModeKindID) (ModeID, bool) {
- id := s.push[mode][modeKind]
- return id, id != s.modeIDNil
-}
-
-func (s *lexSpec) ModeName(mode ModeID) string {
- return s.modeNames[mode]
-}
-
-func (s *lexSpec) InitialState(mode ModeID) StateID {
- return s.initialStates[mode]
-}
-
-func (s *lexSpec) NextState(mode ModeID, state StateID, v int) (StateID, bool) {
-{{ if eq .compressionLevel 2 -}}
- rowNum := s.rowNums[mode][state]
- d := s.rowDisplacements[mode][rowNum]
- if s.bounds[mode][d+v] != rowNum {
- return s.stateIDNil, false
- }
- return s.entries[mode][d+v], true
-{{ else if eq .compressionLevel 1 -}}
- rowNum := s.rowNums[mode][state]
- colCount := s.originalColCounts[mode]
- next := s.entries[mode][rowNum*colCount+v]
- if next == s.stateIDNil {
- return s.stateIDNil, false
- }
- return next, true
-{{ else -}}
- colCount := s.originalColCounts[mode]
- next := s.entries[mode][int(state)*colCount+v]
- if next == s.stateIDNil {
- return s.stateIDNil, false
- }
- return next, true
-{{ end -}}
-}
-
-func (s *lexSpec) Accept(mode ModeID, state StateID) (ModeKindID, bool) {
- id := s.acceptances[mode][state]
- return id, id != s.modeKindIDNil
-}
-
-func (s *lexSpec) KindIDAndName(mode ModeID, modeKind ModeKindID) (KindID, string) {
- id := s.kindIDs[mode][modeKind]
- return id, s.kindNames[id]
-}
-`
-
-func genTemplateFuncs(lexSpec *spec.LexicalSpec) template.FuncMap {
- fns := template.FuncMap{
- "genPopTable": func() string {
- var b strings.Builder
- fmt.Fprintf(&b, "[][]bool{\n")
- for i, s := range lexSpec.Specs {
- if i == spec.LexModeIDNil.Int() {
- fmt.Fprintf(&b, "nil,\n")
- continue
- }
-
- c := 1
- fmt.Fprintf(&b, "{\n")
- for _, v := range s.Pop {
- fmt.Fprintf(&b, "%v, ", v != 0)
-
- if c == 20 {
- fmt.Fprintf(&b, "\n")
- c = 1
- } else {
- c++
- }
- }
- if c > 1 {
- fmt.Fprintf(&b, "\n")
- }
- fmt.Fprintf(&b, "},\n")
- }
- fmt.Fprintf(&b, "}")
- return b.String()
- },
- "genPushTable": func() string {
- var b strings.Builder
- fmt.Fprintf(&b, "[][]ModeID{\n")
- for i, s := range lexSpec.Specs {
- if i == spec.LexModeIDNil.Int() {
- fmt.Fprintf(&b, "nil,\n")
- continue
- }
-
- c := 1
- fmt.Fprintf(&b, "{\n")
- for _, v := range s.Push {
- fmt.Fprintf(&b, "%v,", v)
-
- if c == 20 {
- fmt.Fprintf(&b, "\n")
- c = 1
- } else {
- c++
- }
- }
- if c > 1 {
- fmt.Fprintf(&b, "\n")
- }
- fmt.Fprintf(&b, "},\n")
- }
- fmt.Fprintf(&b, "}")
- return b.String()
- },
- "genModeNameTable": func() string {
- var b strings.Builder
- fmt.Fprintf(&b, "[]string{\n")
- for i, name := range lexSpec.ModeNames {
- if i == spec.LexModeIDNil.Int() {
- fmt.Fprintf(&b, "ModeNameNil,\n")
- continue
- }
- fmt.Fprintf(&b, "ModeName%v,\n", lexical.SnakeCaseToUpperCamelCase(name.String()))
- }
- fmt.Fprintf(&b, "}")
- return b.String()
- },
- "genInitialStateTable": func() string {
- var b strings.Builder
- fmt.Fprintf(&b, "[]StateID{\n")
- for i, s := range lexSpec.Specs {
- if i == spec.LexModeIDNil.Int() {
- fmt.Fprintf(&b, "%v,\n", spec.StateIDNil)
- continue
- }
-
- fmt.Fprintf(&b, "%v,\n", s.DFA.InitialStateID)
- }
- fmt.Fprintf(&b, "}")
- return b.String()
- },
- "genAcceptTable": func() string {
- var b strings.Builder
- fmt.Fprintf(&b, "[][]ModeKindID{\n")
- for i, s := range lexSpec.Specs {
- if i == spec.LexModeIDNil.Int() {
- fmt.Fprintf(&b, "nil,\n")
- continue
- }
-
- c := 1
- fmt.Fprintf(&b, "{\n")
- for _, v := range s.DFA.AcceptingStates {
- fmt.Fprintf(&b, "%v,", v)
-
- if c == 20 {
- fmt.Fprintf(&b, "\n")
- c = 1
- } else {
- c++
- }
- }
- if c > 1 {
- fmt.Fprintf(&b, "\n")
- }
- fmt.Fprintf(&b, "},\n")
- }
- fmt.Fprintf(&b, "}")
- return b.String()
- },
- "genKindIDTable": func() string {
- var b strings.Builder
- fmt.Fprintf(&b, "[][]KindID{\n")
- for i, ids := range lexSpec.KindIDs {
- if i == spec.LexModeIDNil.Int() {
- fmt.Fprintf(&b, "nil,\n")
- continue
- }
-
- fmt.Fprintf(&b, "{\n")
- for j, id := range ids {
- if j == spec.LexModeKindIDNil.Int() {
- fmt.Fprintf(&b, "KindIDNil,\n")
- continue
- }
- fmt.Fprintf(&b, "KindID%v,\n", lexical.SnakeCaseToUpperCamelCase(string(lexSpec.KindNames[id].String())))
- }
- fmt.Fprintf(&b, "},\n")
- }
- fmt.Fprintf(&b, "}")
- return b.String()
- },
- "genKindNameTable": func() string {
- var b strings.Builder
- fmt.Fprintf(&b, "[]string{\n")
- for i, name := range lexSpec.KindNames {
- if i == spec.LexKindIDNil.Int() {
- fmt.Fprintf(&b, "KindNameNil,\n")
- continue
- }
- fmt.Fprintf(&b, "KindName%v,\n", lexical.SnakeCaseToUpperCamelCase(name.String()))
- }
- fmt.Fprintf(&b, "}")
- return b.String()
- },
- }
-
- switch lexSpec.CompressionLevel {
- case 2:
- fns["genRowNums"] = func() string {
- var b strings.Builder
- fmt.Fprintf(&b, "[][]int{\n")
- for i, s := range lexSpec.Specs {
- if i == spec.LexModeIDNil.Int() {
- fmt.Fprintf(&b, "nil,\n")
- continue
- }
-
- c := 1
- fmt.Fprintf(&b, "{\n")
- for _, v := range s.DFA.Transition.RowNums {
- fmt.Fprintf(&b, "%v,", v)
-
- if c == 20 {
- fmt.Fprintf(&b, "\n")
- c = 1
- } else {
- c++
- }
- }
- if c > 1 {
- fmt.Fprintf(&b, "\n")
- }
- fmt.Fprintf(&b, "},\n")
- }
- fmt.Fprintf(&b, "}")
- return b.String()
- }
-
- fns["genRowDisplacements"] = func() string {
- var b strings.Builder
- fmt.Fprintf(&b, "[][]int{\n")
- for i, s := range lexSpec.Specs {
- if i == spec.LexModeIDNil.Int() {
- fmt.Fprintf(&b, "nil,\n")
- continue
- }
-
- c := 1
- fmt.Fprintf(&b, "{\n")
- for _, d := range s.DFA.Transition.UniqueEntries.RowDisplacement {
- fmt.Fprintf(&b, "%v,", d)
-
- if c == 20 {
- fmt.Fprintf(&b, "\n")
- c = 1
- } else {
- c++
- }
- }
- if c > 1 {
- fmt.Fprintf(&b, "\n")
- }
- fmt.Fprintf(&b, "},\n")
- }
- fmt.Fprintf(&b, "}")
- return b.String()
- }
-
- fns["genBounds"] = func() string {
- var b strings.Builder
- fmt.Fprintf(&b, "[][]int{\n")
- for i, s := range lexSpec.Specs {
- if i == spec.LexModeIDNil.Int() {
- fmt.Fprintf(&b, "nil,\n")
- continue
- }
-
- c := 1
- fmt.Fprintf(&b, "{\n")
- for _, v := range s.DFA.Transition.UniqueEntries.Bounds {
- fmt.Fprintf(&b, "%v,", v)
-
- if c == 20 {
- fmt.Fprintf(&b, "\n")
- c = 1
- } else {
- c++
- }
- }
- if c > 1 {
- fmt.Fprintf(&b, "\n")
- }
- fmt.Fprintf(&b, "},\n")
- }
- fmt.Fprintf(&b, "}")
- return b.String()
- }
-
- fns["genEntries"] = func() string {
- var b strings.Builder
- fmt.Fprintf(&b, "[][]StateID{\n")
- for i, s := range lexSpec.Specs {
- if i == spec.LexModeIDNil.Int() {
- fmt.Fprintf(&b, "nil,\n")
- continue
- }
-
- c := 1
- fmt.Fprintf(&b, "{\n")
- for _, v := range s.DFA.Transition.UniqueEntries.Entries {
- fmt.Fprintf(&b, "%v,", v)
-
- if c == 20 {
- fmt.Fprintf(&b, "\n")
- c = 1
- } else {
- c++
- }
- }
- if c > 1 {
- fmt.Fprintf(&b, "\n")
- }
- fmt.Fprintf(&b, "},\n")
- }
- fmt.Fprintf(&b, "}")
- return b.String()
- }
-
- fns["genOriginalColCounts"] = func() string {
- return "nil"
- }
- case 1:
- fns["genRowNums"] = func() string {
- var b strings.Builder
- fmt.Fprintf(&b, "[][]int{\n")
- for i, s := range lexSpec.Specs {
- if i == spec.LexModeIDNil.Int() {
- fmt.Fprintf(&b, "nil,\n")
- continue
- }
-
- c := 1
- fmt.Fprintf(&b, "{\n")
- for _, v := range s.DFA.Transition.RowNums {
- fmt.Fprintf(&b, "%v,", v)
-
- if c == 20 {
- fmt.Fprintf(&b, "\n")
- c = 1
- } else {
- c++
- }
- }
- if c > 1 {
- fmt.Fprintf(&b, "\n")
- }
- fmt.Fprintf(&b, "},\n")
- }
- fmt.Fprintf(&b, "}")
- return b.String()
- }
-
- fns["genRowDisplacements"] = func() string {
- return "nil"
- }
-
- fns["genBounds"] = func() string {
- return "nil"
- }
-
- fns["genEntries"] = func() string {
- var b strings.Builder
- fmt.Fprintf(&b, "[][]StateID{\n")
- for i, s := range lexSpec.Specs {
- if i == spec.LexModeIDNil.Int() {
- fmt.Fprintf(&b, "nil,\n")
- continue
- }
-
- c := 1
- fmt.Fprintf(&b, "{\n")
- for _, v := range s.DFA.Transition.UncompressedUniqueEntries {
- fmt.Fprintf(&b, "%v,", v)
-
- if c == 20 {
- fmt.Fprintf(&b, "\n")
- c = 1
- } else {
- c++
- }
- }
- if c > 1 {
- fmt.Fprintf(&b, "\n")
- }
- fmt.Fprintf(&b, "},\n")
- }
- fmt.Fprintf(&b, "}")
- return b.String()
- }
-
- fns["genOriginalColCounts"] = func() string {
- var b strings.Builder
- fmt.Fprintf(&b, "[]int{\n")
- for i, s := range lexSpec.Specs {
- if i == spec.LexModeIDNil.Int() {
- fmt.Fprintf(&b, "0,\n")
- continue
- }
-
- fmt.Fprintf(&b, "%v,\n", s.DFA.Transition.OriginalColCount)
- }
- fmt.Fprintf(&b, "}")
- return b.String()
- }
- default:
- fns["genRowNums"] = func() string {
- return "nil"
- }
-
- fns["genRowDisplacements"] = func() string {
- return "nil"
- }
-
- fns["genBounds"] = func() string {
- return "nil"
- }
-
- fns["genEntries"] = func() string {
- var b strings.Builder
- fmt.Fprintf(&b, "[][]StateID{\n")
- for i, s := range lexSpec.Specs {
- if i == spec.LexModeIDNil.Int() {
- fmt.Fprintf(&b, "nil,\n")
- continue
- }
-
- c := 1
- fmt.Fprintf(&b, "{\n")
- for _, v := range s.DFA.UncompressedTransition {
- fmt.Fprintf(&b, "%v,", v)
-
- if c == 20 {
- fmt.Fprintf(&b, "\n")
- c = 1
- } else {
- c++
- }
- }
- if c > 1 {
- fmt.Fprintf(&b, "\n")
- }
- fmt.Fprintf(&b, "},\n")
- }
- fmt.Fprintf(&b, "}")
- return b.String()
- }
-
- fns["genOriginalColCounts"] = func() string {
- var b strings.Builder
- fmt.Fprintf(&b, "[]int{\n")
- for i, s := range lexSpec.Specs {
- if i == spec.LexModeIDNil.Int() {
- fmt.Fprintf(&b, "0,\n")
- continue
- }
-
- fmt.Fprintf(&b, "%v,\n", s.DFA.ColCount)
- }
- fmt.Fprintf(&b, "}")
- return b.String()
- }
- }
-
- return fns
-}
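
GenLexer is what turns a compiled lexical specification into a standalone lexer source file. The following is a sketch of calling it; the import paths assume the module layout shown in the files above, the single "word" entry mirrors newLexEntryDefaultNOP in lexer_test.go, and the "mylexer" package name and output file name are arbitrary choices.

package main

import (
	"fmt"
	"os"

	"driver/lexer"
	"grammar/lexical"
	spec "spec/grammar"
)

func main() {
	// Build a small lexical specification, as the tests above do.
	lspec := &lexical.LexSpec{
		Entries: []*lexical.LexEntry{
			{
				Kind:    spec.LexKindName("word"),
				Pattern: `[a-z]+`,
				Modes:   []spec.LexModeName{spec.LexModeNameDefault},
			},
		},
	}
	clspec, err, cerrs := lexical.Compile(lspec, lexical.CompressionLevelMax)
	if err != nil {
		for _, cerr := range cerrs {
			fmt.Println(cerr)
		}
		panic(err)
	}
	// Generate Go source containing the lexer core plus the compiled tables.
	src, err := lexer.GenLexer(clspec, "mylexer")
	if err != nil {
		panic(err)
	}
	if err := os.WriteFile("mylexer_lexer.go", src, 0644); err != nil {
		panic(err)
	}
}
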