-rw-r--r--  README.md            |  20
-rw-r--r--  cmd/maleeni/lex.go   |  32
-rw-r--r--  driver/lexer.go      |  41
-rw-r--r--  driver/lexer_test.go | 424
-rw-r--r--  driver/template.go   |  72
5 files changed, 316 insertions(+), 273 deletions(-)
diff --git a/README.md b/README.md
--- a/README.md
+++ b/README.md
@@ -125,9 +125,9 @@ func main() {
 			break
 		}
 		if tok.Invalid {
-			fmt.Printf("invalid: '%v'\n", string(tok.Lexeme))
+			fmt.Printf("invalid: %#v\n", string(tok.Lexeme))
 		} else {
-			fmt.Printf("valid: %v: '%v'\n", tok.KindName, string(tok.Lexeme))
+			fmt.Printf("valid: %v: %#v\n", KindIDToName(tok.KindID), string(tok.Lexeme))
 		}
 	}
 }
@@ -145,14 +145,14 @@ Now, you can perform the lexical analysis.
 ```sh
 $ echo -n 'I want to believe.' | go run main.go statement_lexer.go
-valid: word: 'I'
-valid: whitespace: ' '
-valid: word: 'want'
-valid: whitespace: ' '
-valid: word: 'to'
-valid: whitespace: ' '
-valid: word: 'believe'
-valid: punctuation: '.'
+valid: word: "I"
+valid: whitespace: " "
+valid: word: "want"
+valid: whitespace: " "
+valid: word: "to"
+valid: whitespace: " "
+valid: word: "believe"
+valid: punctuation: "."
 ```
 
 ## More Practical Usage
diff --git a/cmd/maleeni/lex.go b/cmd/maleeni/lex.go
index 7bc7126..37d7d71 100644
--- a/cmd/maleeni/lex.go
+++ b/cmd/maleeni/lex.go
@@ -67,12 +67,14 @@ func runLex(cmd *cobra.Command, args []string) (retErr error) {
 		defer f.Close()
 		w = f
 	}
+
+	tok2JSON := genTokenJSONMarshaler(clspec)
 	for {
 		tok, err := lex.Next()
 		if err != nil {
 			return err
 		}
-		data, err := json.Marshal(tok)
+		data, err := tok2JSON(tok)
 		if err != nil {
 			return fmt.Errorf("failed to marshal a token; token: %v, error: %v\n", tok, err)
 		}
@@ -104,3 +106,31 @@ func readCompiledLexSpec(path string) (*spec.CompiledLexSpec, error) {
 	}
 	return clspec, nil
 }
+
+func genTokenJSONMarshaler(clspec *spec.CompiledLexSpec) func(tok *driver.Token) ([]byte, error) {
+	return func(tok *driver.Token) ([]byte, error) {
+		return json.Marshal(struct {
+			ModeID     int    `json:"mode_id"`
+			ModeName   string `json:"mode_name"`
+			KindID     int    `json:"kind_id"`
+			ModeKindID int    `json:"mode_kind_id"`
+			KindName   string `json:"kind_name"`
+			Row        int    `json:"row"`
+			Col        int    `json:"col"`
+			Lexeme     string `json:"lexeme"`
+			EOF        bool   `json:"eof"`
+			Invalid    bool   `json:"invalid"`
+		}{
+			ModeID:     tok.ModeID.Int(),
+			ModeName:   clspec.ModeNames[tok.ModeID].String(),
+			KindID:     tok.KindID.Int(),
+			ModeKindID: tok.ModeKindID.Int(),
+			KindName:   clspec.KindNames[tok.KindID].String(),
+			Row:        tok.Row,
+			Col:        tok.Col,
+			Lexeme:     string(tok.Lexeme),
+			EOF:        tok.EOF,
+			Invalid:    tok.Invalid,
+		})
+	}
+}
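Note: the JSON shape `maleeni lex` emits is unchanged by this move; only the source of the names shifts from `Token` into the command. A minimal decoding sketch for a downstream tool, assuming nothing beyond the field tags above (the sample line is illustrative, not captured output):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// tokenJSON mirrors the tags genTokenJSONMarshaler writes, so external
// tools can keep decoding `maleeni lex` output after this change.
type tokenJSON struct {
	ModeID     int    `json:"mode_id"`
	ModeName   string `json:"mode_name"`
	KindID     int    `json:"kind_id"`
	ModeKindID int    `json:"mode_kind_id"`
	KindName   string `json:"kind_name"`
	Row        int    `json:"row"`
	Col        int    `json:"col"`
	Lexeme     string `json:"lexeme"`
	EOF        bool   `json:"eof"`
	Invalid    bool   `json:"invalid"`
}

func main() {
	// An illustrative line in the shape runLex emits.
	line := `{"mode_id":1,"mode_name":"default","kind_id":1,"mode_kind_id":1,"kind_name":"word","row":0,"col":0,"lexeme":"I","eof":false,"invalid":false}`
	var tok tokenJSON
	if err := json.Unmarshal([]byte(line), &tok); err != nil {
		panic(err)
	}
	fmt.Printf("valid: %v: %#v\n", tok.KindName, tok.Lexeme) // valid: word: "I"
}
```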
diff --git a/driver/lexer.go b/driver/lexer.go
index d8230d2..24caaf7 100644
--- a/driver/lexer.go
+++ b/driver/lexer.go
@@ -1,7 +1,6 @@
 package driver
 
 import (
-	"encoding/json"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -47,9 +46,6 @@ type Token struct {
 	// ModeID is an ID of a lex mode.
 	ModeID ModeID
 
-	// ModeName is a name of a lex mode.
-	ModeName string
-
 	// KindID is an ID of a kind. This is unique among all modes.
 	KindID KindID
 
@@ -57,9 +53,6 @@ type Token struct {
 	// Note that you need to use KindID field if you want to identify a kind across all modes.
 	ModeKindID ModeKindID
 
-	// KindName is a name of a lexical kind.
-	KindName string
-
 	// Row is a row number where a lexeme appears.
 	Row int
@@ -77,32 +70,6 @@ type Token struct {
 	Invalid bool
 }
 
-func (t *Token) MarshalJSON() ([]byte, error) {
-	return json.Marshal(struct {
-		ModeID     int    `json:"mode_id"`
-		ModeName   string `json:"mode_name"`
-		KindID     int    `json:"kind_id"`
-		ModeKindID int    `json:"mode_kind_id"`
-		KindName   string `json:"kind_name"`
-		Row        int    `json:"row"`
-		Col        int    `json:"col"`
-		Lexeme     string `json:"lexeme"`
-		EOF        bool   `json:"eof"`
-		Invalid    bool   `json:"invalid"`
-	}{
-		ModeID:     t.ModeID.Int(),
-		ModeName:   t.ModeName,
-		KindID:     t.KindID.Int(),
-		ModeKindID: t.ModeKindID.Int(),
-		KindName:   t.KindName,
-		Row:        t.Row,
-		Col:        t.Col,
-		Lexeme:     string(t.Lexeme),
-		EOF:        t.EOF,
-		Invalid:    t.Invalid,
-	})
-}
-
 type LexerOption func(l *Lexer) error
 
 func DisableModeTransition() LexerOption {
@@ -215,7 +182,6 @@ func (l *Lexer) nextAndTransition() (*Token, error) {
 
 func (l *Lexer) next() (*Token, error) {
 	mode := l.Mode()
-	modeName := l.spec.ModeName(mode)
 	state := l.spec.InitialState(mode)
 	buf := []byte{}
 	unfixedBufLen := 0
@@ -234,7 +200,6 @@ func (l *Lexer) next() (*Token, error) {
 			if len(buf) > 0 {
 				return &Token{
 					ModeID:     mode,
-					ModeName:   modeName,
 					ModeKindID: 0,
 					Lexeme:     buf,
 					Row:        row,
@@ -244,7 +209,6 @@ func (l *Lexer) next() (*Token, error) {
 			}
 			return &Token{
 				ModeID:     mode,
-				ModeName:   modeName,
 				ModeKindID: 0,
 				Row:        0,
 				Col:        0,
@@ -261,7 +225,6 @@ func (l *Lexer) next() (*Token, error) {
 			}
 			return &Token{
 				ModeID:     mode,
-				ModeName:   modeName,
 				ModeKindID: 0,
 				Lexeme:     buf,
 				Row:        row,
@@ -271,13 +234,11 @@ func (l *Lexer) next() (*Token, error) {
 		}
 		state = nextState
 		if modeKindID, ok := l.spec.Accept(mode, state); ok {
-			kindID, kindName := l.spec.KindIDAndName(mode, modeKindID)
+			kindID, _ := l.spec.KindIDAndName(mode, modeKindID)
 			tok = &Token{
 				ModeID:     mode,
-				ModeName:   modeName,
 				KindID:     kindID,
 				ModeKindID: modeKindID,
-				KindName:   kindName,
 				Lexeme:     buf,
 				Row:        row,
 				Col:        col,
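With `MarshalJSON` removed, `driver.Token` carries only IDs and positions; anything that wants a display name resolves it against the spec, the way `genTokenJSONMarshaler` above does. A sketch of that lookup, assuming the `github.com/nihei9/maleeni` import paths; `tokenLabel` is a made-up helper, not part of the package:

```go
package main

import (
	"fmt"

	"github.com/nihei9/maleeni/driver"
	"github.com/nihei9/maleeni/spec"
)

// tokenLabel renders a token using only the IDs kept on driver.Token,
// with names looked up in the compiled spec's name tables.
func tokenLabel(clspec *spec.CompiledLexSpec, tok *driver.Token) string {
	switch {
	case tok.EOF:
		return "eof"
	case tok.Invalid:
		return fmt.Sprintf("invalid: %#v", string(tok.Lexeme))
	default:
		mode := clspec.ModeNames[tok.ModeID].String()
		kind := clspec.KindNames[tok.KindID].String()
		return fmt.Sprintf("%v %v: %#v", mode, kind, string(tok.Lexeme))
	}
}

func main() {
	// The invalid branch needs no spec, so nil is fine for this demo.
	tok := &driver.Token{Invalid: true, Lexeme: []byte("?")}
	fmt.Println(tokenLabel(nil, tok)) // invalid: "?"
}
```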
diff --git a/driver/lexer_test.go b/driver/lexer_test.go
index 549800a..36a3e31 100644
--- a/driver/lexer_test.go
+++ b/driver/lexer_test.go
@@ -42,24 +42,20 @@ func newLexEntryFragment(kind string, pattern string) *spec.LexEntry {
 	}
 }
 
-func newToken(modeID ModeID, modeName string, kindID KindID, modeKindID ModeKindID, kindName string, lexeme []byte) *Token {
+func newToken(modeID ModeID, kindID KindID, modeKindID ModeKindID, lexeme []byte) *Token {
 	return &Token{
 		ModeID:     modeID,
-		ModeName:   modeName,
 		KindID:     kindID,
 		ModeKindID: modeKindID,
-		KindName:   kindName,
 		Lexeme:     lexeme,
 	}
 }
 
-func newTokenDefault(kindID int, modeKindID int, kindName string, lexeme []byte) *Token {
+func newTokenDefault(kindID int, modeKindID int, lexeme []byte) *Token {
 	return newToken(
 		ModeID(spec.LexModeIDDefault.Int()),
-		spec.LexModeNameDefault.String(),
 		KindID(spec.LexKindID(kindID).Int()),
 		ModeKindID(spec.LexModeKindID(modeKindID).Int()),
-		spec.LexKindName(kindName).String(),
 		lexeme,
 	)
 }
@@ -67,7 +63,6 @@ func newTokenDefault(kindID int, modeKindID int, kindName string, lexeme []byte)
 func newEOFToken(modeID ModeID, modeName string) *Token {
 	return &Token{
 		ModeID:     modeID,
-		ModeName:   modeName,
 		ModeKindID: 0,
 		EOF:        true,
 	}
@@ -77,10 +72,9 @@ func newEOFTokenDefault() *Token {
 	return newEOFToken(ModeID(spec.LexModeIDDefault.Int()), spec.LexModeNameDefault.String())
 }
 
-func newInvalidToken(modeID ModeID, modeName string, lexeme []byte) *Token {
+func newInvalidToken(modeID ModeID, lexeme []byte) *Token {
 	return &Token{
 		ModeID:     modeID,
-		ModeName:   modeName,
 		ModeKindID: 0,
 		Lexeme:     lexeme,
 		Invalid:    true,
@@ -111,17 +105,17 @@ func TestLexer_Next(t *testing.T) {
 		},
 		src: "abb aabb aaabb babb bbabb abbbabb",
 		tokens: []*Token{
-			newTokenDefault(1, 1, "t1", []byte("abb")),
-			newTokenDefault(2, 2, "t2", []byte(" ")),
-			newTokenDefault(1, 1, "t1", []byte("aabb")),
-			newTokenDefault(2, 2, "t2", []byte(" ")),
-			newTokenDefault(1, 1, "t1", []byte("aaabb")),
-			newTokenDefault(2, 2, "t2", []byte(" ")),
-			newTokenDefault(1, 1, "t1", []byte("babb")),
-			newTokenDefault(2, 2, "t2", []byte(" ")),
-			newTokenDefault(1, 1, "t1", []byte("bbabb")),
-			newTokenDefault(2, 2, "t2", []byte(" ")),
-			newTokenDefault(1, 1, "t1", []byte("abbbabb")),
+			newTokenDefault(1, 1, []byte("abb")),
+			newTokenDefault(2, 2, []byte(" ")),
+			newTokenDefault(1, 1, []byte("aabb")),
+			newTokenDefault(2, 2, []byte(" ")),
+			newTokenDefault(1, 1, []byte("aaabb")),
+			newTokenDefault(2, 2, []byte(" ")),
+			newTokenDefault(1, 1, []byte("babb")),
+			newTokenDefault(2, 2, []byte(" ")),
+			newTokenDefault(1, 1, []byte("bbabb")),
+			newTokenDefault(2, 2, []byte(" ")),
+			newTokenDefault(1, 1, []byte("abbbabb")),
 			newEOFTokenDefault(),
 		},
 	},
@@ -136,21 +130,21 @@ func TestLexer_Next(t *testing.T) {
 		},
 		src: "ba baaa a aaa abcd abcdcdcd cd cdcdcd",
 		tokens: []*Token{
-			newTokenDefault(1, 1, "t1", []byte("ba")),
-			newTokenDefault(3, 3, "t3", []byte(" ")),
-			newTokenDefault(1, 1, "t1", []byte("baaa")),
-			newTokenDefault(3, 3, "t3", []byte(" ")),
-			newTokenDefault(1, 1, "t1", []byte("a")),
-			newTokenDefault(3, 3, "t3", []byte(" ")),
-			newTokenDefault(1, 1, "t1", []byte("aaa")),
-			newTokenDefault(3, 3, "t3", []byte(" ")),
-			newTokenDefault(2, 2, "t2", []byte("abcd")),
-			newTokenDefault(3, 3, "t3", []byte(" ")),
-			newTokenDefault(2, 2, "t2", []byte("abcdcdcd")),
-			newTokenDefault(3, 3, "t3", []byte(" ")),
-			newTokenDefault(2, 2, "t2", []byte("cd")),
-			newTokenDefault(3, 3, "t3", []byte(" ")),
-			newTokenDefault(2, 2, "t2", []byte("cdcdcd")),
+			newTokenDefault(1, 1, []byte("ba")),
+			newTokenDefault(3, 3, []byte(" ")),
+			newTokenDefault(1, 1, []byte("baaa")),
+			newTokenDefault(3, 3, []byte(" ")),
+			newTokenDefault(1, 1, []byte("a")),
+			newTokenDefault(3, 3, []byte(" ")),
+			newTokenDefault(1, 1, []byte("aaa")),
+			newTokenDefault(3, 3, []byte(" ")),
+			newTokenDefault(2, 2, []byte("abcd")),
+			newTokenDefault(3, 3, []byte(" ")),
+			newTokenDefault(2, 2, []byte("abcdcdcd")),
+			newTokenDefault(3, 3, []byte(" ")),
+			newTokenDefault(2, 2, []byte("cd")),
+			newTokenDefault(3, 3, []byte(" ")),
+			newTokenDefault(2, 2, []byte("cdcdcd")),
 			newEOFTokenDefault(),
 		},
 	},
@@ -180,22 +174,22 @@ func TestLexer_Next(t *testing.T) {
 			0xf4, 0x8f, 0xbf, 0xbf,
 		}),
 		tokens: []*Token{
-			newTokenDefault(1, 1, "t1", []byte{0x00}),
-			newTokenDefault(1, 1, "t1", []byte{0x7f}),
-			newTokenDefault(1, 1, "t1", []byte{0xc2, 0x80}),
-			newTokenDefault(1, 1, "t1", []byte{0xdf, 0xbf}),
-			newTokenDefault(1, 1, "t1", []byte{0xe1, 0x80, 0x80}),
-			newTokenDefault(1, 1, "t1", []byte{0xec, 0xbf, 0xbf}),
-			newTokenDefault(1, 1, "t1", []byte{0xed, 0x80, 0x80}),
-			newTokenDefault(1, 1, "t1", []byte{0xed, 0x9f, 0xbf}),
-			newTokenDefault(1, 1, "t1", []byte{0xee, 0x80, 0x80}),
-			newTokenDefault(1, 1, "t1", []byte{0xef, 0xbf, 0xbf}),
-			newTokenDefault(1, 1, "t1", []byte{0xf0, 0x90, 0x80, 0x80}),
-			newTokenDefault(1, 1, "t1", []byte{0xf0, 0xbf, 0xbf, 0xbf}),
-			newTokenDefault(1, 1, "t1", []byte{0xf1, 0x80, 0x80, 0x80}),
-			newTokenDefault(1, 1, "t1", []byte{0xf3, 0xbf, 0xbf, 0xbf}),
-			newTokenDefault(1, 1, "t1", []byte{0xf4, 0x80, 0x80, 0x80}),
-			newTokenDefault(1, 1, "t1", []byte{0xf4, 0x8f, 0xbf, 0xbf}),
+			newTokenDefault(1, 1, []byte{0x00}),
+			newTokenDefault(1, 1, []byte{0x7f}),
+			newTokenDefault(1, 1, []byte{0xc2, 0x80}),
+			newTokenDefault(1, 1, []byte{0xdf, 0xbf}),
+			newTokenDefault(1, 1, []byte{0xe1, 0x80, 0x80}),
+			newTokenDefault(1, 1, []byte{0xec, 0xbf, 0xbf}),
+			newTokenDefault(1, 1, []byte{0xed, 0x80, 0x80}),
+			newTokenDefault(1, 1, []byte{0xed, 0x9f, 0xbf}),
+			newTokenDefault(1, 1, []byte{0xee, 0x80, 0x80}),
+			newTokenDefault(1, 1, []byte{0xef, 0xbf, 0xbf}),
+			newTokenDefault(1, 1, []byte{0xf0, 0x90, 0x80, 0x80}),
+			newTokenDefault(1, 1, []byte{0xf0, 0xbf, 0xbf, 0xbf}),
+			newTokenDefault(1, 1, []byte{0xf1, 0x80, 0x80, 0x80}),
+			newTokenDefault(1, 1, []byte{0xf3, 0xbf, 0xbf, 0xbf}),
+			newTokenDefault(1, 1, []byte{0xf4, 0x80, 0x80, 0x80}),
+			newTokenDefault(1, 1, []byte{0xf4, 0x8f, 0xbf, 0xbf}),
 			newEOFTokenDefault(),
 		},
 	},
@@ -208,17 +202,17 @@ func TestLexer_Next(t *testing.T) {
 		},
 		src: "ab.*+?|()[]",
 		tokens: []*Token{
-			newTokenDefault(1, 1, "t1", []byte("a")),
-			newTokenDefault(1, 1, "t1", []byte("b")),
-			newTokenDefault(1, 1, "t1", []byte(".")),
-			newTokenDefault(1, 1, "t1", []byte("*")),
-			newTokenDefault(1, 1, "t1", []byte("+")),
-			newTokenDefault(1, 1, "t1", []byte("?")),
-			newTokenDefault(1, 1, "t1", []byte("|")),
-			newTokenDefault(1, 1, "t1", []byte("(")),
-			newTokenDefault(1, 1, "t1", []byte(")")),
-			newTokenDefault(1, 1, "t1", []byte("[")),
-			newTokenDefault(1, 1, "t1", []byte("]")),
+			newTokenDefault(1, 1, []byte("a")),
+			newTokenDefault(1, 1, []byte("b")),
+			newTokenDefault(1, 1, []byte(".")),
+			newTokenDefault(1, 1, []byte("*")),
+			newTokenDefault(1, 1, []byte("+")),
+			newTokenDefault(1, 1, []byte("?")),
+			newTokenDefault(1, 1, []byte("|")),
+			newTokenDefault(1, 1, []byte("(")),
+			newTokenDefault(1, 1, []byte(")")),
+			newTokenDefault(1, 1, []byte("[")),
+			newTokenDefault(1, 1, []byte("]")),
 			newEOFTokenDefault(),
 		},
 	},
@@ -242,10 +236,10 @@ func TestLexer_Next(t *testing.T) {
 			0x7f,
 		}),
 		tokens: []*Token{
-			newTokenDefault(1, 1, "char_1_byte", []byte{0x01}),
-			newTokenDefault(1, 1, "char_1_byte", []byte{0x02}),
-			newTokenDefault(1, 1, "char_1_byte", []byte{0x7e}),
-			newTokenDefault(1, 1, "char_1_byte", []byte{0x7f}),
+			newTokenDefault(1, 1, []byte{0x01}),
+			newTokenDefault(1, 1, []byte{0x02}),
+			newTokenDefault(1, 1, []byte{0x7e}),
+			newTokenDefault(1, 1, []byte{0x7f}),
 			newEOFTokenDefault(),
 		},
 	},
@@ -264,10 +258,10 @@ func TestLexer_Next(t *testing.T) {
 			0xdf, 0xbf,
 		}),
 		tokens: []*Token{
-			newTokenDefault(1, 1, "char_2_byte", []byte{0xc2, 0x80}),
-			newTokenDefault(1, 1, "char_2_byte", []byte{0xc2, 0x81}),
-			newTokenDefault(1, 1, "char_2_byte", []byte{0xdf, 0xbe}),
-			newTokenDefault(1, 1, "char_2_byte", []byte{0xdf, 0xbf}),
+			newTokenDefault(1, 1, []byte{0xc2, 0x80}),
+			newTokenDefault(1, 1, []byte{0xc2, 0x81}),
+			newTokenDefault(1, 1, []byte{0xdf, 0xbe}),
+			newTokenDefault(1, 1, []byte{0xdf, 0xbf}),
 			newEOFTokenDefault(),
 		},
 	},
@@ -283,7 +277,7 @@ func TestLexer_Next(t *testing.T) {
 			0xe0, 0xa0, 0x80,
 		}),
 		tokens: []*Token{
-			newTokenDefault(1, 1, "char_3_byte", []byte{0xe0, 0xa0, 0x80}),
+			newTokenDefault(1, 1, []byte{0xe0, 0xa0, 0x80}),
 			newEOFTokenDefault(),
 		},
 	},
@@ -302,10 +296,10 @@ func TestLexer_Next(t *testing.T) {
 			0xe0, 0xa0, 0xbf,
 		}),
 		tokens: []*Token{
-			newTokenDefault(1, 1, "char_3_byte", []byte{0xe0, 0xa0, 0x80}),
-			newTokenDefault(1, 1, "char_3_byte", []byte{0xe0, 0xa0, 0x81}),
-			newTokenDefault(1, 1, "char_3_byte", []byte{0xe0, 0xa0, 0xbe}),
-			newTokenDefault(1, 1, "char_3_byte", []byte{0xe0, 0xa0, 0xbf}),
+			newTokenDefault(1, 1, []byte{0xe0, 0xa0, 0x80}),
+			newTokenDefault(1, 1, []byte{0xe0, 0xa0, 0x81}),
+			newTokenDefault(1, 1, []byte{0xe0, 0xa0, 0xbe}),
+			newTokenDefault(1, 1, []byte{0xe0, 0xa0, 0xbf}),
 			newEOFTokenDefault(),
 		},
 	},
@@ -324,10 +318,10 @@ func TestLexer_Next(t *testing.T) {
 			0xe0, 0xbf, 0xbf,
 		}),
 		tokens: []*Token{
-			newTokenDefault(1, 1, "char_3_byte", []byte{0xe0, 0xa0, 0x80}),
-			newTokenDefault(1, 1, "char_3_byte", []byte{0xe0, 0xa0, 0x81}),
-			newTokenDefault(1, 1, "char_3_byte", []byte{0xe0, 0xbf, 0xbe}),
-			newTokenDefault(1, 1, "char_3_byte", []byte{0xe0, 0xbf, 0xbf}),
+			newTokenDefault(1, 1, []byte{0xe0, 0xa0, 0x80}),
+			newTokenDefault(1, 1, []byte{0xe0, 0xa0, 0x81}),
+			newTokenDefault(1, 1, []byte{0xe0, 0xbf, 0xbe}),
+			newTokenDefault(1, 1, []byte{0xe0, 0xbf, 0xbf}),
 			newEOFTokenDefault(),
 		},
 	},
@@ -358,22 +352,22 @@ func TestLexer_Next(t *testing.T) {
 			0xef, 0xbf, 0xbf,
 		}),
 		tokens: []*Token{
-			newTokenDefault(1, 1, "char_3_byte", []byte{0xe0, 0xa0, 0x80}),
-			newTokenDefault(1, 1, "char_3_byte", []byte{0xe0, 0xa0, 0x81}),
-			newTokenDefault(1, 1, "char_3_byte", []byte{0xe0, 0xbf, 0xbe}),
-			newTokenDefault(1, 1, "char_3_byte", []byte{0xe0, 0xbf, 0xbf}),
-			newTokenDefault(1, 1, "char_3_byte", []byte{0xe1, 0x80, 0x80}),
-			newTokenDefault(1, 1, "char_3_byte", []byte{0xe1, 0x80, 0x81}),
-			newTokenDefault(1, 1, "char_3_byte", []byte{0xec, 0xbf, 0xbe}),
-			newTokenDefault(1, 1, "char_3_byte", []byte{0xec, 0xbf, 0xbf}),
-			newTokenDefault(1, 1, "char_3_byte", []byte{0xed, 0x80, 0x80}),
-			newTokenDefault(1, 1, "char_3_byte", []byte{0xed, 0x80, 0x81}),
-			newTokenDefault(1, 1, "char_3_byte", []byte{0xed, 0x9f, 0xbe}),
-			newTokenDefault(1, 1, "char_3_byte", []byte{0xed, 0x9f, 0xbf}),
-			newTokenDefault(1, 1, "char_3_byte", []byte{0xee, 0x80, 0x80}),
-			newTokenDefault(1, 1, "char_3_byte", []byte{0xee, 0x80, 0x81}),
-			newTokenDefault(1, 1, "char_3_byte", []byte{0xef, 0xbf, 0xbe}),
-			newTokenDefault(1, 1, "char_3_byte", []byte{0xef, 0xbf, 0xbf}),
+			newTokenDefault(1, 1, []byte{0xe0, 0xa0, 0x80}),
+			newTokenDefault(1, 1, []byte{0xe0, 0xa0, 0x81}),
+			newTokenDefault(1, 1, []byte{0xe0, 0xbf, 0xbe}),
+			newTokenDefault(1, 1, []byte{0xe0, 0xbf, 0xbf}),
+			newTokenDefault(1, 1, []byte{0xe1, 0x80, 0x80}),
+			newTokenDefault(1, 1, []byte{0xe1, 0x80, 0x81}),
+			newTokenDefault(1, 1, []byte{0xec, 0xbf, 0xbe}),
+			newTokenDefault(1, 1, []byte{0xec, 0xbf, 0xbf}),
+			newTokenDefault(1, 1, []byte{0xed, 0x80, 0x80}),
+			newTokenDefault(1, 1, []byte{0xed, 0x80, 0x81}),
+			newTokenDefault(1, 1, []byte{0xed, 0x9f, 0xbe}),
+			newTokenDefault(1, 1, []byte{0xed, 0x9f, 0xbf}),
+			newTokenDefault(1, 1, []byte{0xee, 0x80, 0x80}),
+			newTokenDefault(1, 1, []byte{0xee, 0x80, 0x81}),
+			newTokenDefault(1, 1, []byte{0xef, 0xbf, 0xbe}),
+			newTokenDefault(1, 1, []byte{0xef, 0xbf, 0xbf}),
 			newEOFTokenDefault(),
 		},
 	},
@@ -389,7 +383,7 @@ func TestLexer_Next(t *testing.T) {
 			0xf0, 0x90, 0x80, 0x80,
 		}),
 		tokens: []*Token{
-			newTokenDefault(1, 1, "char_4_byte", []byte{0xf0, 0x90, 0x80, 0x80}),
+			newTokenDefault(1, 1, []byte{0xf0, 0x90, 0x80, 0x80}),
 			newEOFTokenDefault(),
 		},
 	},
@@ -408,10 +402,10 @@ func TestLexer_Next(t *testing.T) {
 			0xf0, 0x90, 0x80, 0xbf,
 		}),
 		tokens: []*Token{
-			newTokenDefault(1, 1, "char_4_byte", []byte{0xf0, 0x90, 0x80, 0x80}),
-			newTokenDefault(1, 1, "char_4_byte", []byte{0xf0, 0x90, 0x80, 0x81}),
-			newTokenDefault(1, 1, "char_4_byte", []byte{0xf0, 0x90, 0x80, 0xbe}),
-			newTokenDefault(1, 1, "char_4_byte", []byte{0xf0, 0x90, 0x80, 0xbf}),
+			newTokenDefault(1, 1, []byte{0xf0, 0x90, 0x80, 0x80}),
+			newTokenDefault(1, 1, []byte{0xf0, 0x90, 0x80, 0x81}),
+			newTokenDefault(1, 1, []byte{0xf0, 0x90, 0x80, 0xbe}),
+			newTokenDefault(1, 1, []byte{0xf0, 0x90, 0x80, 0xbf}),
 			newEOFTokenDefault(),
 		},
 	},
@@ -430,10 +424,10 @@ func TestLexer_Next(t *testing.T) {
 			0xf0, 0x90, 0xbf, 0xbf,
 		}),
 		tokens: []*Token{
-			newTokenDefault(1, 1, "char_4_byte", []byte{0xf0, 0x90, 0x80, 0x80}),
-			newTokenDefault(1, 1, "char_4_byte", []byte{0xf0, 0x90, 0x80, 0x81}),
-			newTokenDefault(1, 1, "char_4_byte", []byte{0xf0, 0x90, 0xbf, 0xbe}),
-			newTokenDefault(1, 1, "char_4_byte", []byte{0xf0, 0x90, 0xbf, 0xbf}),
+			newTokenDefault(1, 1, []byte{0xf0, 0x90, 0x80, 0x80}),
+			newTokenDefault(1, 1, []byte{0xf0, 0x90, 0x80, 0x81}),
+			newTokenDefault(1, 1, []byte{0xf0, 0x90, 0xbf, 0xbe}),
+			newTokenDefault(1, 1, []byte{0xf0, 0x90, 0xbf, 0xbf}),
 			newEOFTokenDefault(),
 		},
 	},
@@ -452,10 +446,10 @@ func TestLexer_Next(t *testing.T) {
 			0xf0, 0xbf, 0xbf, 0xbf,
 		}),
 		tokens: []*Token{
-			newTokenDefault(1, 1, "char_4_byte", []byte{0xf0, 0x90, 0x80, 0x80}),
-			newTokenDefault(1, 1, "char_4_byte", []byte{0xf0, 0x90, 0x80, 0x81}),
-			newTokenDefault(1, 1, "char_4_byte", []byte{0xf0, 0xbf, 0xbf, 0xbe}),
-			newTokenDefault(1, 1, "char_4_byte", []byte{0xf0, 0xbf, 0xbf, 0xbf}),
+			newTokenDefault(1, 1, []byte{0xf0, 0x90, 0x80, 0x80}),
+			newTokenDefault(1, 1, []byte{0xf0, 0x90, 0x80, 0x81}),
+			newTokenDefault(1, 1, []byte{0xf0, 0xbf, 0xbf, 0xbe}),
+			newTokenDefault(1, 1, []byte{0xf0, 0xbf, 0xbf, 0xbf}),
 			newEOFTokenDefault(),
 		},
 	},
@@ -482,18 +476,18 @@ func TestLexer_Next(t *testing.T) {
 			0xf4, 0x8f, 0xbf, 0xbf,
 		}),
 		tokens: []*Token{
-			newTokenDefault(1, 1, "char_4_byte", []byte{0xf0, 0x90, 0x80, 0x80}),
-			newTokenDefault(1, 1, "char_4_byte", []byte{0xf0, 0x90, 0x80, 0x81}),
-			newTokenDefault(1, 1, "char_4_byte", []byte{0xf0, 0xbf, 0xbf, 0xbe}),
-			newTokenDefault(1, 1, "char_4_byte", []byte{0xf0, 0xbf, 0xbf, 0xbf}),
-			newTokenDefault(1, 1, "char_4_byte", []byte{0xf1, 0x80, 0x80, 0x80}),
-			newTokenDefault(1, 1, "char_4_byte", []byte{0xf1, 0x80, 0x80, 0x81}),
-			newTokenDefault(1, 1, "char_4_byte", []byte{0xf3, 0xbf, 0xbf, 0xbe}),
-			newTokenDefault(1, 1, "char_4_byte", []byte{0xf3, 0xbf, 0xbf, 0xbf}),
-			newTokenDefault(1, 1, "char_4_byte", []byte{0xf4, 0x80, 0x80, 0x80}),
-			newTokenDefault(1, 1, "char_4_byte", []byte{0xf4, 0x80, 0x80, 0x81}),
-			newTokenDefault(1, 1, "char_4_byte", []byte{0xf4, 0x8f, 0xbf, 0xbe}),
-			newTokenDefault(1, 1, "char_4_byte", []byte{0xf4, 0x8f, 0xbf, 0xbf}),
+			newTokenDefault(1, 1, []byte{0xf0, 0x90, 0x80, 0x80}),
+			newTokenDefault(1, 1, []byte{0xf0, 0x90, 0x80, 0x81}),
+			newTokenDefault(1, 1, []byte{0xf0, 0xbf, 0xbf, 0xbe}),
+			newTokenDefault(1, 1, []byte{0xf0, 0xbf, 0xbf, 0xbf}),
+			newTokenDefault(1, 1, []byte{0xf1, 0x80, 0x80, 0x80}),
+			newTokenDefault(1, 1, []byte{0xf1, 0x80, 0x80, 0x81}),
+			newTokenDefault(1, 1, []byte{0xf3, 0xbf, 0xbf, 0xbe}),
+			newTokenDefault(1, 1, []byte{0xf3, 0xbf, 0xbf, 0xbf}),
+			newTokenDefault(1, 1, []byte{0xf4, 0x80, 0x80, 0x80}),
+			newTokenDefault(1, 1, []byte{0xf4, 0x80, 0x80, 0x81}),
+			newTokenDefault(1, 1, []byte{0xf4, 0x8f, 0xbf, 0xbe}),
+			newTokenDefault(1, 1, []byte{0xf4, 0x8f, 0xbf, 0xbf}),
 			newEOFTokenDefault(),
 		},
 	},
@@ -506,7 +500,7 @@ func TestLexer_Next(t *testing.T) {
 		},
 		src: "foo9",
 		tokens: []*Token{
-			newTokenDefault(1, 1, "non_number", []byte("foo9")),
+			newTokenDefault(1, 1, []byte("foo9")),
 			newEOFTokenDefault(),
 		},
 	},
@@ -522,10 +516,10 @@ func TestLexer_Next(t *testing.T) {
 		},
 		src: "nνに😸",
 		tokens: []*Token{
-			newTokenDefault(1, 1, "char_1_byte", []byte{0x6E}),
-			newTokenDefault(2, 2, "char_2_byte", []byte{0xCE, 0xBD}),
-			newTokenDefault(3, 3, "char_3_byte", []byte{0xE3, 0x81, 0xAB}),
-			newTokenDefault(4, 4, "char_4_byte", []byte{0xF0, 0x9F, 0x98, 0xB8}),
+			newTokenDefault(1, 1, []byte{0x6E}),
+			newTokenDefault(2, 2, []byte{0xCE, 0xBD}),
+			newTokenDefault(3, 3, []byte{0xE3, 0x81, 0xAB}),
+			newTokenDefault(4, 4, []byte{0xF0, 0x9F, 0x98, 0xB8}),
 			newEOFTokenDefault(),
 		},
 	},
@@ -538,10 +532,10 @@ func TestLexer_Next(t *testing.T) {
 		},
 		src: "nνに😸",
 		tokens: []*Token{
-			newTokenDefault(1, 1, "code_points_alt", []byte{0x6E}),
-			newTokenDefault(1, 1, "code_points_alt", []byte{0xCE, 0xBD}),
-			newTokenDefault(1, 1, "code_points_alt", []byte{0xE3, 0x81, 0xAB}),
-			newTokenDefault(1, 1, "code_points_alt", []byte{0xF0, 0x9F, 0x98, 0xB8}),
+			newTokenDefault(1, 1, []byte{0x6E}),
+			newTokenDefault(1, 1, []byte{0xCE, 0xBD}),
+			newTokenDefault(1, 1, []byte{0xE3, 0x81, 0xAB}),
+			newTokenDefault(1, 1, []byte{0xF0, 0x9F, 0x98, 0xB8}),
 			newEOFTokenDefault(),
 		},
 	},
@@ -556,8 +550,8 @@ func TestLexer_Next(t *testing.T) {
 		},
 		src: "abcdefdefabcdef",
 		tokens: []*Token{
-			newTokenDefault(1, 1, "t1", []byte("abcdefdef")),
-			newTokenDefault(1, 1, "t1", []byte("abcdef")),
+			newTokenDefault(1, 1, []byte("abcdefdef")),
+			newTokenDefault(1, 1, []byte("abcdef")),
 			newEOFTokenDefault(),
 		},
 	},
@@ -572,7 +566,7 @@ func TestLexer_Next(t *testing.T) {
 		},
 		src: "abcdefdefabc",
 		tokens: []*Token{
-			newTokenDefault(1, 1, "t1", []byte("abcdefdefabc")),
+			newTokenDefault(1, 1, []byte("abcdefdefabc")),
 			newEOFTokenDefault(),
 		},
 	},
@@ -588,7 +582,7 @@ func TestLexer_Next(t *testing.T) {
 		},
 		src: "abcdefdefabc",
 		tokens: []*Token{
-			newTokenDefault(1, 1, "t1", []byte("abcdefdefabc")),
+			newTokenDefault(1, 1, []byte("abcdefdefabc")),
 			newEOFTokenDefault(),
 		},
 	},
@@ -605,16 +599,16 @@ func TestLexer_Next(t *testing.T) {
 		},
 		src: `"" "Hello world.\n\"Hello world.\""`,
 		tokens: []*Token{
-			newToken(1, "default", 2, 2, "string_open", []byte(`"`)),
-			newToken(2, "string", 5, 3, "string_close", []byte(`"`)),
-			newToken(1, "default", 1, 1, "white_space", []byte(` `)),
-			newToken(1, "default", 2, 2, "string_open", []byte(`"`)),
-			newToken(2, "string", 4, 2, "char_sequence", []byte(`Hello world.`)),
-			newToken(2, "string", 3, 1, "escape_sequence", []byte(`\n`)),
-			newToken(2, "string", 3, 1, "escape_sequence", []byte(`\"`)),
-			newToken(2, "string", 4, 2, "char_sequence", []byte(`Hello world.`)),
-			newToken(2, "string", 3, 1, "escape_sequence", []byte(`\"`)),
-			newToken(2, "string", 5, 3, "string_close", []byte(`"`)),
+			newToken(1, 2, 2, []byte(`"`)),
+			newToken(2, 5, 3, []byte(`"`)),
+			newToken(1, 1, 1, []byte(` `)),
+			newToken(1, 2, 2, []byte(`"`)),
+			newToken(2, 4, 2, []byte(`Hello world.`)),
+			newToken(2, 3, 1, []byte(`\n`)),
+			newToken(2, 3, 1, []byte(`\"`)),
+			newToken(2, 4, 2, []byte(`Hello world.`)),
+			newToken(2, 3, 1, []byte(`\"`)),
+			newToken(2, 5, 3, []byte(`"`)),
 			newEOFTokenDefault(),
 		},
 	},
@@ -632,15 +626,15 @@ func TestLexer_Next(t *testing.T) {
 		},
 		src: ` a b < < `,
 		tokens: []*Token{
-			newToken(1, "default", 1, 1, "white_space", []byte(` `)),
-			newToken(1, "default", 2, 2, "char_a", []byte(`a`)),
-			newToken(2, "state_a", 1, 1, "white_space", []byte(` `)),
-			newToken(2, "state_a", 3, 2, "char_b", []byte(`b`)),
-			newToken(3, "state_b", 1, 1, "white_space", []byte(` `)),
-			newToken(3, "state_b", 5, 2, "back_from_b", []byte(`<`)),
-			newToken(2, "state_a", 1, 1, "white_space", []byte(` `)),
-			newToken(2, "state_a", 4, 3, "back_from_a", []byte(`<`)),
-			newToken(1, "default", 1, 1, "white_space", []byte(` `)),
+			newToken(1, 1, 1, []byte(` `)),
+			newToken(1, 2, 2, []byte(`a`)),
+			newToken(2, 1, 1, []byte(` `)),
+			newToken(2, 3, 2, []byte(`b`)),
+			newToken(3, 1, 1, []byte(` `)),
+			newToken(3, 5, 2, []byte(`<`)),
+			newToken(2, 1, 1, []byte(` `)),
+			newToken(2, 4, 3, []byte(`<`)),
+			newToken(1, 1, 1, []byte(` `)),
 			newEOFTokenDefault(),
 		},
 	},
@@ -658,35 +652,35 @@ func TestLexer_Next(t *testing.T) {
 		},
 		src: `-> 1 -> 2 <- <- a`,
 		tokens: []*Token{
-			newToken(1, "default", 3, 3, "push_1", []byte(`-> 1`)),
-			newToken(2, "mode_1", 1, 1, "white_space", []byte(` `)),
-			newToken(2, "mode_1", 4, 2, "push_2", []byte(`-> 2`)),
-			newToken(3, "mode_2", 1, 1, "white_space", []byte(` `)),
-			newToken(3, "mode_2", 6, 2, "pop_2", []byte(`<-`)),
-			newToken(2, "mode_1", 1, 1, "white_space", []byte(` `)),
-			newToken(2, "mode_1", 5, 3, "pop_1", []byte(`<-`)),
-			newToken(1, "default", 1, 1, "white_space", []byte(` `)),
-			newToken(1, "default", 2, 2, "char", []byte(`a`)),
+			newToken(1, 3, 3, []byte(`-> 1`)),
+			newToken(2, 1, 1, []byte(` `)),
+			newToken(2, 4, 2, []byte(`-> 2`)),
+			newToken(3, 1, 1, []byte(` `)),
+			newToken(3, 6, 2, []byte(`<-`)),
+			newToken(2, 1, 1, []byte(` `)),
+			newToken(2, 5, 3, []byte(`<-`)),
+			newToken(1, 1, 1, []byte(` `)),
+			newToken(1, 2, 2, []byte(`a`)),
 			newEOFTokenDefault(),
 		},
 		passiveModeTran: true,
 		tran: func(l *Lexer, tok *Token) error {
 			switch l.spec.ModeName(l.Mode()) {
 			case "default":
-				switch tok.KindName {
-				case "push_1":
+				switch tok.KindID {
+				case 3: // push_1
 					l.PushMode(2)
 				}
 			case "mode_1":
-				switch tok.KindName {
-				case "push_2":
+				switch tok.KindID {
+				case 4: // push_2
 					l.PushMode(3)
-				case "pop_1":
+				case 5: // pop_1
 					return l.PopMode()
 				}
 			case "mode_2":
-				switch tok.KindName {
-				case "pop_2":
+				switch tok.KindID {
+				case 6: // pop_2
 					return l.PopMode()
 				}
 			}
@@ -707,15 +701,15 @@ func TestLexer_Next(t *testing.T) {
 		},
 		src: `-> 1 -> 2 <- <- a`,
 		tokens: []*Token{
-			newToken(1, "default", 3, 3, "push_1", []byte(`-> 1`)),
-			newToken(2, "mode_1", 1, 1, "white_space", []byte(` `)),
-			newToken(2, "mode_1", 4, 2, "push_2", []byte(`-> 2`)),
-			newToken(3, "mode_2", 1, 1, "white_space", []byte(` `)),
-			newToken(3, "mode_2", 6, 2, "pop_2", []byte(`<-`)),
-			newToken(2, "mode_1", 1, 1, "white_space", []byte(` `)),
-			newToken(2, "mode_1", 5, 3, "pop_1", []byte(`<-`)),
-			newToken(1, "default", 1, 1, "white_space", []byte(` `)),
-			newToken(1, "default", 2, 2, "char", []byte(`a`)),
+			newToken(1, 3, 3, []byte(`-> 1`)),
+			newToken(2, 1, 1, []byte(` `)),
+			newToken(2, 4, 2, []byte(`-> 2`)),
+			newToken(3, 1, 1, []byte(` `)),
+			newToken(3, 6, 2, []byte(`<-`)),
+			newToken(2, 1, 1, []byte(` `)),
+			newToken(2, 5, 3, []byte(`<-`)),
+			newToken(1, 1, 1, []byte(` `)),
+			newToken(1, 2, 2, []byte(`a`)),
 			newEOFTokenDefault(),
 		},
 		// Active mode transition and an external transition function can be used together.
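The callbacks in the tests above and below now branch on `tok.KindID`, which is unique across all modes, instead of `tok.KindName`. The same pattern outside the tests, as a sketch: the `github.com/nihei9/maleeni/driver` import path is assumed, the literal IDs are just what this test spec happens to assign, and a generated lexer would use its named KindID constants instead.

```go
package callbacks

import "github.com/nihei9/maleeni/driver"

// Tran drives mode transitions from kind IDs, keeping the old kind
// names only as comments. KindID is unique across modes, so no check
// of the current mode is needed here.
func Tran(l *driver.Lexer, tok *driver.Token) error {
	switch tok.KindID {
	case 3: // push_1
		l.PushMode(2)
	case 4: // push_2
		l.PushMode(3)
	case 5, 6: // pop_1, pop_2
		return l.PopMode()
	}
	return nil
}
```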
@@ -723,10 +717,10 @@ func TestLexer_Next(t *testing.T) {
 		tran: func(l *Lexer, tok *Token) error {
 			switch l.spec.ModeName(l.Mode()) {
 			case "mode_1":
-				switch tok.KindName {
-				case "push_2":
+				switch tok.KindID {
+				case 4: // push_2
 					l.PushMode(3)
-				case "pop_1":
+				case 5: // pop_1
 					return l.PopMode()
 				}
 			}
@@ -750,15 +744,15 @@ func TestLexer_Next(t *testing.T) {
 		},
 		src: `.*+?|()[\`,
 		tokens: []*Token{
-			newTokenDefault(1, 1, "dot", []byte(`.`)),
-			newTokenDefault(2, 2, "star", []byte(`*`)),
-			newTokenDefault(3, 3, "plus", []byte(`+`)),
-			newTokenDefault(4, 4, "question", []byte(`?`)),
-			newTokenDefault(5, 5, "vbar", []byte(`|`)),
-			newTokenDefault(6, 6, "lparen", []byte(`(`)),
-			newTokenDefault(7, 7, "rparen", []byte(`)`)),
-			newTokenDefault(8, 8, "lbrace", []byte(`[`)),
-			newTokenDefault(9, 9, "backslash", []byte(`\`)),
+			newTokenDefault(1, 1, []byte(`.`)),
+			newTokenDefault(2, 2, []byte(`*`)),
+			newTokenDefault(3, 3, []byte(`+`)),
+			newTokenDefault(4, 4, []byte(`?`)),
+			newTokenDefault(5, 5, []byte(`|`)),
+			newTokenDefault(6, 6, []byte(`(`)),
+			newTokenDefault(7, 7, []byte(`)`)),
+			newTokenDefault(8, 8, []byte(`[`)),
+			newTokenDefault(9, 9, []byte(`\`)),
 			newEOFTokenDefault(),
 		},
 	},
@@ -847,34 +841,34 @@ func TestLexer_Next_WithPosition(t *testing.T) {
 	})
 
 	expected := []*Token{
-		withPos(newTokenDefault(2, 2, "any", []byte{0x00}), 0, 0),
-		withPos(newTokenDefault(2, 2, "any", []byte{0x7F}), 0, 1),
-		withPos(newTokenDefault(1, 1, "newline", []byte{0x0A}), 0, 2),
-
-		withPos(newTokenDefault(2, 2, "any", []byte{0xC2, 0x80}), 1, 0),
-		withPos(newTokenDefault(2, 2, "any", []byte{0xDF, 0xBF}), 1, 1),
-		withPos(newTokenDefault(1, 1, "newline", []byte{0x0A}), 1, 2),
-
-		withPos(newTokenDefault(2, 2, "any", []byte{0xE0, 0xA0, 0x80}), 2, 0),
-		withPos(newTokenDefault(2, 2, "any", []byte{0xE0, 0xBF, 0xBF}), 2, 1),
-		withPos(newTokenDefault(2, 2, "any", []byte{0xE1, 0x80, 0x80}), 2, 2),
-		withPos(newTokenDefault(2, 2, "any", []byte{0xEC, 0xBF, 0xBF}), 2, 3),
-		withPos(newTokenDefault(2, 2, "any", []byte{0xED, 0x80, 0x80}), 2, 4),
-		withPos(newTokenDefault(2, 2, "any", []byte{0xED, 0x9F, 0xBF}), 2, 5),
-		withPos(newTokenDefault(2, 2, "any", []byte{0xEE, 0x80, 0x80}), 2, 6),
-		withPos(newTokenDefault(2, 2, "any", []byte{0xEF, 0xBF, 0xBF}), 2, 7),
-		withPos(newTokenDefault(1, 1, "newline", []byte{0x0A}), 2, 8),
-
-		withPos(newTokenDefault(2, 2, "any", []byte{0xF0, 0x90, 0x80, 0x80}), 3, 0),
-		withPos(newTokenDefault(2, 2, "any", []byte{0xF0, 0xBF, 0xBF, 0xBF}), 3, 1),
-		withPos(newTokenDefault(2, 2, "any", []byte{0xF1, 0x80, 0x80, 0x80}), 3, 2),
-		withPos(newTokenDefault(2, 2, "any", []byte{0xF3, 0xBF, 0xBF, 0xBF}), 3, 3),
-		withPos(newTokenDefault(2, 2, "any", []byte{0xF4, 0x80, 0x80, 0x80}), 3, 4),
-		withPos(newTokenDefault(2, 2, "any", []byte{0xF4, 0x8F, 0xBF, 0xBF}), 3, 5),
+		withPos(newTokenDefault(2, 2, []byte{0x00}), 0, 0),
+		withPos(newTokenDefault(2, 2, []byte{0x7F}), 0, 1),
+		withPos(newTokenDefault(1, 1, []byte{0x0A}), 0, 2),
+
+		withPos(newTokenDefault(2, 2, []byte{0xC2, 0x80}), 1, 0),
+		withPos(newTokenDefault(2, 2, []byte{0xDF, 0xBF}), 1, 1),
+		withPos(newTokenDefault(1, 1, []byte{0x0A}), 1, 2),
+
+		withPos(newTokenDefault(2, 2, []byte{0xE0, 0xA0, 0x80}), 2, 0),
+		withPos(newTokenDefault(2, 2, []byte{0xE0, 0xBF, 0xBF}), 2, 1),
+		withPos(newTokenDefault(2, 2, []byte{0xE1, 0x80, 0x80}), 2, 2),
+		withPos(newTokenDefault(2, 2, []byte{0xEC, 0xBF, 0xBF}), 2, 3),
+		withPos(newTokenDefault(2, 2, []byte{0xED, 0x80, 0x80}), 2, 4),
+		withPos(newTokenDefault(2, 2, []byte{0xED, 0x9F, 0xBF}), 2, 5),
+		withPos(newTokenDefault(2, 2, []byte{0xEE, 0x80, 0x80}), 2, 6),
+		withPos(newTokenDefault(2, 2, []byte{0xEF, 0xBF, 0xBF}), 2, 7),
+		withPos(newTokenDefault(1, 1, []byte{0x0A}), 2, 8),
+
+		withPos(newTokenDefault(2, 2, []byte{0xF0, 0x90, 0x80, 0x80}), 3, 0),
+		withPos(newTokenDefault(2, 2, []byte{0xF0, 0xBF, 0xBF, 0xBF}), 3, 1),
+		withPos(newTokenDefault(2, 2, []byte{0xF1, 0x80, 0x80, 0x80}), 3, 2),
+		withPos(newTokenDefault(2, 2, []byte{0xF3, 0xBF, 0xBF, 0xBF}), 3, 3),
+		withPos(newTokenDefault(2, 2, []byte{0xF4, 0x80, 0x80, 0x80}), 3, 4),
+		withPos(newTokenDefault(2, 2, []byte{0xF4, 0x8F, 0xBF, 0xBF}), 3, 5),
 
 		// When a token contains multiple line breaks, the driver sets the token position to
 		// the line number where a lexeme first appears.
-		withPos(newTokenDefault(1, 1, "newline", []byte{0x0A, 0x0A, 0x0A}), 3, 6),
+		withPos(newTokenDefault(1, 1, []byte{0x0A, 0x0A, 0x0A}), 3, 6),
 
 		withPos(newEOFTokenDefault(), 0, 0),
 	}
@@ -902,10 +896,8 @@ func testToken(t *testing.T, expected, actual *Token, checkPosition bool) {
 	t.Helper()
 
 	if actual.ModeID != expected.ModeID ||
-		actual.ModeName != expected.ModeName ||
 		actual.KindID != expected.KindID ||
 		actual.ModeKindID != expected.ModeKindID ||
-		actual.KindName != expected.KindName ||
 		!bytes.Equal(actual.Lexeme, expected.Lexeme) ||
 		actual.EOF != expected.EOF ||
 		actual.Invalid != expected.Invalid {
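For reference, the mapper that the new template blocks below generate comes out roughly like this for the README's statement spec. This is a hand-written replica, not real generator output: the types, constants, and name values mimic what the template would emit for kinds word, whitespace, and punctuation, and are reconstructed, not copied.

```go
package main

import "fmt"

// KindID and the constants below stand in for the generated ones.
type KindID int

const (
	KindIDNil         KindID = 0
	KindIDWord        KindID = 1
	KindIDWhitespace  KindID = 2
	KindIDPunctuation KindID = 3
)

const (
	KindNameNil         = ""
	KindNameWord        = "word"
	KindNameWhitespace  = "whitespace"
	KindNamePunctuation = "punctuation"
)

// KindIDToName is the shape the template's switch generation produces.
func KindIDToName(id KindID) string {
	switch id {
	case KindIDNil:
		return KindNameNil
	case KindIDWord:
		return KindNameWord
	case KindIDWhitespace:
		return KindNameWhitespace
	case KindIDPunctuation:
		return KindNamePunctuation
	}
	return ""
}

func main() {
	fmt.Println(KindIDToName(KindIDWord)) // word
}
```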
+ "kindIDsSrc": kindIDsSrc, + "kindNamesSrc": kindNamesSrc, + "kindIDToNameSrc": kindIDToNameSrc, + "specSrc": specSrc, }) if err != nil { return nil, err |