| author | Ryo Nihei <nihei.dev@gmail.com> | 2021-09-18 17:07:09 +0900 |
| --- | --- | --- |
| committer | Ryo Nihei <nihei.dev@gmail.com> | 2021-09-18 17:07:09 +0900 |
| commit | fe865a812401c2c612f2cd17cedd4728dc4798f7 (patch) | |
| tree | 007dbc653364809e2273ba95aef0f1bea2006127 /driver | |
| parent | Update CHANGELOG (diff) | |
| download | tre-fe865a812401c2c612f2cd17cedd4728dc4798f7.tar.gz, tre-fe865a812401c2c612f2cd17cedd4728dc4798f7.tar.xz | |
Generate constant values representing mode IDs, mode names, kind IDs, and kind names
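For illustration, a lex spec with a single `default` mode and the kinds `white_space` and `number` would now yield const blocks roughly like the following in the generated file. This is a sketch inferred from the template changes in driver/template.go below; the spec and its mode/kind names are hypothetical, and the exact formatting of the generated output may differ:

```go
// Code generated by maleeni-go. DO NOT EDIT.

// Mode IDs: one constant per entry in clspec.ModeNames,
// with index 0 reserved for the nil mode.
const (
	ModeIDNil     ModeID = 0
	ModeIDDefault ModeID = 1
)

// Mode names corresponding to the mode IDs above.
const (
	ModeNameNil     = ""
	ModeNameDefault = "default"
)

// Kind IDs: one constant per entry in clspec.KindNames.
const (
	KindIDNil        KindID = 0
	KindIDWhiteSpace KindID = 1
	KindIDNumber     KindID = 2
)

// Kind names corresponding to the kind IDs above.
const (
	KindNameNil        = ""
	KindNameWhiteSpace = "white_space"
	KindNameNumber     = "number"
)
```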
Diffstat (limited to 'driver')
| -rw-r--r-- | driver/lexer_test.go | 154 |
| -rw-r--r-- | driver/template.go | 102 |

2 files changed, 165 insertions(+), 91 deletions(-)
diff --git a/driver/lexer_test.go b/driver/lexer_test.go
index ebb4aad..a742bad 100644
--- a/driver/lexer_test.go
+++ b/driver/lexer_test.go
@@ -227,7 +227,7 @@ func TestLexer_Next(t *testing.T) {
 				// maleeni cannot handle the null character in patterns because compiler.lexer,
 				// specifically read() and restore(), recognizes the null characters as that a symbol doesn't exist.
 				// If a pattern needs a null character, use code point expression \u{0000}.
-				newLexEntryDefaultNOP("1ByteChar", "[\x01-\x7f]"),
+				newLexEntryDefaultNOP("char1Byte", "[\x01-\x7f]"),
 			},
 		},
 		src: string([]byte{
@@ -237,10 +237,10 @@ func TestLexer_Next(t *testing.T) {
 			0x7f,
 		}),
 		tokens: []*Token{
-			newTokenDefault(1, 1, "1ByteChar", []byte{0x01}),
-			newTokenDefault(1, 1, "1ByteChar", []byte{0x02}),
-			newTokenDefault(1, 1, "1ByteChar", []byte{0x7e}),
-			newTokenDefault(1, 1, "1ByteChar", []byte{0x7f}),
+			newTokenDefault(1, 1, "char1Byte", []byte{0x01}),
+			newTokenDefault(1, 1, "char1Byte", []byte{0x02}),
+			newTokenDefault(1, 1, "char1Byte", []byte{0x7e}),
+			newTokenDefault(1, 1, "char1Byte", []byte{0x7f}),
 			newEOFTokenDefault(),
 		},
 	},
@@ -248,7 +248,7 @@ func TestLexer_Next(t *testing.T) {
 		lspec: &spec.LexSpec{
 			Entries: []*spec.LexEntry{
 				// all 2 byte characters
-				newLexEntryDefaultNOP("2ByteChar", "[\xc2\x80-\xdf\xbf]"),
+				newLexEntryDefaultNOP("char2Byte", "[\xc2\x80-\xdf\xbf]"),
 			},
 		},
 		src: string([]byte{
@@ -258,10 +258,10 @@ func TestLexer_Next(t *testing.T) {
 			0xdf, 0xbf,
 		}),
 		tokens: []*Token{
-			newTokenDefault(1, 1, "2ByteChar", []byte{0xc2, 0x80}),
-			newTokenDefault(1, 1, "2ByteChar", []byte{0xc2, 0x81}),
-			newTokenDefault(1, 1, "2ByteChar", []byte{0xdf, 0xbe}),
-			newTokenDefault(1, 1, "2ByteChar", []byte{0xdf, 0xbf}),
+			newTokenDefault(1, 1, "char2Byte", []byte{0xc2, 0x80}),
+			newTokenDefault(1, 1, "char2Byte", []byte{0xc2, 0x81}),
+			newTokenDefault(1, 1, "char2Byte", []byte{0xdf, 0xbe}),
+			newTokenDefault(1, 1, "char2Byte", []byte{0xdf, 0xbf}),
 			newEOFTokenDefault(),
 		},
 	},
@@ -269,14 +269,14 @@ func TestLexer_Next(t *testing.T) {
 		lspec: &spec.LexSpec{
 			Entries: []*spec.LexEntry{
 				// All bytes are the same.
-				newLexEntryDefaultNOP("3ByteChar", "[\xe0\xa0\x80-\xe0\xa0\x80]"),
+				newLexEntryDefaultNOP("char3Byte", "[\xe0\xa0\x80-\xe0\xa0\x80]"),
 			},
 		},
 		src: string([]byte{
 			0xe0, 0xa0, 0x80,
 		}),
 		tokens: []*Token{
-			newTokenDefault(1, 1, "3ByteChar", []byte{0xe0, 0xa0, 0x80}),
+			newTokenDefault(1, 1, "char3Byte", []byte{0xe0, 0xa0, 0x80}),
 			newEOFTokenDefault(),
 		},
 	},
@@ -284,7 +284,7 @@ func TestLexer_Next(t *testing.T) {
 		lspec: &spec.LexSpec{
 			Entries: []*spec.LexEntry{
 				// The first two bytes are the same.
-				newLexEntryDefaultNOP("3ByteChar", "[\xe0\xa0\x80-\xe0\xa0\xbf]"),
+				newLexEntryDefaultNOP("char3Byte", "[\xe0\xa0\x80-\xe0\xa0\xbf]"),
 			},
 		},
 		src: string([]byte{
@@ -294,10 +294,10 @@ func TestLexer_Next(t *testing.T) {
 			0xe0, 0xa0, 0xbf,
 		}),
 		tokens: []*Token{
-			newTokenDefault(1, 1, "3ByteChar", []byte{0xe0, 0xa0, 0x80}),
-			newTokenDefault(1, 1, "3ByteChar", []byte{0xe0, 0xa0, 0x81}),
-			newTokenDefault(1, 1, "3ByteChar", []byte{0xe0, 0xa0, 0xbe}),
-			newTokenDefault(1, 1, "3ByteChar", []byte{0xe0, 0xa0, 0xbf}),
+			newTokenDefault(1, 1, "char3Byte", []byte{0xe0, 0xa0, 0x80}),
+			newTokenDefault(1, 1, "char3Byte", []byte{0xe0, 0xa0, 0x81}),
+			newTokenDefault(1, 1, "char3Byte", []byte{0xe0, 0xa0, 0xbe}),
+			newTokenDefault(1, 1, "char3Byte", []byte{0xe0, 0xa0, 0xbf}),
 			newEOFTokenDefault(),
 		},
 	},
@@ -305,7 +305,7 @@ func TestLexer_Next(t *testing.T) {
 		lspec: &spec.LexSpec{
 			Entries: []*spec.LexEntry{
 				// The first byte are the same.
-				newLexEntryDefaultNOP("3ByteChar", "[\xe0\xa0\x80-\xe0\xbf\xbf]"),
+				newLexEntryDefaultNOP("char3Byte", "[\xe0\xa0\x80-\xe0\xbf\xbf]"),
 			},
 		},
 		src: string([]byte{
@@ -315,10 +315,10 @@ func TestLexer_Next(t *testing.T) {
 			0xe0, 0xbf, 0xbf,
 		}),
 		tokens: []*Token{
-			newTokenDefault(1, 1, "3ByteChar", []byte{0xe0, 0xa0, 0x80}),
-			newTokenDefault(1, 1, "3ByteChar", []byte{0xe0, 0xa0, 0x81}),
-			newTokenDefault(1, 1, "3ByteChar", []byte{0xe0, 0xbf, 0xbe}),
-			newTokenDefault(1, 1, "3ByteChar", []byte{0xe0, 0xbf, 0xbf}),
+			newTokenDefault(1, 1, "char3Byte", []byte{0xe0, 0xa0, 0x80}),
+			newTokenDefault(1, 1, "char3Byte", []byte{0xe0, 0xa0, 0x81}),
+			newTokenDefault(1, 1, "char3Byte", []byte{0xe0, 0xbf, 0xbe}),
+			newTokenDefault(1, 1, "char3Byte", []byte{0xe0, 0xbf, 0xbf}),
 			newEOFTokenDefault(),
 		},
 	},
@@ -326,7 +326,7 @@ func TestLexer_Next(t *testing.T) {
 		lspec: &spec.LexSpec{
 			Entries: []*spec.LexEntry{
 				// all 3 byte characters
-				newLexEntryDefaultNOP("3ByteChar", "[\xe0\xa0\x80-\xef\xbf\xbf]"),
+				newLexEntryDefaultNOP("char3Byte", "[\xe0\xa0\x80-\xef\xbf\xbf]"),
 			},
 		},
 		src: string([]byte{
@@ -348,22 +348,22 @@ func TestLexer_Next(t *testing.T) {
 			0xef, 0xbf, 0xbf,
 		}),
 		tokens: []*Token{
-			newTokenDefault(1, 1, "3ByteChar", []byte{0xe0, 0xa0, 0x80}),
-			newTokenDefault(1, 1, "3ByteChar", []byte{0xe0, 0xa0, 0x81}),
-			newTokenDefault(1, 1, "3ByteChar", []byte{0xe0, 0xbf, 0xbe}),
-			newTokenDefault(1, 1, "3ByteChar", []byte{0xe0, 0xbf, 0xbf}),
-			newTokenDefault(1, 1, "3ByteChar", []byte{0xe1, 0x80, 0x80}),
-			newTokenDefault(1, 1, "3ByteChar", []byte{0xe1, 0x80, 0x81}),
-			newTokenDefault(1, 1, "3ByteChar", []byte{0xec, 0xbf, 0xbe}),
-			newTokenDefault(1, 1, "3ByteChar", []byte{0xec, 0xbf, 0xbf}),
-			newTokenDefault(1, 1, "3ByteChar", []byte{0xed, 0x80, 0x80}),
-			newTokenDefault(1, 1, "3ByteChar", []byte{0xed, 0x80, 0x81}),
-			newTokenDefault(1, 1, "3ByteChar", []byte{0xed, 0x9f, 0xbe}),
-			newTokenDefault(1, 1, "3ByteChar", []byte{0xed, 0x9f, 0xbf}),
-			newTokenDefault(1, 1, "3ByteChar", []byte{0xee, 0x80, 0x80}),
-			newTokenDefault(1, 1, "3ByteChar", []byte{0xee, 0x80, 0x81}),
-			newTokenDefault(1, 1, "3ByteChar", []byte{0xef, 0xbf, 0xbe}),
-			newTokenDefault(1, 1, "3ByteChar", []byte{0xef, 0xbf, 0xbf}),
+			newTokenDefault(1, 1, "char3Byte", []byte{0xe0, 0xa0, 0x80}),
+			newTokenDefault(1, 1, "char3Byte", []byte{0xe0, 0xa0, 0x81}),
+			newTokenDefault(1, 1, "char3Byte", []byte{0xe0, 0xbf, 0xbe}),
+			newTokenDefault(1, 1, "char3Byte", []byte{0xe0, 0xbf, 0xbf}),
+			newTokenDefault(1, 1, "char3Byte", []byte{0xe1, 0x80, 0x80}),
+			newTokenDefault(1, 1, "char3Byte", []byte{0xe1, 0x80, 0x81}),
+			newTokenDefault(1, 1, "char3Byte", []byte{0xec, 0xbf, 0xbe}),
+			newTokenDefault(1, 1, "char3Byte", []byte{0xec, 0xbf, 0xbf}),
+			newTokenDefault(1, 1, "char3Byte", []byte{0xed, 0x80, 0x80}),
+			newTokenDefault(1, 1, "char3Byte", []byte{0xed, 0x80, 0x81}),
+			newTokenDefault(1, 1, "char3Byte", []byte{0xed, 0x9f, 0xbe}),
+			newTokenDefault(1, 1, "char3Byte", []byte{0xed, 0x9f, 0xbf}),
+			newTokenDefault(1, 1, "char3Byte", []byte{0xee, 0x80, 0x80}),
+			newTokenDefault(1, 1, "char3Byte", []byte{0xee, 0x80, 0x81}),
+			newTokenDefault(1, 1, "char3Byte", []byte{0xef, 0xbf, 0xbe}),
+			newTokenDefault(1, 1, "char3Byte", []byte{0xef, 0xbf, 0xbf}),
 			newEOFTokenDefault(),
 		},
 	},
@@ -371,14 +371,14 @@ func TestLexer_Next(t *testing.T) {
 		lspec: &spec.LexSpec{
 			Entries: []*spec.LexEntry{
 				// All bytes are the same.
-				newLexEntryDefaultNOP("4ByteChar", "[\xf0\x90\x80\x80-\xf0\x90\x80\x80]"),
+				newLexEntryDefaultNOP("char4Byte", "[\xf0\x90\x80\x80-\xf0\x90\x80\x80]"),
 			},
 		},
 		src: string([]byte{
 			0xf0, 0x90, 0x80, 0x80,
 		}),
 		tokens: []*Token{
-			newTokenDefault(1, 1, "4ByteChar", []byte{0xf0, 0x90, 0x80, 0x80}),
+			newTokenDefault(1, 1, "char4Byte", []byte{0xf0, 0x90, 0x80, 0x80}),
 			newEOFTokenDefault(),
 		},
 	},
@@ -386,7 +386,7 @@ func TestLexer_Next(t *testing.T) {
 		lspec: &spec.LexSpec{
 			Entries: []*spec.LexEntry{
 				// The first 3 bytes are the same.
-				newLexEntryDefaultNOP("4ByteChar", "[\xf0\x90\x80\x80-\xf0\x90\x80\xbf]"),
+				newLexEntryDefaultNOP("char4Byte", "[\xf0\x90\x80\x80-\xf0\x90\x80\xbf]"),
 			},
 		},
 		src: string([]byte{
@@ -396,10 +396,10 @@ func TestLexer_Next(t *testing.T) {
 			0xf0, 0x90, 0x80, 0xbf,
 		}),
 		tokens: []*Token{
-			newTokenDefault(1, 1, "4ByteChar", []byte{0xf0, 0x90, 0x80, 0x80}),
-			newTokenDefault(1, 1, "4ByteChar", []byte{0xf0, 0x90, 0x80, 0x81}),
-			newTokenDefault(1, 1, "4ByteChar", []byte{0xf0, 0x90, 0x80, 0xbe}),
-			newTokenDefault(1, 1, "4ByteChar", []byte{0xf0, 0x90, 0x80, 0xbf}),
+			newTokenDefault(1, 1, "char4Byte", []byte{0xf0, 0x90, 0x80, 0x80}),
+			newTokenDefault(1, 1, "char4Byte", []byte{0xf0, 0x90, 0x80, 0x81}),
+			newTokenDefault(1, 1, "char4Byte", []byte{0xf0, 0x90, 0x80, 0xbe}),
+			newTokenDefault(1, 1, "char4Byte", []byte{0xf0, 0x90, 0x80, 0xbf}),
 			newEOFTokenDefault(),
 		},
 	},
@@ -407,7 +407,7 @@ func TestLexer_Next(t *testing.T) {
 		lspec: &spec.LexSpec{
 			Entries: []*spec.LexEntry{
 				// The first 2 bytes are the same.
-				newLexEntryDefaultNOP("4ByteChar", "[\xf0\x90\x80\x80-\xf0\x90\xbf\xbf]"),
+				newLexEntryDefaultNOP("char4Byte", "[\xf0\x90\x80\x80-\xf0\x90\xbf\xbf]"),
 			},
 		},
 		src: string([]byte{
@@ -417,10 +417,10 @@ func TestLexer_Next(t *testing.T) {
 			0xf0, 0x90, 0xbf, 0xbf,
 		}),
 		tokens: []*Token{
-			newTokenDefault(1, 1, "4ByteChar", []byte{0xf0, 0x90, 0x80, 0x80}),
-			newTokenDefault(1, 1, "4ByteChar", []byte{0xf0, 0x90, 0x80, 0x81}),
-			newTokenDefault(1, 1, "4ByteChar", []byte{0xf0, 0x90, 0xbf, 0xbe}),
-			newTokenDefault(1, 1, "4ByteChar", []byte{0xf0, 0x90, 0xbf, 0xbf}),
+			newTokenDefault(1, 1, "char4Byte", []byte{0xf0, 0x90, 0x80, 0x80}),
+			newTokenDefault(1, 1, "char4Byte", []byte{0xf0, 0x90, 0x80, 0x81}),
+			newTokenDefault(1, 1, "char4Byte", []byte{0xf0, 0x90, 0xbf, 0xbe}),
+			newTokenDefault(1, 1, "char4Byte", []byte{0xf0, 0x90, 0xbf, 0xbf}),
 			newEOFTokenDefault(),
 		},
 	},
@@ -428,7 +428,7 @@ func TestLexer_Next(t *testing.T) {
 		lspec: &spec.LexSpec{
 			Entries: []*spec.LexEntry{
 				// The first byte are the same.
-				newLexEntryDefaultNOP("4ByteChar", "[\xf0\x90\x80\x80-\xf0\xbf\xbf\xbf]"),
+				newLexEntryDefaultNOP("char4Byte", "[\xf0\x90\x80\x80-\xf0\xbf\xbf\xbf]"),
 			},
 		},
 		src: string([]byte{
@@ -438,10 +438,10 @@ func TestLexer_Next(t *testing.T) {
 			0xf0, 0xbf, 0xbf, 0xbf,
 		}),
 		tokens: []*Token{
-			newTokenDefault(1, 1, "4ByteChar", []byte{0xf0, 0x90, 0x80, 0x80}),
-			newTokenDefault(1, 1, "4ByteChar", []byte{0xf0, 0x90, 0x80, 0x81}),
-			newTokenDefault(1, 1, "4ByteChar", []byte{0xf0, 0xbf, 0xbf, 0xbe}),
-			newTokenDefault(1, 1, "4ByteChar", []byte{0xf0, 0xbf, 0xbf, 0xbf}),
+			newTokenDefault(1, 1, "char4Byte", []byte{0xf0, 0x90, 0x80, 0x80}),
+			newTokenDefault(1, 1, "char4Byte", []byte{0xf0, 0x90, 0x80, 0x81}),
+			newTokenDefault(1, 1, "char4Byte", []byte{0xf0, 0xbf, 0xbf, 0xbe}),
+			newTokenDefault(1, 1, "char4Byte", []byte{0xf0, 0xbf, 0xbf, 0xbf}),
 			newEOFTokenDefault(),
 		},
 	},
@@ -449,7 +449,7 @@ func TestLexer_Next(t *testing.T) {
 		lspec: &spec.LexSpec{
 			Entries: []*spec.LexEntry{
 				// all 4 byte characters
-				newLexEntryDefaultNOP("4ByteChar", "[\xf0\x90\x80\x80-\xf4\x8f\xbf\xbf]"),
+				newLexEntryDefaultNOP("char4Byte", "[\xf0\x90\x80\x80-\xf4\x8f\xbf\xbf]"),
 			},
 		},
 		src: string([]byte{
@@ -467,18 +467,18 @@ func TestLexer_Next(t *testing.T) {
 			0xf4, 0x8f, 0xbf, 0xbf,
 		}),
 		tokens: []*Token{
-			newTokenDefault(1, 1, "4ByteChar", []byte{0xf0, 0x90, 0x80, 0x80}),
-			newTokenDefault(1, 1, "4ByteChar", []byte{0xf0, 0x90, 0x80, 0x81}),
-			newTokenDefault(1, 1, "4ByteChar", []byte{0xf0, 0xbf, 0xbf, 0xbe}),
-			newTokenDefault(1, 1, "4ByteChar", []byte{0xf0, 0xbf, 0xbf, 0xbf}),
-			newTokenDefault(1, 1, "4ByteChar", []byte{0xf1, 0x80, 0x80, 0x80}),
-			newTokenDefault(1, 1, "4ByteChar", []byte{0xf1, 0x80, 0x80, 0x81}),
-			newTokenDefault(1, 1, "4ByteChar", []byte{0xf3, 0xbf, 0xbf, 0xbe}),
-			newTokenDefault(1, 1, "4ByteChar", []byte{0xf3, 0xbf, 0xbf, 0xbf}),
-			newTokenDefault(1, 1, "4ByteChar", []byte{0xf4, 0x80, 0x80, 0x80}),
-			newTokenDefault(1, 1, "4ByteChar", []byte{0xf4, 0x80, 0x80, 0x81}),
-			newTokenDefault(1, 1, "4ByteChar", []byte{0xf4, 0x8f, 0xbf, 0xbe}),
-			newTokenDefault(1, 1, "4ByteChar", []byte{0xf4, 0x8f, 0xbf, 0xbf}),
+			newTokenDefault(1, 1, "char4Byte", []byte{0xf0, 0x90, 0x80, 0x80}),
+			newTokenDefault(1, 1, "char4Byte", []byte{0xf0, 0x90, 0x80, 0x81}),
+			newTokenDefault(1, 1, "char4Byte", []byte{0xf0, 0xbf, 0xbf, 0xbe}),
+			newTokenDefault(1, 1, "char4Byte", []byte{0xf0, 0xbf, 0xbf, 0xbf}),
+			newTokenDefault(1, 1, "char4Byte", []byte{0xf1, 0x80, 0x80, 0x80}),
+			newTokenDefault(1, 1, "char4Byte", []byte{0xf1, 0x80, 0x80, 0x81}),
+			newTokenDefault(1, 1, "char4Byte", []byte{0xf3, 0xbf, 0xbf, 0xbe}),
+			newTokenDefault(1, 1, "char4Byte", []byte{0xf3, 0xbf, 0xbf, 0xbf}),
+			newTokenDefault(1, 1, "char4Byte", []byte{0xf4, 0x80, 0x80, 0x80}),
+			newTokenDefault(1, 1, "char4Byte", []byte{0xf4, 0x80, 0x80, 0x81}),
+			newTokenDefault(1, 1, "char4Byte", []byte{0xf4, 0x8f, 0xbf, 0xbe}),
+			newTokenDefault(1, 1, "char4Byte", []byte{0xf4, 0x8f, 0xbf, 0xbf}),
 			newEOFTokenDefault(),
 		},
 	},
@@ -497,18 +497,18 @@ func TestLexer_Next(t *testing.T) {
 	{
 		lspec: &spec.LexSpec{
 			Entries: []*spec.LexEntry{
-				newLexEntryDefaultNOP("1ByteChar", "\\u{006E}"),
-				newLexEntryDefaultNOP("2ByteChar", "\\u{03BD}"),
-				newLexEntryDefaultNOP("3ByteChar", "\\u{306B}"),
-				newLexEntryDefaultNOP("4ByteChar", "\\u{01F638}"),
+				newLexEntryDefaultNOP("char1Byte", "\\u{006E}"),
+				newLexEntryDefaultNOP("char2Byte", "\\u{03BD}"),
+				newLexEntryDefaultNOP("char3Byte", "\\u{306B}"),
+				newLexEntryDefaultNOP("char4Byte", "\\u{01F638}"),
 			},
 		},
 		src: "nνに😸",
 		tokens: []*Token{
-			newTokenDefault(1, 1, "1ByteChar", []byte{0x6E}),
-			newTokenDefault(2, 2, "2ByteChar", []byte{0xCE, 0xBD}),
-			newTokenDefault(3, 3, "3ByteChar", []byte{0xE3, 0x81, 0xAB}),
-			newTokenDefault(4, 4, "4ByteChar", []byte{0xF0, 0x9F, 0x98, 0xB8}),
+			newTokenDefault(1, 1, "char1Byte", []byte{0x6E}),
+			newTokenDefault(2, 2, "char2Byte", []byte{0xCE, 0xBD}),
+			newTokenDefault(3, 3, "char3Byte", []byte{0xE3, 0x81, 0xAB}),
+			newTokenDefault(4, 4, "char4Byte", []byte{0xF0, 0x9F, 0x98, 0xB8}),
 			newEOFTokenDefault(),
 		},
 	},
diff --git a/driver/template.go b/driver/template.go
index f7caa75..2772135 100644
--- a/driver/template.go
+++ b/driver/template.go
@@ -35,6 +35,67 @@ func GenLexer(clspec *spec.CompiledLexSpec, pkgName string) error {
 		lexerSrc = b.String()
 	}
 
+	var modeIDsSrc string
+	{
+		var b strings.Builder
+		fmt.Fprintf(&b, "const (\n")
+		for i, k := range clspec.ModeNames {
+			if i == spec.LexModeIDNil.Int() {
+				fmt.Fprintf(&b, " ModeIDNil ModeID = %v\n", i)
+				continue
+			}
+			fmt.Fprintf(&b, " ModeID%v ModeID = %v\n", spec.SnakeCaseToUpperCamelCase(k.String()), i)
+		}
+		fmt.Fprintf(&b, ")")
+
+		modeIDsSrc = b.String()
+	}
+
+	var modeNamesSrc string
+	{
+		var b strings.Builder
+		fmt.Fprintf(&b, "const (\n")
+		for i, k := range clspec.ModeNames {
+			if i == spec.LexModeIDNil.Int() {
+				fmt.Fprintf(&b, " ModeNameNil = %#v\n", "")
+				continue
+			}
+			fmt.Fprintf(&b, " ModeName%v = %#v\n", spec.SnakeCaseToUpperCamelCase(k.String()), k)
+		}
+		fmt.Fprintf(&b, ")")
+
+		modeNamesSrc = b.String()
+	}
+
+	var kindIDsSrc string
+	{
+		var b strings.Builder
+		fmt.Fprintf(&b, "const (\n")
+		for i, k := range clspec.KindNames {
+			if i == spec.LexKindIDNil.Int() {
+				fmt.Fprintf(&b, " KindIDNil KindID = %v\n", i)
+				continue
+			}
+			fmt.Fprintf(&b, " KindID%v KindID = %v\n", spec.SnakeCaseToUpperCamelCase(k.String()), i)
+		}
+		fmt.Fprintf(&b, ")")
+
+		kindIDsSrc = b.String()
+	}
+
+	var kindNamesSrc string
+	{
+		var b strings.Builder
+		fmt.Fprintf(&b, "const (\n")
+		fmt.Fprintf(&b, " KindNameNil = %#v\n", "")
+		for _, k := range clspec.KindNames[1:] {
+			fmt.Fprintf(&b, " KindName%v = %#v\n", spec.SnakeCaseToUpperCamelCase(k.String()), k)
+		}
+		fmt.Fprintf(&b, ")")
+
+		kindNamesSrc = b.String()
+	}
+
 	var specSrc string
 	{
 		t, err := template.New("").Funcs(genTemplateFuncs(clspec)).Parse(lexSpecTemplate)
@@ -44,8 +105,8 @@ func GenLexer(clspec *spec.CompiledLexSpec, pkgName string) error {
 
 		var b strings.Builder
 		err = t.Execute(&b, map[string]interface{}{
-			"initialModeID":    clspec.InitialModeID,
-			"modeIDNil":        spec.LexModeIDNil,
+			"initialModeID":    "ModeID" + spec.SnakeCaseToUpperCamelCase(clspec.ModeNames[clspec.InitialModeID].String()),
+			"modeIDNil":        "ModeIDNil",
 			"modeKindIDNil":    spec.LexModeKindIDNil,
 			"stateIDNil":       spec.StateIDNil,
 			"compressionLevel": clspec.CompressionLevel,
@@ -62,6 +123,14 @@ func GenLexer(clspec *spec.CompiledLexSpec, pkgName string) error {
 	tmpl := `// Code generated by maleeni-go. DO NOT EDIT.
 
 {{ .lexerSrc }}
 
+{{ .modeIDsSrc }}
+
+{{ .modeNamesSrc }}
+
+{{ .kindIDsSrc }}
+
+{{ .kindNamesSrc }}
+
 {{ .specSrc }}
 `
@@ -72,8 +141,12 @@ func GenLexer(clspec *spec.CompiledLexSpec, pkgName string) error {
 
 	var b strings.Builder
 	err = t.Execute(&b, map[string]string{
-		"lexerSrc": lexerSrc,
-		"specSrc":  specSrc,
+		"lexerSrc":     lexerSrc,
+		"modeIDsSrc":   modeIDsSrc,
+		"modeNamesSrc": modeNamesSrc,
+		"kindIDsSrc":   kindIDsSrc,
+		"kindNamesSrc": kindNamesSrc,
+		"specSrc":      specSrc,
 	})
 	if err != nil {
 		return err
@@ -239,11 +312,10 @@ func genTemplateFuncs(clspec *spec.CompiledLexSpec) template.FuncMap {
 			fmt.Fprintf(&b, "[]string{\n")
 			for i, name := range clspec.ModeNames {
 				if i == spec.LexModeIDNil.Int() {
-					fmt.Fprintf(&b, "%#v,\n", "")
+					fmt.Fprintf(&b, "ModeNameNil,\n")
 					continue
 				}
-
-				fmt.Fprintf(&b, "%#v,\n", name)
+				fmt.Fprintf(&b, "ModeName%v,\n", spec.SnakeCaseToUpperCamelCase(name.String()))
 			}
 			fmt.Fprintf(&b, "}")
 			return b.String()
@@ -290,10 +362,13 @@ func genTemplateFuncs(clspec *spec.CompiledLexSpec) template.FuncMap {
 				continue
 			}
 
-			fmt.Fprintf(&b, "{")
-			fmt.Fprintf(&b, "%v", ids[0])
-			for _, v := range ids[1:] {
-				fmt.Fprintf(&b, ", %v", v)
+			fmt.Fprintf(&b, "{\n")
+			for j, id := range ids {
+				if j == spec.LexModeKindIDNil.Int() {
+					fmt.Fprintf(&b, "KindIDNil,\n")
+					continue
+				}
+				fmt.Fprintf(&b, "KindID%v,\n", spec.SnakeCaseToUpperCamelCase(string(clspec.KindNames[id].String())))
 			}
 			fmt.Fprintf(&b, "},\n")
 		}
@@ -305,11 +380,10 @@ func genTemplateFuncs(clspec *spec.CompiledLexSpec) template.FuncMap {
 			fmt.Fprintf(&b, "[]string{\n")
 			for i, name := range clspec.KindNames {
 				if i == spec.LexKindIDNil.Int() {
-					fmt.Fprintf(&b, "%#v,\n", "")
+					fmt.Fprintf(&b, "KindNameNil,\n")
 					continue
 				}
-
-				fmt.Fprintf(&b, "%#v,\n", name)
+				fmt.Fprintf(&b, "KindName%v,\n", spec.SnakeCaseToUpperCamelCase(name.String()))
 			}
 			fmt.Fprintf(&b, "}")
 			return b.String()
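Note that the test kinds are renamed (1ByteChar → char1Byte, and so on), presumably so that kind names map cleanly onto the generated identifiers. A minimal sketch of how a caller might use the generated constants instead of comparing raw kind-name strings or numeric literals; the kinds (`white_space`, `number`) and the `kindLabel` helper are hypothetical, and maleeni's actual generated API may differ:

```go
// kindLabel resolves a generated KindID to its generated kind name,
// so callers never hard-code magic numbers or name strings.
// Assumes a lexer generated from a spec with white_space and number kinds.
func kindLabel(id KindID) string {
	switch id {
	case KindIDNil:
		return KindNameNil
	case KindIDWhiteSpace:
		return KindNameWhiteSpace
	case KindIDNumber:
		return KindNameNumber
	default:
		return "unknown"
	}
}
```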