Diffstat (limited to 'grammar/lexical')
-rw-r--r--  grammar/lexical/compiler.go                  413
-rw-r--r--  grammar/lexical/compiler_test.go             338
-rw-r--r--  grammar/lexical/dfa/dfa.go                   173
-rw-r--r--  grammar/lexical/dfa/dfa_test.go              121
-rw-r--r--  grammar/lexical/dfa/symbol_position.go       182
-rw-r--r--  grammar/lexical/dfa/symbol_position_test.go   79
-rw-r--r--  grammar/lexical/dfa/tree.go                  567
-rw-r--r--  grammar/lexical/dfa/tree_test.go             257
-rw-r--r--  grammar/lexical/entry.go                     171
-rw-r--r--  grammar/lexical/parser/error.go               36
-rw-r--r--  grammar/lexical/parser/fragment.go            72
-rw-r--r--  grammar/lexical/parser/lexer.go              594
-rw-r--r--  grammar/lexical/parser/lexer_test.go         524
-rw-r--r--  grammar/lexical/parser/parser.go             531
-rw-r--r--  grammar/lexical/parser/parser_test.go       1389
-rw-r--r--  grammar/lexical/parser/tree.go               459
16 files changed, 0 insertions, 5906 deletions
diff --git a/grammar/lexical/compiler.go b/grammar/lexical/compiler.go
deleted file mode 100644
index 06e4068..0000000
--- a/grammar/lexical/compiler.go
+++ /dev/null
@@ -1,413 +0,0 @@
-package lexical
-
-import (
- "bytes"
- "fmt"
-
- "compressor"
- "grammar/lexical/dfa"
- psr "grammar/lexical/parser"
- spec "spec/grammar"
-)
-
-type CompileError struct {
- Kind spec.LexKindName
- Fragment bool
- Cause error
- Detail string
-}
-
-func Compile(lexspec *LexSpec, compLv int) (*spec.LexicalSpec, error, []*CompileError) {
- err := lexspec.Validate()
- if err != nil {
- return nil, fmt.Errorf("invalid lexical specification:\n%w", err), nil
- }
-
- modeEntries, modeNames, modeName2ID, fragments := groupEntriesByLexMode(lexspec.Entries)
-
- modeSpecs := []*spec.CompiledLexModeSpec{
- nil,
- }
- for i, es := range modeEntries[1:] {
- modeName := modeNames[i+1]
- modeSpec, err, cerrs := compile(es, modeName2ID, fragments, compLv)
- if err != nil {
- return nil, fmt.Errorf("failed to compile in %v mode: %w", modeName, err), cerrs
- }
- modeSpecs = append(modeSpecs, modeSpec)
- }
-
- var kindNames []spec.LexKindName
- var name2ID map[spec.LexKindName]spec.LexKindID
- {
- name2ID = map[spec.LexKindName]spec.LexKindID{}
- id := spec.LexKindIDMin
- for _, modeSpec := range modeSpecs[1:] {
- for _, name := range modeSpec.KindNames[1:] {
- if _, ok := name2ID[name]; ok {
- continue
- }
- name2ID[name] = id
- id++
- }
- }
-
- kindNames = make([]spec.LexKindName, len(name2ID)+1)
- for name, id := range name2ID {
- kindNames[id] = name
- }
- }
-
- var kindIDs [][]spec.LexKindID
- {
- kindIDs = make([][]spec.LexKindID, len(modeSpecs))
- for i, modeSpec := range modeSpecs[1:] {
- ids := make([]spec.LexKindID, len(modeSpec.KindNames))
- for modeID, name := range modeSpec.KindNames {
- if modeID == 0 {
- continue
- }
- ids[modeID] = name2ID[name]
- }
- kindIDs[i+1] = ids
- }
- }
-
- return &spec.LexicalSpec{
- InitialModeID: spec.LexModeIDDefault,
- ModeNames: modeNames,
- KindNames: kindNames,
- KindIDs: kindIDs,
- CompressionLevel: compLv,
- Specs: modeSpecs,
- }, nil, nil
-}
-
-func groupEntriesByLexMode(entries []*LexEntry) ([][]*LexEntry, []spec.LexModeName, map[spec.LexModeName]spec.LexModeID, map[spec.LexKindName]*LexEntry) {
- modeNames := []spec.LexModeName{
- spec.LexModeNameNil,
- spec.LexModeNameDefault,
- }
- modeName2ID := map[spec.LexModeName]spec.LexModeID{
- spec.LexModeNameNil: spec.LexModeIDNil,
- spec.LexModeNameDefault: spec.LexModeIDDefault,
- }
- lastModeID := spec.LexModeIDDefault
- modeEntries := [][]*LexEntry{
- nil,
- {},
- }
- fragments := map[spec.LexKindName]*LexEntry{}
- for _, e := range entries {
- if e.Fragment {
- fragments[e.Kind] = e
- continue
- }
- ms := e.Modes
- if len(ms) == 0 {
- ms = []spec.LexModeName{
- spec.LexModeNameDefault,
- }
- }
- for _, modeName := range ms {
- modeID, ok := modeName2ID[modeName]
- if !ok {
- modeID = lastModeID + 1
- lastModeID = modeID
- modeName2ID[modeName] = modeID
- modeNames = append(modeNames, modeName)
- modeEntries = append(modeEntries, []*LexEntry{})
- }
- modeEntries[modeID] = append(modeEntries[modeID], e)
- }
- }
- return modeEntries, modeNames, modeName2ID, fragments
-}
-
-func compile(
- entries []*LexEntry,
- modeName2ID map[spec.LexModeName]spec.LexModeID,
- fragments map[spec.LexKindName]*LexEntry,
- compLv int,
-) (*spec.CompiledLexModeSpec, error, []*CompileError) {
- var kindNames []spec.LexKindName
- kindIDToName := map[spec.LexModeKindID]spec.LexKindName{}
- var patterns map[spec.LexModeKindID][]byte
- {
- kindNames = append(kindNames, spec.LexKindNameNil)
- patterns = map[spec.LexModeKindID][]byte{}
- for i, e := range entries {
- kindID := spec.LexModeKindID(i + 1)
-
- kindNames = append(kindNames, e.Kind)
- kindIDToName[kindID] = e.Kind
- patterns[kindID] = []byte(e.Pattern)
- }
- }
-
- push := []spec.LexModeID{
- spec.LexModeIDNil,
- }
- pop := []int{
- 0,
- }
- for _, e := range entries {
- pushV := spec.LexModeIDNil
- if e.Push != "" {
- pushV = modeName2ID[e.Push]
- }
- push = append(push, pushV)
- popV := 0
- if e.Pop {
- popV = 1
- }
- pop = append(pop, popV)
- }
-
- fragmentPatterns := map[spec.LexKindName][]byte{}
- for k, e := range fragments {
- fragmentPatterns[k] = []byte(e.Pattern)
- }
-
- fragmentCPTrees := make(map[spec.LexKindName]psr.CPTree, len(fragmentPatterns))
- {
- var cerrs []*CompileError
- for kind, pat := range fragmentPatterns {
- p := psr.NewParser(kind, bytes.NewReader(pat))
- t, err := p.Parse()
- if err != nil {
- if err == psr.ParseErr {
- detail, cause := p.Error()
- cerrs = append(cerrs, &CompileError{
- Kind: kind,
- Fragment: true,
- Cause: cause,
- Detail: detail,
- })
- } else {
- cerrs = append(cerrs, &CompileError{
- Kind: kind,
- Fragment: true,
- Cause: err,
- })
- }
- continue
- }
- fragmentCPTrees[kind] = t
- }
- if len(cerrs) > 0 {
- return nil, fmt.Errorf("compile error"), cerrs
- }
-
- err := psr.CompleteFragments(fragmentCPTrees)
- if err != nil {
- if err == psr.ParseErr {
- for _, frag := range fragmentCPTrees {
- kind, frags, err := frag.Describe()
- if err != nil {
- return nil, err, nil
- }
-
- cerrs = append(cerrs, &CompileError{
- Kind: kind,
- Fragment: true,
- Cause: fmt.Errorf("fragment contains undefined fragments or cycles"),
- Detail: fmt.Sprintf("%v", frags),
- })
- }
-
- return nil, fmt.Errorf("compile error"), cerrs
- }
-
- return nil, err, nil
- }
- }
-
- cpTrees := map[spec.LexModeKindID]psr.CPTree{}
- {
- pats := make([]*psr.PatternEntry, len(patterns)+1)
- pats[spec.LexModeKindIDNil] = &psr.PatternEntry{
- ID: spec.LexModeKindIDNil,
- }
- for id, pattern := range patterns {
- pats[id] = &psr.PatternEntry{
- ID: id,
- Pattern: pattern,
- }
- }
-
- var cerrs []*CompileError
- for _, pat := range pats {
- if pat.ID == spec.LexModeKindIDNil {
- continue
- }
-
- p := psr.NewParser(kindIDToName[pat.ID], bytes.NewReader(pat.Pattern))
- t, err := p.Parse()
- if err != nil {
- if err == psr.ParseErr {
- detail, cause := p.Error()
- cerrs = append(cerrs, &CompileError{
- Kind: kindIDToName[pat.ID],
- Fragment: false,
- Cause: cause,
- Detail: detail,
- })
- } else {
- cerrs = append(cerrs, &CompileError{
- Kind: kindIDToName[pat.ID],
- Fragment: false,
- Cause: err,
- })
- }
- continue
- }
-
- complete, err := psr.ApplyFragments(t, fragmentCPTrees)
- if err != nil {
- return nil, err, nil
- }
- if !complete {
- _, frags, err := t.Describe()
- if err != nil {
- return nil, err, nil
- }
-
- cerrs = append(cerrs, &CompileError{
- Kind: kindIDToName[pat.ID],
- Fragment: false,
- Cause: fmt.Errorf("pattern contains undefined fragments"),
- Detail: fmt.Sprintf("%v", frags),
- })
- continue
- }
-
- cpTrees[pat.ID] = t
- }
- if len(cerrs) > 0 {
- return nil, fmt.Errorf("compile error"), cerrs
- }
- }
-
- var tranTab *spec.TransitionTable
- {
- root, symTab, err := dfa.ConvertCPTreeToByteTree(cpTrees)
- if err != nil {
- return nil, err, nil
- }
- d := dfa.GenDFA(root, symTab)
- tranTab, err = dfa.GenTransitionTable(d)
- if err != nil {
- return nil, err, nil
- }
- }
-
- var err error
- switch compLv {
- case 2:
- tranTab, err = compressTransitionTableLv2(tranTab)
- if err != nil {
- return nil, err, nil
- }
- case 1:
- tranTab, err = compressTransitionTableLv1(tranTab)
- if err != nil {
- return nil, err, nil
- }
- }
-
- return &spec.CompiledLexModeSpec{
- KindNames: kindNames,
- Push: push,
- Pop: pop,
- DFA: tranTab,
- }, nil, nil
-}
-
-const (
- CompressionLevelMin = 0
- CompressionLevelMax = 2
-)
-
-func compressTransitionTableLv2(tranTab *spec.TransitionTable) (*spec.TransitionTable, error) {
- ueTab := compressor.NewUniqueEntriesTable()
- {
- orig, err := compressor.NewOriginalTable(convertStateIDSliceToIntSlice(tranTab.UncompressedTransition), tranTab.ColCount)
- if err != nil {
- return nil, err
- }
- err = ueTab.Compress(orig)
- if err != nil {
- return nil, err
- }
- }
-
- rdTab := compressor.NewRowDisplacementTable(0)
- {
- orig, err := compressor.NewOriginalTable(ueTab.UniqueEntries, ueTab.OriginalColCount)
- if err != nil {
- return nil, err
- }
- err = rdTab.Compress(orig)
- if err != nil {
- return nil, err
- }
- }
-
- tranTab.Transition = &spec.UniqueEntriesTable{
- UniqueEntries: &spec.RowDisplacementTable{
- OriginalRowCount: rdTab.OriginalRowCount,
- OriginalColCount: rdTab.OriginalColCount,
- EmptyValue: spec.StateIDNil,
- Entries: convertIntSliceToStateIDSlice(rdTab.Entries),
- Bounds: rdTab.Bounds,
- RowDisplacement: rdTab.RowDisplacement,
- },
- RowNums: ueTab.RowNums,
- OriginalRowCount: ueTab.OriginalRowCount,
- OriginalColCount: ueTab.OriginalColCount,
- }
- tranTab.UncompressedTransition = nil
-
- return tranTab, nil
-}
-
-func compressTransitionTableLv1(tranTab *spec.TransitionTable) (*spec.TransitionTable, error) {
- ueTab := compressor.NewUniqueEntriesTable()
- {
- orig, err := compressor.NewOriginalTable(convertStateIDSliceToIntSlice(tranTab.UncompressedTransition), tranTab.ColCount)
- if err != nil {
- return nil, err
- }
- err = ueTab.Compress(orig)
- if err != nil {
- return nil, err
- }
- }
-
- tranTab.Transition = &spec.UniqueEntriesTable{
- UncompressedUniqueEntries: convertIntSliceToStateIDSlice(ueTab.UniqueEntries),
- RowNums: ueTab.RowNums,
- OriginalRowCount: ueTab.OriginalRowCount,
- OriginalColCount: ueTab.OriginalColCount,
- }
- tranTab.UncompressedTransition = nil
-
- return tranTab, nil
-}
-
-func convertStateIDSliceToIntSlice(s []spec.StateID) []int {
- is := make([]int, len(s))
- for i, v := range s {
- is[i] = v.Int()
- }
- return is
-}
-
-func convertIntSliceToStateIDSlice(s []int) []spec.StateID {
- ss := make([]spec.StateID, len(s))
- for i, v := range s {
- ss[i] = spec.StateID(v)
- }
- return ss
-}
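
For orientation before the test file, a minimal sketch of how the Compile entry point above was driven. This is illustrative only: the import aliases mirror the ones in the deleted file, the "word" kind is made up, and nothing beyond types visible in this diff is assumed.

    package main

    import (
        "fmt"

        lexical "grammar/lexical"
        spec "spec/grammar"
    )

    func main() {
        lspec := &lexical.LexSpec{
            Entries: []*lexical.LexEntry{
                {Kind: spec.LexKindName("word"), Pattern: "[a-z]+"},
            },
        }
        // Compile returns the compiled spec, a fatal error, and per-kind compile errors.
        clspec, err, cerrs := lexical.Compile(lspec, lexical.CompressionLevelMin)
        if err != nil {
            fmt.Println(err, cerrs)
            return
        }
        fmt.Println(clspec.ModeNames, clspec.KindNames)
    }
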
diff --git a/grammar/lexical/compiler_test.go b/grammar/lexical/compiler_test.go
deleted file mode 100644
index 3336048..0000000
--- a/grammar/lexical/compiler_test.go
+++ /dev/null
@@ -1,338 +0,0 @@
-package lexical
-
-import (
- "encoding/json"
- "fmt"
- "testing"
-
- spec "spec/grammar"
-)
-
-func TestLexSpec_Validate(t *testing.T) {
- // We expect a spelling inconsistency error to occur.
- lspec := &LexSpec{
- Entries: []*LexEntry{
- {
- Modes: []spec.LexModeName{
- // 'Default' is a spelling inconsistency because 'default' is predefined.
- "Default",
- },
- Kind: "foo",
- Pattern: "foo",
- },
- },
- }
- err := lspec.Validate()
- if err == nil {
- t.Fatalf("expected error didn't occur")
- }
-}
-
-func TestSnakeCaseToUpperCamelCase(t *testing.T) {
- tests := []struct {
- snake string
- camel string
- }{
- {
- snake: "foo",
- camel: "Foo",
- },
- {
- snake: "foo_bar",
- camel: "FooBar",
- },
- {
- snake: "foo_bar_baz",
- camel: "FooBarBaz",
- },
- {
- snake: "Foo",
- camel: "Foo",
- },
- {
- snake: "fooBar",
- camel: "FooBar",
- },
- {
- snake: "FOO",
- camel: "FOO",
- },
- {
- snake: "FOO_BAR",
- camel: "FOOBAR",
- },
- {
- snake: "_foo_bar_",
- camel: "FooBar",
- },
- {
- snake: "___foo___bar___",
- camel: "FooBar",
- },
- }
- for _, tt := range tests {
- c := SnakeCaseToUpperCamelCase(tt.snake)
- if c != tt.camel {
- t.Errorf("unexpected string; want: %v, got: %v", tt.camel, c)
- }
- }
-}
-
-func TestFindSpellingInconsistencies(t *testing.T) {
- tests := []struct {
- ids []string
- duplicated [][]string
- }{
- {
- ids: []string{"foo", "foo"},
- duplicated: nil,
- },
- {
- ids: []string{"foo", "Foo"},
- duplicated: [][]string{{"Foo", "foo"}},
- },
- {
- ids: []string{"foo", "foo", "Foo"},
- duplicated: [][]string{{"Foo", "foo"}},
- },
- {
- ids: []string{"foo_bar_baz", "FooBarBaz"},
- duplicated: [][]string{{"FooBarBaz", "foo_bar_baz"}},
- },
- {
- ids: []string{"foo", "Foo", "bar", "Bar"},
- duplicated: [][]string{{"Bar", "bar"}, {"Foo", "foo"}},
- },
- {
- ids: []string{"foo", "Foo", "bar", "Bar", "baz", "bra"},
- duplicated: [][]string{{"Bar", "bar"}, {"Foo", "foo"}},
- },
- }
- for i, tt := range tests {
- t.Run(fmt.Sprintf("#%v", i), func(t *testing.T) {
- duplicated := FindSpellingInconsistencies(tt.ids)
- if len(duplicated) != len(tt.duplicated) {
- t.Fatalf("unexpected IDs; want: %#v, got: %#v", tt.duplicated, duplicated)
- }
- for i, dupIDs := range duplicated {
- if len(dupIDs) != len(tt.duplicated[i]) {
- t.Fatalf("unexpected IDs; want: %#v, got: %#v", tt.duplicated[i], dupIDs)
- }
- for j, id := range dupIDs {
- if id != tt.duplicated[i][j] {
- t.Fatalf("unexpected IDs; want: %#v, got: %#v", tt.duplicated[i], dupIDs)
- }
- }
- }
- })
- }
-}
-
-func TestCompile(t *testing.T) {
- tests := []struct {
- Caption string
- Spec string
- Err bool
- }{
- {
- Caption: "allow duplicates names between fragments and non-fragments",
- Spec: `
-{
- "name": "test",
- "entries": [
- {
- "kind": "a2z",
- "pattern": "\\f{a2z}"
- },
- {
- "fragment": true,
- "kind": "a2z",
- "pattern": "[a-z]"
- }
- ]
-}
-`,
- },
- {
- Caption: "don't allow duplicates names in non-fragments",
- Spec: `
-{
- "name": "test",
- "entries": [
- {
- "kind": "a2z",
- "pattern": "a|b|c|d|e|f|g|h|i|j|k|l|m|n|o|p|q|r|s|t|u|v|w|x|y|z"
- },
- {
- "kind": "a2z",
- "pattern": "[a-z]"
- }
- ]
-}
-`,
- Err: true,
- },
- {
- Caption: "don't allow duplicates names in fragments",
- Spec: `
-{
- "name": "test",
- "entries": [
- {
- "kind": "a2z",
- "pattern": "\\f{a2z}"
- },
- {
- "fragments": true,
- "kind": "a2z",
- "pattern": "a|b|c|d|e|f|g|h|i|j|k|l|m|n|o|p|q|r|s|t|u|v|w|x|y|z"
- },
- {
- "fragments": true,
- "kind": "a2z",
- "pattern": "[a-z]"
- }
- ]
-}
-`,
- Err: true,
- },
- {
- Caption: "don't allow kind names in the same mode to contain spelling inconsistencies",
- Spec: `
-{
- "name": "test",
- "entries": [
- {
- "kind": "foo_1",
- "pattern": "foo_1"
- },
- {
- "kind": "foo1",
- "pattern": "foo1"
- }
- ]
-}
-`,
- Err: true,
- },
- {
- Caption: "don't allow kind names across modes to contain spelling inconsistencies",
- Spec: `
-{
- "name": "test",
- "entries": [
- {
- "modes": ["default"],
- "kind": "foo_1",
- "pattern": "foo_1"
- },
- {
- "modes": ["other_mode"],
- "kind": "foo1",
- "pattern": "foo1"
- }
- ]
-}
-`,
- Err: true,
- },
- {
- Caption: "don't allow mode names to contain spelling inconsistencies",
- Spec: `
-{
- "name": "test",
- "entries": [
- {
- "modes": ["foo_1"],
- "kind": "a",
- "pattern": "a"
- },
- {
- "modes": ["foo1"],
- "kind": "b",
- "pattern": "b"
- }
- ]
-}
-`,
- Err: true,
- },
- {
- Caption: "allow fragment names in the same mode to contain spelling inconsistencies because fragments will not appear in output files",
- Spec: `
-{
- "name": "test",
- "entries": [
- {
- "kind": "a",
- "pattern": "a"
- },
- {
- "fragment": true,
- "kind": "foo_1",
- "pattern": "foo_1"
- },
- {
- "fragment": true,
- "kind": "foo1",
- "pattern": "foo1"
- }
- ]
-}
-`,
- },
- {
- Caption: "allow fragment names across modes to contain spelling inconsistencies because fragments will not appear in output files",
- Spec: `
-{
- "name": "test",
- "entries": [
- {
- "modes": ["default"],
- "kind": "a",
- "pattern": "a"
- },
- {
- "modes": ["default"],
- "fragment": true,
- "kind": "foo_1",
- "pattern": "foo_1"
- },
- {
- "modes": ["other_mode"],
- "fragment": true,
- "kind": "foo1",
- "pattern": "foo1"
- }
- ]
-}
-`,
- },
- }
- for i, tt := range tests {
- t.Run(fmt.Sprintf("#%v %s", i, tt.Caption), func(t *testing.T) {
- lspec := &LexSpec{}
- err := json.Unmarshal([]byte(tt.Spec), lspec)
- if err != nil {
- t.Fatalf("%v", err)
- }
- clspec, err, _ := Compile(lspec, CompressionLevelMin)
- if tt.Err {
- if err == nil {
- t.Fatalf("expected an error")
- }
- if clspec != nil {
- t.Fatalf("Compile function mustn't return a compiled specification")
- }
- } else {
- if err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
- if clspec == nil {
- t.Fatalf("Compile function must return a compiled specification")
- }
- }
- })
- }
-}
diff --git a/grammar/lexical/dfa/dfa.go b/grammar/lexical/dfa/dfa.go
deleted file mode 100644
index 884b168..0000000
--- a/grammar/lexical/dfa/dfa.go
+++ /dev/null
@@ -1,173 +0,0 @@
-package dfa
-
-import (
- "sort"
-
- spec "spec/grammar"
-)
-
-type symbolTable struct {
- symPos2Byte map[symbolPosition]byteRange
- endPos2ID map[symbolPosition]spec.LexModeKindID
-}
-
-func genSymbolTable(root byteTree) *symbolTable {
- symTab := &symbolTable{
- symPos2Byte: map[symbolPosition]byteRange{},
- endPos2ID: map[symbolPosition]spec.LexModeKindID{},
- }
- return genSymTab(symTab, root)
-}
-
-func genSymTab(symTab *symbolTable, node byteTree) *symbolTable {
- if node == nil {
- return symTab
- }
-
- switch n := node.(type) {
- case *symbolNode:
- symTab.symPos2Byte[n.pos] = byteRange{
- from: n.from,
- to: n.to,
- }
- case *endMarkerNode:
- symTab.endPos2ID[n.pos] = n.id
- default:
- left, right := node.children()
- genSymTab(symTab, left)
- genSymTab(symTab, right)
- }
- return symTab
-}
-
-type DFA struct {
- States []string
- InitialState string
- AcceptingStatesTable map[string]spec.LexModeKindID
- TransitionTable map[string][256]string
-}
-
-func GenDFA(root byteTree, symTab *symbolTable) *DFA {
- initialState := root.first()
- initialStateHash := initialState.hash()
- stateMap := map[string]*symbolPositionSet{
- initialStateHash: initialState,
- }
- tranTab := map[string][256]string{}
- {
- follow := genFollowTable(root)
- unmarkedStates := map[string]*symbolPositionSet{
- initialStateHash: initialState,
- }
- for len(unmarkedStates) > 0 {
- nextUnmarkedStates := map[string]*symbolPositionSet{}
- for hash, state := range unmarkedStates {
- tranTabOfState := [256]*symbolPositionSet{}
- for _, pos := range state.set() {
- if pos.isEndMark() {
- continue
- }
- valRange := symTab.symPos2Byte[pos]
- for symVal := valRange.from; symVal <= valRange.to; symVal++ {
- if tranTabOfState[symVal] == nil {
- tranTabOfState[symVal] = newSymbolPositionSet()
- }
- tranTabOfState[symVal].merge(follow[pos])
- }
- }
- for _, t := range tranTabOfState {
- if t == nil {
- continue
- }
- h := t.hash()
- if _, ok := stateMap[h]; ok {
- continue
- }
- stateMap[h] = t
- nextUnmarkedStates[h] = t
- }
- tabOfState := [256]string{}
- for v, t := range tranTabOfState {
- if t == nil {
- continue
- }
- tabOfState[v] = t.hash()
- }
- tranTab[hash] = tabOfState
- }
- unmarkedStates = nextUnmarkedStates
- }
- }
-
- accTab := map[string]spec.LexModeKindID{}
- {
- for h, s := range stateMap {
- for _, pos := range s.set() {
- if !pos.isEndMark() {
- continue
- }
- priorID, ok := accTab[h]
- if !ok {
- accTab[h] = symTab.endPos2ID[pos]
- } else {
- id := symTab.endPos2ID[pos]
- if id < priorID {
- accTab[h] = id
- }
- }
- }
- }
- }
-
- var states []string
- {
- for s := range stateMap {
- states = append(states, s)
- }
- sort.Slice(states, func(i, j int) bool {
- return states[i] < states[j]
- })
- }
-
- return &DFA{
- States: states,
- InitialState: initialStateHash,
- AcceptingStatesTable: accTab,
- TransitionTable: tranTab,
- }
-}
-
-func GenTransitionTable(dfa *DFA) (*spec.TransitionTable, error) {
- stateHash2ID := map[string]spec.StateID{}
- for i, s := range dfa.States {
- // Since 0 represents an invalid value in a transition table,
- // assign a number greater than or equal to 1 to states.
- stateHash2ID[s] = spec.StateID(i + spec.StateIDMin.Int())
- }
-
- acc := make([]spec.LexModeKindID, len(dfa.States)+1)
- for _, s := range dfa.States {
- id, ok := dfa.AcceptingStatesTable[s]
- if !ok {
- continue
- }
- acc[stateHash2ID[s]] = id
- }
-
- rowCount := len(dfa.States) + 1
- colCount := 256
- tran := make([]spec.StateID, rowCount*colCount)
- for s, tab := range dfa.TransitionTable {
- for v, to := range tab {
- tran[stateHash2ID[s].Int()*256+v] = stateHash2ID[to]
- }
- }
-
- return &spec.TransitionTable{
- InitialStateID: stateHash2ID[dfa.InitialState],
- AcceptingStates: acc,
- UncompressedTransition: tran,
- RowCount: rowCount,
- ColCount: colCount,
- }, nil
-}
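
GenTransitionTable lays the transition function out row-major: the row is the state ID, the column is the input byte, so one step is a single slice lookup at state*ColCount+byte. A sketch of a runner over the uncompressed table (simulate is a hypothetical helper, not part of the deleted code; it assumes compression level 0, where UncompressedTransition is still populated, and uses only fields defined above):

    func simulate(tt *spec.TransitionTable, input []byte) (spec.LexModeKindID, bool) {
        s := tt.InitialStateID
        for _, b := range input {
            // Row-major lookup: row = current state, column = input byte.
            s = tt.UncompressedTransition[s.Int()*tt.ColCount+int(b)]
            if s == spec.StateIDNil {
                return spec.LexModeKindIDNil, false // dead state: no outgoing transition
            }
        }
        id := tt.AcceptingStates[s]
        return id, id != spec.LexModeKindIDNil
    }

With the (a|b)*abb DFA built in the test below, this should return (1, true) for []byte("abb") and (0, false) for []byte("ab").
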
diff --git a/grammar/lexical/dfa/dfa_test.go b/grammar/lexical/dfa/dfa_test.go
deleted file mode 100644
index 9af9aeb..0000000
--- a/grammar/lexical/dfa/dfa_test.go
+++ /dev/null
@@ -1,121 +0,0 @@
-package dfa
-
-import (
- "strings"
- "testing"
-
- "grammar/lexical/parser"
- spec "spec/grammar"
-)
-
-func TestGenDFA(t *testing.T) {
- p := parser.NewParser(spec.LexKindName("test"), strings.NewReader("(a|b)*abb"))
- cpt, err := p.Parse()
- if err != nil {
- t.Fatal(err)
- }
- bt, symTab, err := ConvertCPTreeToByteTree(map[spec.LexModeKindID]parser.CPTree{
- spec.LexModeKindIDMin: cpt,
- })
- if err != nil {
- t.Fatal(err)
- }
- dfa := GenDFA(bt, symTab)
- if dfa == nil {
- t.Fatalf("DFA is nil")
- }
-
- symPos := func(n uint16) symbolPosition {
- pos, err := newSymbolPosition(n, false)
- if err != nil {
- panic(err)
- }
- return pos
- }
-
- endPos := func(n uint16) symbolPosition {
- pos, err := newSymbolPosition(n, true)
- if err != nil {
- panic(err)
- }
- return pos
- }
-
- s0 := newSymbolPositionSet().add(symPos(1)).add(symPos(2)).add(symPos(3))
- s1 := newSymbolPositionSet().add(symPos(1)).add(symPos(2)).add(symPos(3)).add(symPos(4))
- s2 := newSymbolPositionSet().add(symPos(1)).add(symPos(2)).add(symPos(3)).add(symPos(5))
- s3 := newSymbolPositionSet().add(symPos(1)).add(symPos(2)).add(symPos(3)).add(endPos(6))
-
- rune2Int := func(char rune, index int) uint8 {
- return uint8([]byte(string(char))[index])
- }
-
- tranS0 := [256]string{}
- tranS0[rune2Int('a', 0)] = s1.hash()
- tranS0[rune2Int('b', 0)] = s0.hash()
-
- tranS1 := [256]string{}
- tranS1[rune2Int('a', 0)] = s1.hash()
- tranS1[rune2Int('b', 0)] = s2.hash()
-
- tranS2 := [256]string{}
- tranS2[rune2Int('a', 0)] = s1.hash()
- tranS2[rune2Int('b', 0)] = s3.hash()
-
- tranS3 := [256]string{}
- tranS3[rune2Int('a', 0)] = s1.hash()
- tranS3[rune2Int('b', 0)] = s0.hash()
-
- expectedTranTab := map[string][256]string{
- s0.hash(): tranS0,
- s1.hash(): tranS1,
- s2.hash(): tranS2,
- s3.hash(): tranS3,
- }
- if len(dfa.TransitionTable) != len(expectedTranTab) {
- t.Errorf("transition table is mismatched: want: %v entries, got: %v entries", len(expectedTranTab), len(dfa.TransitionTable))
- }
- for h, eTranTab := range expectedTranTab {
- tranTab, ok := dfa.TransitionTable[h]
- if !ok {
- t.Errorf("no entry; hash: %v", h)
- continue
- }
- if len(tranTab) != len(eTranTab) {
- t.Errorf("transition table is mismatched: hash: %v, want: %v entries, got: %v entries", h, len(eTranTab), len(tranTab))
- }
- for c, eNext := range eTranTab {
- if eNext == "" {
- continue
- }
-
- next := tranTab[c]
- if next == "" {
- t.Errorf("no enatry: hash: %v, char: %v", h, c)
- }
- if next != eNext {
- t.Errorf("next state is mismatched: want: %v, got: %v", eNext, next)
- }
- }
- }
-
- if dfa.InitialState != s0.hash() {
- t.Errorf("initial state is mismatched: want: %v, got: %v", s0.hash(), dfa.InitialState)
- }
-
- accTab := map[string]spec.LexModeKindID{
- s3.hash(): 1,
- }
- if len(dfa.AcceptingStatesTable) != len(accTab) {
- t.Errorf("accepting states are mismatched: want: %v entries, got: %v entries", len(accTab), len(dfa.AcceptingStatesTable))
- }
- for eState, eID := range accTab {
- id, ok := dfa.AcceptingStatesTable[eState]
- if !ok {
- t.Errorf("accepting state is not found: state: %v", eState)
- }
- if id != eID {
- t.Errorf("ID is mismatched: state: %v, want: %v, got: %v", eState, eID, id)
- }
- }
-}
diff --git a/grammar/lexical/dfa/symbol_position.go b/grammar/lexical/dfa/symbol_position.go
deleted file mode 100644
index f154251..0000000
--- a/grammar/lexical/dfa/symbol_position.go
+++ /dev/null
@@ -1,182 +0,0 @@
-package dfa
-
-import (
- "encoding/binary"
- "fmt"
- "strings"
-)
-
-type symbolPosition uint16
-
-const (
- symbolPositionNil symbolPosition = 0x0000
-
- symbolPositionMin uint16 = 0x0001
- symbolPositionMax uint16 = 0x7fff
-
- symbolPositionMaskSymbol uint16 = 0x0000
- symbolPositionMaskEndMark uint16 = 0x8000
-
- symbolPositionMaskValue uint16 = 0x7fff
-)
-
-func newSymbolPosition(n uint16, endMark bool) (symbolPosition, error) {
- if n < symbolPositionMin || n > symbolPositionMax {
- return symbolPositionNil, fmt.Errorf("symbol position must be within %v to %v: n: %v, endMark: %v", symbolPositionMin, symbolPositionMax, n, endMark)
- }
- if endMark {
- return symbolPosition(n | symbolPositionMaskEndMark), nil
- }
- return symbolPosition(n | symbolPositionMaskSymbol), nil
-}
-
-func (p symbolPosition) String() string {
- if p.isEndMark() {
- return fmt.Sprintf("end#%v", uint16(p)&symbolPositionMaskValue)
- }
- return fmt.Sprintf("sym#%v", uint16(p)&symbolPositionMaskValue)
-}
-
-func (p symbolPosition) isEndMark() bool {
- return uint16(p)&symbolPositionMaskEndMark != 0
-}
-
-func (p symbolPosition) describe() (uint16, bool) {
- v := uint16(p) & symbolPositionMaskValue
- if p.isEndMark() {
- return v, true
- }
- return v, false
-}
-
-type symbolPositionSet struct {
- // `s` represents a set of symbol positions.
- // However, immediately after adding a symbol position, the elements may be duplicated.
- // When you need a sorted set with no duplicates, you can get one via the set method.
- s []symbolPosition
- sorted bool
-}
-
-func newSymbolPositionSet() *symbolPositionSet {
- return &symbolPositionSet{
- s: []symbolPosition{},
- sorted: false,
- }
-}
-
-func (s *symbolPositionSet) String() string {
- if len(s.s) <= 0 {
- return "{}"
- }
- ps := s.sortAndRemoveDuplicates()
- var b strings.Builder
- fmt.Fprintf(&b, "{")
- for i, p := range ps {
- if i <= 0 {
- fmt.Fprintf(&b, "%v", p)
- continue
- }
- fmt.Fprintf(&b, ", %v", p)
- }
- fmt.Fprintf(&b, "}")
- return b.String()
-}
-
-func (s *symbolPositionSet) set() []symbolPosition {
- s.sortAndRemoveDuplicates()
- return s.s
-}
-
-func (s *symbolPositionSet) add(pos symbolPosition) *symbolPositionSet {
- s.s = append(s.s, pos)
- s.sorted = false
- return s
-}
-
-func (s *symbolPositionSet) merge(t *symbolPositionSet) *symbolPositionSet {
- s.s = append(s.s, t.s...)
- s.sorted = false
- return s
-}
-
-func (s *symbolPositionSet) hash() string {
- if len(s.s) <= 0 {
- return ""
- }
- sorted := s.sortAndRemoveDuplicates()
- var buf []byte
- for _, p := range sorted {
- b := make([]byte, 8)
- binary.PutUvarint(b, uint64(p))
- buf = append(buf, b...)
- }
- // Convert to a string so it can be used as a map key.
- // Note that this byte sequence is built from symbol position values,
- // so it is not a well-formed UTF-8 sequence.
- return string(buf)
-}
-
-func (s *symbolPositionSet) sortAndRemoveDuplicates() []symbolPosition {
- if s.sorted || len(s.s) == 0 {
- return s.s
- }
-
- sortSymbolPositions(s.s, 0, len(s.s)-1)
-
- // Remove duplicates.
- lastV := s.s[0]
- nextIdx := 1
- for _, v := range s.s[1:] {
- if v == lastV {
- continue
- }
- s.s[nextIdx] = v
- nextIdx++
- lastV = v
- }
- s.s = s.s[:nextIdx]
- s.sorted = true
-
- return s.s
-}
-
-// sortSymbolPositions sorts a slice of symbol positions in place using quicksort.
-func sortSymbolPositions(ps []symbolPosition, left, right int) {
- if left >= right {
- return
- }
- var pivot symbolPosition
- {
- // Use a median as a pivot.
- p1 := ps[left]
- p2 := ps[(left+right)/2]
- p3 := ps[right]
- if p1 > p2 {
- p1, p2 = p2, p1
- }
- if p2 > p3 {
- p2 = p3
- if p1 > p2 {
- p2 = p1
- }
- }
- pivot = p2
- }
- i := left
- j := right
- for i <= j {
- for ps[i] < pivot {
- i++
- }
- for ps[j] > pivot {
- j--
- }
- if i <= j {
- ps[i], ps[j] = ps[j], ps[i]
- i++
- j--
- }
- }
- sortSymbolPositions(ps, left, j)
- sortSymbolPositions(ps, i, right)
-}
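
The packing is the whole trick here: a symbolPosition is a uint16 whose low 15 bits carry the position number and whose top bit flags an end mark. An illustrative round trip (written as if inside package dfa, since these identifiers are unexported):

    pos, err := newSymbolPosition(6, true) // 0x8006: end mark at position 6
    if err != nil {
        panic(err)
    }
    n, endMark := pos.describe() // n == 6, endMark == true
    fmt.Println(pos, n, endMark) // prints "end#6 6 true"
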
diff --git a/grammar/lexical/dfa/symbol_position_test.go b/grammar/lexical/dfa/symbol_position_test.go
deleted file mode 100644
index c867f64..0000000
--- a/grammar/lexical/dfa/symbol_position_test.go
+++ /dev/null
@@ -1,79 +0,0 @@
-package dfa
-
-import (
- "fmt"
- "testing"
-)
-
-func TestNewSymbolPosition(t *testing.T) {
- tests := []struct {
- n uint16
- endMark bool
- err bool
- }{
- {
- n: 0,
- endMark: false,
- err: true,
- },
- {
- n: 0,
- endMark: true,
- err: true,
- },
- {
- n: symbolPositionMin - 1,
- endMark: false,
- err: true,
- },
- {
- n: symbolPositionMin - 1,
- endMark: true,
- err: true,
- },
- {
- n: symbolPositionMin,
- endMark: false,
- },
- {
- n: symbolPositionMin,
- endMark: true,
- },
- {
- n: symbolPositionMax,
- endMark: false,
- },
- {
- n: symbolPositionMax,
- endMark: true,
- },
- {
- n: symbolPositionMax + 1,
- endMark: false,
- err: true,
- },
- {
- n: symbolPositionMax + 1,
- endMark: true,
- err: true,
- },
- }
- for i, tt := range tests {
- t.Run(fmt.Sprintf("#%v n: %v, endMark: %v", i, tt.n, tt.endMark), func(t *testing.T) {
- pos, err := newSymbolPosition(tt.n, tt.endMark)
- if tt.err {
- if err == nil {
- t.Fatal("err is nil")
- }
- return
- }
- if err != nil {
- t.Fatal(err)
- }
- n, endMark := pos.describe()
- if n != tt.n || endMark != tt.endMark {
- t.Errorf("unexpected symbol position: want: n: %v, endMark: %v, got: n: %v, endMark: %v", tt.n, tt.endMark, n, endMark)
- }
- })
- }
-}
diff --git a/grammar/lexical/dfa/tree.go b/grammar/lexical/dfa/tree.go
deleted file mode 100644
index 85061f9..0000000
--- a/grammar/lexical/dfa/tree.go
+++ /dev/null
@@ -1,567 +0,0 @@
-package dfa
-
-import (
- "fmt"
- "io"
- "sort"
-
- "grammar/lexical/parser"
- spec "spec/grammar"
- "utf8"
-)
-
-type byteTree interface {
- fmt.Stringer
- children() (byteTree, byteTree)
- nullable() bool
- first() *symbolPositionSet
- last() *symbolPositionSet
- clone() byteTree
-}
-
-var (
- _ byteTree = &symbolNode{}
- _ byteTree = &endMarkerNode{}
- _ byteTree = &concatNode{}
- _ byteTree = &altNode{}
- _ byteTree = &repeatNode{}
- _ byteTree = &optionNode{}
-)
-
-type byteRange struct {
- from byte
- to byte
-}
-
-type symbolNode struct {
- byteRange
- pos symbolPosition
- firstMemo *symbolPositionSet
- lastMemo *symbolPositionSet
-}
-
-func newSymbolNode(value byte) *symbolNode {
- return &symbolNode{
- byteRange: byteRange{
- from: value,
- to: value,
- },
- pos: symbolPositionNil,
- }
-}
-
-func newRangeSymbolNode(from, to byte) *symbolNode {
- return &symbolNode{
- byteRange: byteRange{
- from: from,
- to: to,
- },
- pos: symbolPositionNil,
- }
-}
-
-func (n *symbolNode) String() string {
- return fmt.Sprintf("symbol: value: %v-%v, pos: %v", n.from, n.to, n.pos)
-}
-
-func (n *symbolNode) children() (byteTree, byteTree) {
- return nil, nil
-}
-
-func (n *symbolNode) nullable() bool {
- return false
-}
-
-func (n *symbolNode) first() *symbolPositionSet {
- if n.firstMemo == nil {
- n.firstMemo = newSymbolPositionSet()
- n.firstMemo.add(n.pos)
- }
- return n.firstMemo
-}
-
-func (n *symbolNode) last() *symbolPositionSet {
- if n.lastMemo == nil {
- n.lastMemo = newSymbolPositionSet()
- n.lastMemo.add(n.pos)
- }
- return n.lastMemo
-}
-
-func (n *symbolNode) clone() byteTree {
- return newRangeSymbolNode(n.from, n.to)
-}
-
-type endMarkerNode struct {
- id spec.LexModeKindID
- pos symbolPosition
- firstMemo *symbolPositionSet
- lastMemo *symbolPositionSet
-}
-
-func newEndMarkerNode(id spec.LexModeKindID) *endMarkerNode {
- return &endMarkerNode{
- id: id,
- pos: symbolPositionNil,
- }
-}
-
-func (n *endMarkerNode) String() string {
- return fmt.Sprintf("end: pos: %v", n.pos)
-}
-
-func (n *endMarkerNode) children() (byteTree, byteTree) {
- return nil, nil
-}
-
-func (n *endMarkerNode) nullable() bool {
- return false
-}
-
-func (n *endMarkerNode) first() *symbolPositionSet {
- if n.firstMemo == nil {
- n.firstMemo = newSymbolPositionSet()
- n.firstMemo.add(n.pos)
- }
- return n.firstMemo
-}
-
-func (n *endMarkerNode) last() *symbolPositionSet {
- if n.lastMemo == nil {
- n.lastMemo = newSymbolPositionSet()
- n.lastMemo.add(n.pos)
- }
- return n.lastMemo
-}
-
-func (n *endMarkerNode) clone() byteTree {
- return newEndMarkerNode(n.id)
-}
-
-type concatNode struct {
- left byteTree
- right byteTree
- firstMemo *symbolPositionSet
- lastMemo *symbolPositionSet
-}
-
-func newConcatNode(left, right byteTree) *concatNode {
- return &concatNode{
- left: left,
- right: right,
- }
-}
-
-func (n *concatNode) String() string {
- return "concat"
-}
-
-func (n *concatNode) children() (byteTree, byteTree) {
- return n.left, n.right
-}
-
-func (n *concatNode) nullable() bool {
- return n.left.nullable() && n.right.nullable()
-}
-
-func (n *concatNode) first() *symbolPositionSet {
- if n.firstMemo == nil {
- n.firstMemo = newSymbolPositionSet()
- n.firstMemo.merge(n.left.first())
- if n.left.nullable() {
- n.firstMemo.merge(n.right.first())
- }
- n.firstMemo.sortAndRemoveDuplicates()
- }
- return n.firstMemo
-}
-
-func (n *concatNode) last() *symbolPositionSet {
- if n.lastMemo == nil {
- n.lastMemo = newSymbolPositionSet()
- n.lastMemo.merge(n.right.last())
- if n.right.nullable() {
- n.lastMemo.merge(n.left.last())
- }
- n.lastMemo.sortAndRemoveDuplicates()
- }
- return n.lastMemo
-}
-
-func (n *concatNode) clone() byteTree {
- return newConcatNode(n.left.clone(), n.right.clone())
-}
-
-type altNode struct {
- left byteTree
- right byteTree
- firstMemo *symbolPositionSet
- lastMemo *symbolPositionSet
-}
-
-func newAltNode(left, right byteTree) *altNode {
- return &altNode{
- left: left,
- right: right,
- }
-}
-
-func (n *altNode) String() string {
- return "alt"
-}
-
-func (n *altNode) children() (byteTree, byteTree) {
- return n.left, n.right
-}
-
-func (n *altNode) nullable() bool {
- return n.left.nullable() || n.right.nullable()
-}
-
-func (n *altNode) first() *symbolPositionSet {
- if n.firstMemo == nil {
- n.firstMemo = newSymbolPositionSet()
- n.firstMemo.merge(n.left.first())
- n.firstMemo.merge(n.right.first())
- n.firstMemo.sortAndRemoveDuplicates()
- }
- return n.firstMemo
-}
-
-func (n *altNode) last() *symbolPositionSet {
- if n.lastMemo == nil {
- n.lastMemo = newSymbolPositionSet()
- n.lastMemo.merge(n.left.last())
- n.lastMemo.merge(n.right.last())
- n.lastMemo.sortAndRemoveDuplicates()
- }
- return n.lastMemo
-}
-
-func (n *altNode) clone() byteTree {
- return newAltNode(n.left.clone(), n.right.clone())
-}
-
-type repeatNode struct {
- left byteTree
- firstMemo *symbolPositionSet
- lastMemo *symbolPositionSet
-}
-
-func newRepeatNode(left byteTree) *repeatNode {
- return &repeatNode{
- left: left,
- }
-}
-
-func (n *repeatNode) String() string {
- return "repeat"
-}
-
-func (n *repeatNode) children() (byteTree, byteTree) {
- return n.left, nil
-}
-
-func (n *repeatNode) nullable() bool {
- return true
-}
-
-func (n *repeatNode) first() *symbolPositionSet {
- if n.firstMemo == nil {
- n.firstMemo = newSymbolPositionSet()
- n.firstMemo.merge(n.left.first())
- n.firstMemo.sortAndRemoveDuplicates()
- }
- return n.firstMemo
-}
-
-func (n *repeatNode) last() *symbolPositionSet {
- if n.lastMemo == nil {
- n.lastMemo = newSymbolPositionSet()
- n.lastMemo.merge(n.left.last())
- n.lastMemo.sortAndRemoveDuplicates()
- }
- return n.lastMemo
-}
-
-func (n *repeatNode) clone() byteTree {
- return newRepeatNode(n.left.clone())
-}
-
-type optionNode struct {
- left byteTree
- firstMemo *symbolPositionSet
- lastMemo *symbolPositionSet
-}
-
-func newOptionNode(left byteTree) *optionNode {
- return &optionNode{
- left: left,
- }
-}
-
-func (n *optionNode) String() string {
- return "option"
-}
-
-func (n *optionNode) children() (byteTree, byteTree) {
- return n.left, nil
-}
-
-func (n *optionNode) nullable() bool {
- return true
-}
-
-func (n *optionNode) first() *symbolPositionSet {
- if n.firstMemo == nil {
- n.firstMemo = newSymbolPositionSet()
- n.firstMemo.merge(n.left.first())
- n.firstMemo.sortAndRemoveDuplicates()
- }
- return n.firstMemo
-}
-
-func (n *optionNode) last() *symbolPositionSet {
- if n.lastMemo == nil {
- n.lastMemo = newSymbolPositionSet()
- n.lastMemo.merge(n.left.last())
- n.lastMemo.sortAndRemoveDuplicates()
- }
- return n.lastMemo
-}
-
-func (n *optionNode) clone() byteTree {
- return newOptionNode(n.left.clone())
-}
-
-type followTable map[symbolPosition]*symbolPositionSet
-
-func genFollowTable(root byteTree) followTable {
- follow := followTable{}
- calcFollow(follow, root)
- return follow
-}
-
-func calcFollow(follow followTable, ast byteTree) {
- if ast == nil {
- return
- }
- left, right := ast.children()
- calcFollow(follow, left)
- calcFollow(follow, right)
- switch n := ast.(type) {
- case *concatNode:
- l, r := n.children()
- for _, p := range l.last().set() {
- if _, ok := follow[p]; !ok {
- follow[p] = newSymbolPositionSet()
- }
- follow[p].merge(r.first())
- }
- case *repeatNode:
- for _, p := range n.last().set() {
- if _, ok := follow[p]; !ok {
- follow[p] = newSymbolPositionSet()
- }
- follow[p].merge(n.first())
- }
- }
-}
-
-func positionSymbols(node byteTree, n uint16) (uint16, error) {
- if node == nil {
- return n, nil
- }
-
- l, r := node.children()
- p := n
- p, err := positionSymbols(l, p)
- if err != nil {
- return p, err
- }
- p, err = positionSymbols(r, p)
- if err != nil {
- return p, err
- }
- switch n := node.(type) {
- case *symbolNode:
- n.pos, err = newSymbolPosition(p, false)
- if err != nil {
- return p, err
- }
- p++
- case *endMarkerNode:
- n.pos, err = newSymbolPosition(p, true)
- if err != nil {
- return p, err
- }
- p++
- }
- node.first()
- node.last()
- return p, nil
-}
-
-func concat(ts ...byteTree) byteTree {
- nonNilNodes := []byteTree{}
- for _, t := range ts {
- if t == nil {
- continue
- }
- nonNilNodes = append(nonNilNodes, t)
- }
- if len(nonNilNodes) <= 0 {
- return nil
- }
- if len(nonNilNodes) == 1 {
- return nonNilNodes[0]
- }
- concat := newConcatNode(nonNilNodes[0], nonNilNodes[1])
- for _, t := range nonNilNodes[2:] {
- concat = newConcatNode(concat, t)
- }
- return concat
-}
-
-func oneOf(ts ...byteTree) byteTree {
- nonNilNodes := []byteTree{}
- for _, t := range ts {
- if t == nil {
- continue
- }
- nonNilNodes = append(nonNilNodes, t)
- }
- if len(nonNilNodes) <= 0 {
- return nil
- }
- if len(nonNilNodes) == 1 {
- return nonNilNodes[0]
- }
- alt := newAltNode(nonNilNodes[0], nonNilNodes[1])
- for _, t := range nonNilNodes[2:] {
- alt = newAltNode(alt, t)
- }
- return alt
-}
-
-//nolint:unused
-func printByteTree(w io.Writer, t byteTree, ruledLine string, childRuledLinePrefix string, withAttrs bool) {
- if t == nil {
- return
- }
- fmt.Fprintf(w, "%v%v", ruledLine, t)
- if withAttrs {
- fmt.Fprintf(w, ", nullable: %v, first: %v, last: %v", t.nullable(), t.first(), t.last())
- }
- fmt.Fprintf(w, "\n")
- left, right := t.children()
- children := []byteTree{}
- if left != nil {
- children = append(children, left)
- }
- if right != nil {
- children = append(children, right)
- }
- num := len(children)
- for i, child := range children {
- line := "└─ "
- if num > 1 {
- if i == 0 {
- line = "├─ "
- } else if i < num-1 {
- line = "│ "
- }
- }
- prefix := "│ "
- if i >= num-1 {
- prefix = " "
- }
- printByteTree(w, child, childRuledLinePrefix+line, childRuledLinePrefix+prefix, withAttrs)
- }
-}
-
-func ConvertCPTreeToByteTree(cpTrees map[spec.LexModeKindID]parser.CPTree) (byteTree, *symbolTable, error) {
- var ids []spec.LexModeKindID
- for id := range cpTrees {
- ids = append(ids, id)
- }
- sort.Slice(ids, func(i, j int) bool {
- return ids[i] < ids[j]
- })
-
- var bt byteTree
- for _, id := range ids {
- cpTree := cpTrees[id]
- t, err := convCPTreeToByteTree(cpTree)
- if err != nil {
- return nil, nil, err
- }
- bt = oneOf(bt, concat(t, newEndMarkerNode(id)))
- }
- _, err := positionSymbols(bt, symbolPositionMin)
- if err != nil {
- return nil, nil, err
- }
-
- return bt, genSymbolTable(bt), nil
-}
-
-func convCPTreeToByteTree(cpTree parser.CPTree) (byteTree, error) {
- if from, to, ok := cpTree.Range(); ok {
- bs, err := utf8.GenCharBlocks(from, to)
- if err != nil {
- return nil, err
- }
- var a byteTree
- for _, b := range bs {
- var c byteTree
- for i := 0; i < len(b.From); i++ {
- c = concat(c, newRangeSymbolNode(b.From[i], b.To[i]))
- }
- a = oneOf(a, c)
- }
- return a, nil
- }
-
- if tree, ok := cpTree.Repeatable(); ok {
- t, err := convCPTreeToByteTree(tree)
- if err != nil {
- return nil, err
- }
- return newRepeatNode(t), nil
- }
-
- if tree, ok := cpTree.Optional(); ok {
- t, err := convCPTreeToByteTree(tree)
- if err != nil {
- return nil, err
- }
- return newOptionNode(t), nil
- }
-
- if left, right, ok := cpTree.Concatenation(); ok {
- l, err := convCPTreeToByteTree(left)
- if err != nil {
- return nil, err
- }
- r, err := convCPTreeToByteTree(right)
- if err != nil {
- return nil, err
- }
- return newConcatNode(l, r), nil
- }
-
- if left, right, ok := cpTree.Alternatives(); ok {
- l, err := convCPTreeToByteTree(left)
- if err != nil {
- return nil, err
- }
- r, err := convCPTreeToByteTree(right)
- if err != nil {
- return nil, err
- }
- return newAltNode(l, r), nil
- }
-
- return nil, fmt.Errorf("invalid tree type: %T", cpTree)
-}
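
ConvertCPTreeToByteTree stitches all patterns of a mode into one tree: each pattern is concatenated with an end-marker node carrying its mode-kind ID, the per-kind trees are unioned with oneOf, and positionSymbols then numbers the leaves left to right. A reduced sketch of that shape for two single-byte patterns (again as if inside package dfa; the IDs 1 and 2 are arbitrary):

    t1 := concat(newSymbolNode(byte('a')), newEndMarkerNode(spec.LexModeKindID(1)))
    t2 := concat(newSymbolNode(byte('b')), newEndMarkerNode(spec.LexModeKindID(2)))
    root := oneOf(t1, t2)
    if _, err := positionSymbols(root, symbolPositionMin); err != nil {
        panic(err)
    }
    // root now answers nullable/first/last, and genFollowTable(root)
    // yields the followpos sets that GenDFA consumes.
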
diff --git a/grammar/lexical/dfa/tree_test.go b/grammar/lexical/dfa/tree_test.go
deleted file mode 100644
index 188fe95..0000000
--- a/grammar/lexical/dfa/tree_test.go
+++ /dev/null
@@ -1,257 +0,0 @@
-package dfa
-
-import (
- "fmt"
- "strings"
- "testing"
-
- "grammar/lexical/parser"
- spec "spec/grammar"
-)
-
-func TestByteTree(t *testing.T) {
- tests := []struct {
- root byteTree
- nullable bool
- first *symbolPositionSet
- last *symbolPositionSet
- }{
- {
- root: newSymbolNodeWithPos(0, 1),
- nullable: false,
- first: newSymbolPositionSet().add(1),
- last: newSymbolPositionSet().add(1),
- },
- {
- root: newEndMarkerNodeWithPos(1, 1),
- nullable: false,
- first: newSymbolPositionSet().add(1),
- last: newSymbolPositionSet().add(1),
- },
- {
- root: newConcatNode(
- newSymbolNodeWithPos(0, 1),
- newSymbolNodeWithPos(0, 2),
- ),
- nullable: false,
- first: newSymbolPositionSet().add(1),
- last: newSymbolPositionSet().add(2),
- },
- {
- root: newConcatNode(
- newRepeatNode(newSymbolNodeWithPos(0, 1)),
- newSymbolNodeWithPos(0, 2),
- ),
- nullable: false,
- first: newSymbolPositionSet().add(1).add(2),
- last: newSymbolPositionSet().add(2),
- },
- {
- root: newConcatNode(
- newSymbolNodeWithPos(0, 1),
- newRepeatNode(newSymbolNodeWithPos(0, 2)),
- ),
- nullable: false,
- first: newSymbolPositionSet().add(1),
- last: newSymbolPositionSet().add(1).add(2),
- },
- {
- root: newConcatNode(
- newRepeatNode(newSymbolNodeWithPos(0, 1)),
- newRepeatNode(newSymbolNodeWithPos(0, 2)),
- ),
- nullable: true,
- first: newSymbolPositionSet().add(1).add(2),
- last: newSymbolPositionSet().add(1).add(2),
- },
- {
- root: newAltNode(
- newSymbolNodeWithPos(0, 1),
- newSymbolNodeWithPos(0, 2),
- ),
- nullable: false,
- first: newSymbolPositionSet().add(1).add(2),
- last: newSymbolPositionSet().add(1).add(2),
- },
- {
- root: newAltNode(
- newRepeatNode(newSymbolNodeWithPos(0, 1)),
- newSymbolNodeWithPos(0, 2),
- ),
- nullable: true,
- first: newSymbolPositionSet().add(1).add(2),
- last: newSymbolPositionSet().add(1).add(2),
- },
- {
- root: newAltNode(
- newSymbolNodeWithPos(0, 1),
- newRepeatNode(newSymbolNodeWithPos(0, 2)),
- ),
- nullable: true,
- first: newSymbolPositionSet().add(1).add(2),
- last: newSymbolPositionSet().add(1).add(2),
- },
- {
- root: newAltNode(
- newRepeatNode(newSymbolNodeWithPos(0, 1)),
- newRepeatNode(newSymbolNodeWithPos(0, 2)),
- ),
- nullable: true,
- first: newSymbolPositionSet().add(1).add(2),
- last: newSymbolPositionSet().add(1).add(2),
- },
- {
- root: newRepeatNode(newSymbolNodeWithPos(0, 1)),
- nullable: true,
- first: newSymbolPositionSet().add(1),
- last: newSymbolPositionSet().add(1),
- },
- {
- root: newOptionNode(newSymbolNodeWithPos(0, 1)),
- nullable: true,
- first: newSymbolPositionSet().add(1),
- last: newSymbolPositionSet().add(1),
- },
- }
- for i, tt := range tests {
- t.Run(fmt.Sprintf("#%v", i), func(t *testing.T) {
- if tt.root.nullable() != tt.nullable {
- t.Errorf("unexpected nullable attribute; want: %v, got: %v", tt.nullable, tt.root.nullable())
- }
- if tt.first.hash() != tt.root.first().hash() {
- t.Errorf("unexpected first positions attribute; want: %v, got: %v", tt.first, tt.root.first())
- }
- if tt.last.hash() != tt.root.last().hash() {
- t.Errorf("unexpected last positions attribute; want: %v, got: %v", tt.last, tt.root.last())
- }
- })
- }
-}
-
-func newSymbolNodeWithPos(v byte, pos symbolPosition) *symbolNode {
- n := newSymbolNode(v)
- n.pos = pos
- return n
-}
-
-func newEndMarkerNodeWithPos(id int, pos symbolPosition) *endMarkerNode {
- n := newEndMarkerNode(spec.LexModeKindID(id))
- n.pos = pos
- return n
-}
-
-func TestFollowAndSymbolTable(t *testing.T) {
- symPos := func(n uint16) symbolPosition {
- pos, err := newSymbolPosition(n, false)
- if err != nil {
- panic(err)
- }
- return pos
- }
-
- endPos := func(n uint16) symbolPosition {
- pos, err := newSymbolPosition(n, true)
- if err != nil {
- panic(err)
- }
- return pos
- }
-
- p := parser.NewParser(spec.LexKindName("test"), strings.NewReader("(a|b)*abb"))
- cpt, err := p.Parse()
- if err != nil {
- t.Fatal(err)
- }
-
- bt, symTab, err := ConvertCPTreeToByteTree(map[spec.LexModeKindID]parser.CPTree{
- spec.LexModeKindIDMin: cpt,
- })
- if err != nil {
- t.Fatal(err)
- }
-
- {
- followTab := genFollowTable(bt)
- if followTab == nil {
- t.Fatal("follow table is nil")
- }
- expectedFollowTab := followTable{
- 1: newSymbolPositionSet().add(symPos(1)).add(symPos(2)).add(symPos(3)),
- 2: newSymbolPositionSet().add(symPos(1)).add(symPos(2)).add(symPos(3)),
- 3: newSymbolPositionSet().add(symPos(4)),
- 4: newSymbolPositionSet().add(symPos(5)),
- 5: newSymbolPositionSet().add(endPos(6)),
- }
- testFollowTable(t, expectedFollowTab, followTab)
- }
-
- {
- entry := func(v byte) byteRange {
- return byteRange{
- from: v,
- to: v,
- }
- }
-
- expectedSymTab := &symbolTable{
- symPos2Byte: map[symbolPosition]byteRange{
- symPos(1): entry(byte('a')),
- symPos(2): entry(byte('b')),
- symPos(3): entry(byte('a')),
- symPos(4): entry(byte('b')),
- symPos(5): entry(byte('b')),
- },
- endPos2ID: map[symbolPosition]spec.LexModeKindID{
- endPos(6): 1,
- },
- }
- testSymbolTable(t, expectedSymTab, symTab)
- }
-}
-
-func testFollowTable(t *testing.T, expected, actual followTable) {
- if len(actual) != len(expected) {
- t.Errorf("unexpected number of the follow table entries; want: %v, got: %v", len(expected), len(actual))
- }
- for ePos, eSet := range expected {
- aSet, ok := actual[ePos]
- if !ok {
- t.Fatalf("follow entry is not found: position: %v, follow: %v", ePos, eSet)
- }
- if aSet.hash() != eSet.hash() {
- t.Fatalf("follow entry of position %v is mismatched: want: %v, got: %v", ePos, aSet, eSet)
- }
- }
-}
-
-func testSymbolTable(t *testing.T, expected, actual *symbolTable) {
- t.Helper()
-
- if len(actual.symPos2Byte) != len(expected.symPos2Byte) {
- t.Errorf("unexpected symPos2Byte entries: want: %v entries, got: %v entries", len(expected.symPos2Byte), len(actual.symPos2Byte))
- }
- for ePos, eByte := range expected.symPos2Byte {
- b, ok := actual.symPos2Byte[ePos]
- if !ok {
- t.Errorf("a symbol position entry is not found: %v -> %v", ePos, eByte)
- continue
- }
- if b.from != eByte.from || b.to != eByte.to {
- t.Errorf("unexpected symbol position entry: want: %v -> %v, got: %v -> %v", ePos, eByte, ePos, b)
- }
- }
-
- if len(actual.endPos2ID) != len(expected.endPos2ID) {
- t.Errorf("unexpected endPos2ID entries: want: %v entries, got: %v entries", len(expected.endPos2ID), len(actual.endPos2ID))
- }
- for ePos, eID := range expected.endPos2ID {
- id, ok := actual.endPos2ID[ePos]
- if !ok {
- t.Errorf("an end position entry is not found: %v -> %v", ePos, eID)
- continue
- }
- if id != eID {
- t.Errorf("unexpected end position entry: want: %v -> %v, got: %v -> %v", ePos, eID, ePos, id)
- }
- }
-}
diff --git a/grammar/lexical/entry.go b/grammar/lexical/entry.go
deleted file mode 100644
index 6d2fbe3..0000000
--- a/grammar/lexical/entry.go
+++ /dev/null
@@ -1,171 +0,0 @@
-package lexical
-
-import (
- "fmt"
- "sort"
- "strings"
-
- spec "spec/grammar"
-)
-
-type LexEntry struct {
- Kind spec.LexKindName
- Pattern string
- Modes []spec.LexModeName
- Push spec.LexModeName
- Pop bool
- Fragment bool
-}
-
-type LexSpec struct {
- Entries []*LexEntry
-}
-
-func (s *LexSpec) Validate() error {
- if len(s.Entries) <= 0 {
- return fmt.Errorf("the lexical specification must have at least one entry")
- }
- {
- ks := map[string]struct{}{}
- fks := map[string]struct{}{}
- for _, e := range s.Entries {
- // Allow duplicate names between fragments and non-fragments.
- if e.Fragment {
- if _, exist := fks[e.Kind.String()]; exist {
- return fmt.Errorf("kinds `%v` are duplicates", e.Kind)
- }
- fks[e.Kind.String()] = struct{}{}
- } else {
- if _, exist := ks[e.Kind.String()]; exist {
- return fmt.Errorf("kinds `%v` are duplicates", e.Kind)
- }
- ks[e.Kind.String()] = struct{}{}
- }
- }
- }
- {
- kinds := []string{}
- modes := []string{
- spec.LexModeNameDefault.String(), // This is a predefined mode.
- }
- for _, e := range s.Entries {
- if e.Fragment {
- continue
- }
-
- kinds = append(kinds, e.Kind.String())
-
- for _, m := range e.Modes {
- modes = append(modes, m.String())
- }
- }
-
- kindErrs := findSpellingInconsistenciesErrors(kinds, nil)
- modeErrs := findSpellingInconsistenciesErrors(modes, func(ids []string) error {
- if SnakeCaseToUpperCamelCase(ids[0]) == SnakeCaseToUpperCamelCase(spec.LexModeNameDefault.String()) {
- var b strings.Builder
- fmt.Fprintf(&b, "%+v", ids[0])
- for _, id := range ids[1:] {
- fmt.Fprintf(&b, ", %+v", id)
- }
- return fmt.Errorf("these identifiers are treated as the same. please use the same spelling as predefined '%v': %v", spec.LexModeNameDefault, b.String())
- }
- return nil
- })
- errs := append(kindErrs, modeErrs...)
- if len(errs) > 0 {
- var b strings.Builder
- fmt.Fprintf(&b, "%v", errs[0])
- for _, err := range errs[1:] {
- fmt.Fprintf(&b, "\n%v", err)
- }
- return fmt.Errorf("%s", b.String())
- }
- }
-
- return nil
-}
-
-func findSpellingInconsistenciesErrors(ids []string, hook func(ids []string) error) []error {
- duplicated := FindSpellingInconsistencies(ids)
- if len(duplicated) == 0 {
- return nil
- }
-
- var errs []error
- for _, dup := range duplicated {
- if hook != nil {
- err := hook(dup)
- if err != nil {
- errs = append(errs, err)
- continue
- }
- }
-
- var b strings.Builder
- fmt.Fprintf(&b, "%+v", dup[0])
- for _, id := range dup[1:] {
- fmt.Fprintf(&b, ", %+v", id)
- }
- err := fmt.Errorf("these identifiers are treated as the same. please use the same spelling: %v", b.String())
- errs = append(errs, err)
- }
-
- return errs
-}
-
-// FindSpellingInconsistencies finds spelling inconsistencies in identifiers. Two identifiers are considered to be the same
-// if they are spelled the same when expressed in UpperCamelCase. For example, `left_paren` and `LeftParen` are spelled the same
-// in UpperCamelCase, so they are considered a spelling inconsistency.
-func FindSpellingInconsistencies(ids []string) [][]string {
- m := map[string][]string{}
- for _, id := range removeDuplicates(ids) {
- c := SnakeCaseToUpperCamelCase(id)
- m[c] = append(m[c], id)
- }
-
- var duplicated [][]string
- for _, camels := range m {
- if len(camels) == 1 {
- continue
- }
- duplicated = append(duplicated, camels)
- }
-
- for _, dup := range duplicated {
- sort.Slice(dup, func(i, j int) bool {
- return dup[i] < dup[j]
- })
- }
- sort.Slice(duplicated, func(i, j int) bool {
- return duplicated[i][0] < duplicated[j][0]
- })
-
- return duplicated
-}
-
-func removeDuplicates(s []string) []string {
- m := map[string]struct{}{}
- for _, v := range s {
- m[v] = struct{}{}
- }
-
- var unique []string
- for v := range m {
- unique = append(unique, v)
- }
-
- return unique
-}
-
-func SnakeCaseToUpperCamelCase(snake string) string {
- elems := strings.Split(snake, "_")
- for i, e := range elems {
- if len(e) == 0 {
- continue
- }
- elems[i] = strings.ToUpper(string(e[0])) + e[1:]
- }
-
- return strings.Join(elems, "")
-}
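
The two exported helpers compose: identifiers are first normalized with SnakeCaseToUpperCamelCase, and any two distinct spellings that normalize to the same name are reported as an inconsistency. A small usage sketch, consistent with the test cases earlier in this diff:

    fmt.Println(lexical.SnakeCaseToUpperCamelCase("foo_bar")) // FooBar

    dups := lexical.FindSpellingInconsistencies([]string{"foo_bar", "FooBar", "baz"})
    fmt.Println(dups) // [[FooBar foo_bar]]
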
diff --git a/grammar/lexical/parser/error.go b/grammar/lexical/parser/error.go
deleted file mode 100644
index be81da4..0000000
--- a/grammar/lexical/parser/error.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package parser
-
-import "fmt"
-
-var (
- ParseErr = fmt.Errorf("parse error")
-
- // lexical errors
- synErrIncompletedEscSeq = fmt.Errorf("incomplete escape sequence: unexpected EOF following \\")
- synErrInvalidEscSeq = fmt.Errorf("invalid escape sequence")
- synErrInvalidCodePoint = fmt.Errorf("code points must consist of just 4 or 6 hex digits")
- synErrCharPropInvalidSymbol = fmt.Errorf("invalid character property symbol")
- SynErrFragmentInvalidSymbol = fmt.Errorf("invalid fragment symbol")
-
- // syntax errors
- synErrUnexpectedToken = fmt.Errorf("unexpected token")
- synErrNullPattern = fmt.Errorf("a pattern must be a non-empty byte sequence")
- synErrUnmatchablePattern = fmt.Errorf("a pattern cannot match any characters")
- synErrAltLackOfOperand = fmt.Errorf("an alternation expression must have operands")
- synErrRepNoTarget = fmt.Errorf("a repeat expression must have an operand")
- synErrGroupNoElem = fmt.Errorf("a grouping expression must include at least one character")
- synErrGroupUnclosed = fmt.Errorf("unclosed grouping expression")
- synErrGroupNoInitiator = fmt.Errorf(") needs preceding (")
- synErrGroupInvalidForm = fmt.Errorf("invalid grouping expression")
- synErrBExpNoElem = fmt.Errorf("a bracket expression must include at least one character")
- synErrBExpUnclosed = fmt.Errorf("unclosed bracket expression")
- synErrBExpInvalidForm = fmt.Errorf("invalid bracket expression")
- synErrRangeInvalidOrder = fmt.Errorf("a range expression with invalid order")
- synErrRangePropIsUnavailable = fmt.Errorf("a property expression is unavailable in a range expression")
- synErrRangeInvalidForm = fmt.Errorf("invalid range expression")
- synErrCPExpInvalidForm = fmt.Errorf("invalid code point expression")
- synErrCPExpOutOfRange = fmt.Errorf("a code point must be between U+0000 and U+10FFFF")
- synErrCharPropExpInvalidForm = fmt.Errorf("invalid character property expression")
- synErrCharPropUnsupported = fmt.Errorf("unsupported character property")
- synErrFragmentExpInvalidForm = fmt.Errorf("invalid fragment expression")
-)
diff --git a/grammar/lexical/parser/fragment.go b/grammar/lexical/parser/fragment.go
deleted file mode 100644
index e51759e..0000000
--- a/grammar/lexical/parser/fragment.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package parser
-
-import (
- "fmt"
-
- spec "spec/grammar"
-)
-
-type incompleteFragment struct {
- kind spec.LexKindName
- root *rootNode
-}
-
-func CompleteFragments(fragments map[spec.LexKindName]CPTree) error {
- if len(fragments) == 0 {
- return nil
- }
-
- completeFragments := map[spec.LexKindName]CPTree{}
- incompleteFragments := []*incompleteFragment{}
- for kind, tree := range fragments {
- root, ok := tree.(*rootNode)
- if !ok {
- return fmt.Errorf("CompleteFragments can take only *rootNode: %T", tree)
- }
- if root.incomplete() {
- incompleteFragments = append(incompleteFragments, &incompleteFragment{
- kind: kind,
- root: root,
- })
- } else {
- completeFragments[kind] = root
- }
- }
- for len(incompleteFragments) > 0 {
- lastIncompCount := len(incompleteFragments)
- remainingFragments := []*incompleteFragment{}
- for _, e := range incompleteFragments {
- complete, err := ApplyFragments(e.root, completeFragments)
- if err != nil {
- return err
- }
- if !complete {
- remainingFragments = append(remainingFragments, e)
- } else {
- completeFragments[e.kind] = e.root
- }
- }
- incompleteFragments = remainingFragments
- if len(incompleteFragments) == lastIncompCount {
- return ParseErr
- }
- }
-
- return nil
-}
-
-func ApplyFragments(t CPTree, fragments map[spec.LexKindName]CPTree) (bool, error) {
- root, ok := t.(*rootNode)
- if !ok {
- return false, fmt.Errorf("ApplyFragments can take only *rootNode type: %T", t)
- }
-
- for name, frag := range fragments {
- err := root.applyFragment(name, frag)
- if err != nil {
- return false, err
- }
- }
-
- return !root.incomplete(), nil
-}
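
CompleteFragments resolves fragment references to a fixed point: each pass applies every already-complete fragment, and if a pass makes no progress, the remainder must contain an undefined reference or a cycle, which surfaces as ParseErr. A usage sketch (the "digit" and "number" kinds are made up for illustration; it assumes imports of strings, this parser package, and spec "spec/grammar"):

    frags := map[spec.LexKindName]parser.CPTree{}
    for kind, pat := range map[spec.LexKindName]string{
        "digit":  "[0-9]",
        "number": `\f{digit}+`, // refers to the "digit" fragment
    } {
        p := parser.NewParser(kind, strings.NewReader(pat))
        t, err := p.Parse()
        if err != nil {
            panic(err)
        }
        frags[kind] = t
    }
    if err := parser.CompleteFragments(frags); err != nil {
        panic(err) // ParseErr on undefined fragments or cyclic references
    }
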
diff --git a/grammar/lexical/parser/lexer.go b/grammar/lexical/parser/lexer.go
deleted file mode 100644
index 3861825..0000000
--- a/grammar/lexical/parser/lexer.go
+++ /dev/null
@@ -1,594 +0,0 @@
-package parser
-
-import (
- "bufio"
- "fmt"
- "io"
- "strings"
-)
-
-type tokenKind string
-
-const (
- tokenKindChar tokenKind = "char"
- tokenKindAnyChar tokenKind = "."
- tokenKindRepeat tokenKind = "*"
- tokenKindRepeatOneOrMore tokenKind = "+"
- tokenKindOption tokenKind = "?"
- tokenKindAlt tokenKind = "|"
- tokenKindGroupOpen tokenKind = "("
- tokenKindGroupClose tokenKind = ")"
- tokenKindBExpOpen tokenKind = "["
- tokenKindInverseBExpOpen tokenKind = "[^"
- tokenKindBExpClose tokenKind = "]"
- tokenKindCharRange tokenKind = "-"
- tokenKindCodePointLeader tokenKind = "\\u"
- tokenKindCharPropLeader tokenKind = "\\p"
- tokenKindFragmentLeader tokenKind = "\\f"
- tokenKindLBrace tokenKind = "{"
- tokenKindRBrace tokenKind = "}"
- tokenKindEqual tokenKind = "="
- tokenKindCodePoint tokenKind = "code point"
- tokenKindCharPropSymbol tokenKind = "character property symbol"
- tokenKindFragmentSymbol tokenKind = "fragment symbol"
- tokenKindEOF tokenKind = "eof"
-)
-
-type token struct {
- kind tokenKind
- char rune
- propSymbol string
- codePoint string
- fragmentSymbol string
-}
-
-const nullChar = '\u0000'
-
-func newToken(kind tokenKind, char rune) *token {
- return &token{
- kind: kind,
- char: char,
- }
-}
-
-func newCodePointToken(codePoint string) *token {
- return &token{
- kind: tokenKindCodePoint,
- codePoint: codePoint,
- }
-}
-
-func newCharPropSymbolToken(propSymbol string) *token {
- return &token{
- kind: tokenKindCharPropSymbol,
- propSymbol: propSymbol,
- }
-}
-
-func newFragmentSymbolToken(fragmentSymbol string) *token {
- return &token{
- kind: tokenKindFragmentSymbol,
- fragmentSymbol: fragmentSymbol,
- }
-}
-
-type lexerMode string
-
-const (
- lexerModeDefault lexerMode = "default"
- lexerModeBExp lexerMode = "bracket expression"
- lexerModeCPExp lexerMode = "code point expression"
- lexerModeCharPropExp lexerMode = "character property expression"
- lexerModeFragmentExp lexerMode = "fragment expression"
-)
-
-type lexerModeStack struct {
- stack []lexerMode
-}
-
-func newLexerModeStack() *lexerModeStack {
- return &lexerModeStack{
- stack: []lexerMode{
- lexerModeDefault,
- },
- }
-}
-
-func (s *lexerModeStack) top() lexerMode {
- return s.stack[len(s.stack)-1]
-}
-
-func (s *lexerModeStack) push(m lexerMode) {
- s.stack = append(s.stack, m)
-}
-
-func (s *lexerModeStack) pop() {
- s.stack = s.stack[:len(s.stack)-1]
-}
-
-type rangeState string
-
-// [a-z]
-// ^^^^
-// |||`-- ready
-// ||`-- expect range terminator
-// |`-- read range initiator
-// `-- ready
-const (
- rangeStateReady rangeState = "ready"
- rangeStateReadRangeInitiator rangeState = "read range initiator"
- rangeStateExpectRangeTerminator rangeState = "expect range terminator"
-)
-
-type lexer struct {
- src *bufio.Reader
- peekChar2 rune
- peekEOF2 bool
- peekChar1 rune
- peekEOF1 bool
- lastChar rune
- reachedEOF bool
- prevChar1 rune
- prevEOF1 bool
- prevChar2 rune
- prevEOF2 bool
- modeStack *lexerModeStack
- rangeState rangeState
-
- errCause error
- errDetail string
-}
-
-func newLexer(src io.Reader) *lexer {
- return &lexer{
- src: bufio.NewReader(src),
- peekChar2: nullChar,
- peekEOF2: false,
- peekChar1: nullChar,
- peekEOF1: false,
- lastChar: nullChar,
- reachedEOF: false,
- prevChar1: nullChar,
- prevEOF1: false,
- prevChar2: nullChar,
- prevEOF2: false,
- modeStack: newLexerModeStack(),
- rangeState: rangeStateReady,
- }
-}
-
-func (l *lexer) error() (string, error) {
- return l.errDetail, l.errCause
-}
-
-func (l *lexer) next() (*token, error) {
- c, eof, err := l.read()
- if err != nil {
- return nil, err
- }
- if eof {
- return newToken(tokenKindEOF, nullChar), nil
- }
-
- switch l.modeStack.top() {
- case lexerModeBExp:
- tok, err := l.nextInBExp(c)
- if err != nil {
- return nil, err
- }
- if tok.kind == tokenKindChar || tok.kind == tokenKindCodePointLeader || tok.kind == tokenKindCharPropLeader {
- switch l.rangeState {
- case rangeStateReady:
- l.rangeState = rangeStateReadRangeInitiator
- case rangeStateExpectRangeTerminator:
- l.rangeState = rangeStateReady
- }
- }
- switch tok.kind {
- case tokenKindBExpClose:
- l.modeStack.pop()
- case tokenKindCharRange:
- l.rangeState = rangeStateExpectRangeTerminator
- case tokenKindCodePointLeader:
- l.modeStack.push(lexerModeCPExp)
- case tokenKindCharPropLeader:
- l.modeStack.push(lexerModeCharPropExp)
- }
- return tok, nil
- case lexerModeCPExp:
- tok, err := l.nextInCodePoint(c)
- if err != nil {
- return nil, err
- }
- switch tok.kind {
- case tokenKindRBrace:
- l.modeStack.pop()
- }
- return tok, nil
- case lexerModeCharPropExp:
- tok, err := l.nextInCharProp(c)
- if err != nil {
- return nil, err
- }
- switch tok.kind {
- case tokenKindRBrace:
- l.modeStack.pop()
- }
- return tok, nil
- case lexerModeFragmentExp:
- tok, err := l.nextInFragment(c)
- if err != nil {
- return nil, err
- }
- switch tok.kind {
- case tokenKindRBrace:
- l.modeStack.pop()
- }
- return tok, nil
- default:
- tok, err := l.nextInDefault(c)
- if err != nil {
- return nil, err
- }
- switch tok.kind {
- case tokenKindBExpOpen:
- l.modeStack.push(lexerModeBExp)
- l.rangeState = rangeStateReady
- case tokenKindInverseBExpOpen:
- l.modeStack.push(lexerModeBExp)
- l.rangeState = rangeStateReady
- case tokenKindCodePointLeader:
- l.modeStack.push(lexerModeCPExp)
- case tokenKindCharPropLeader:
- l.modeStack.push(lexerModeCharPropExp)
- case tokenKindFragmentLeader:
- l.modeStack.push(lexerModeFragmentExp)
- }
- return tok, nil
- }
-}
-
-func (l *lexer) nextInDefault(c rune) (*token, error) {
- switch c {
- case '*':
- return newToken(tokenKindRepeat, nullChar), nil
- case '+':
- return newToken(tokenKindRepeatOneOrMore, nullChar), nil
- case '?':
- return newToken(tokenKindOption, nullChar), nil
- case '.':
- return newToken(tokenKindAnyChar, nullChar), nil
- case '|':
- return newToken(tokenKindAlt, nullChar), nil
- case '(':
- return newToken(tokenKindGroupOpen, nullChar), nil
- case ')':
- return newToken(tokenKindGroupClose, nullChar), nil
- case '[':
- c1, eof, err := l.read()
- if err != nil {
- return nil, err
- }
- if eof {
- err := l.restore()
- if err != nil {
- return nil, err
- }
- return newToken(tokenKindBExpOpen, nullChar), nil
- }
- if c1 != '^' {
- err := l.restore()
- if err != nil {
- return nil, err
- }
- return newToken(tokenKindBExpOpen, nullChar), nil
- }
- c2, eof, err := l.read()
- if err != nil {
- return nil, err
- }
- if eof {
- err := l.restore()
- if err != nil {
- return nil, err
- }
- return newToken(tokenKindInverseBExpOpen, nullChar), nil
- }
- if c2 != ']' {
- err := l.restore()
- if err != nil {
- return nil, err
- }
- return newToken(tokenKindInverseBExpOpen, nullChar), nil
- }
- err = l.restore()
- if err != nil {
- return nil, err
- }
- err = l.restore()
- if err != nil {
- return nil, err
- }
- return newToken(tokenKindBExpOpen, nullChar), nil
- case '\\':
- c, eof, err := l.read()
- if err != nil {
- return nil, err
- }
- if eof {
- l.errCause = synErrIncompletedEscSeq
- return nil, ParseErr
- }
- if c == 'u' {
- return newToken(tokenKindCodePointLeader, nullChar), nil
- }
- if c == 'p' {
- return newToken(tokenKindCharPropLeader, nullChar), nil
- }
- if c == 'f' {
- return newToken(tokenKindFragmentLeader, nullChar), nil
- }
- if c == '\\' || c == '.' || c == '*' || c == '+' || c == '?' || c == '|' || c == '(' || c == ')' || c == '[' || c == ']' {
- return newToken(tokenKindChar, c), nil
- }
- l.errCause = synErrInvalidEscSeq
- l.errDetail = fmt.Sprintf("\\%v is not supported", string(c))
- return nil, ParseErr
- default:
- return newToken(tokenKindChar, c), nil
- }
-}
-
-func (l *lexer) nextInBExp(c rune) (*token, error) {
- switch c {
- case '-':
- if l.rangeState != rangeStateReadRangeInitiator {
- return newToken(tokenKindChar, c), nil
- }
- c1, eof, err := l.read()
- if err != nil {
- return nil, err
- }
- if eof {
- err := l.restore()
- if err != nil {
- return nil, err
- }
- return newToken(tokenKindChar, c), nil
- }
- if c1 != ']' {
- err := l.restore()
- if err != nil {
- return nil, err
- }
- return newToken(tokenKindCharRange, nullChar), nil
- }
- err = l.restore()
- if err != nil {
- return nil, err
- }
- return newToken(tokenKindChar, c), nil
- case ']':
- return newToken(tokenKindBExpClose, nullChar), nil
- case '\\':
- c, eof, err := l.read()
- if err != nil {
- return nil, err
- }
- if eof {
- l.errCause = synErrIncompletedEscSeq
- return nil, ParseErr
- }
- if c == 'u' {
- return newToken(tokenKindCodePointLeader, nullChar), nil
- }
- if c == 'p' {
- return newToken(tokenKindCharPropLeader, nullChar), nil
- }
- if c == '\\' || c == '^' || c == '-' || c == ']' {
- return newToken(tokenKindChar, c), nil
- }
- l.errCause = synErrInvalidEscSeq
- l.errDetail = fmt.Sprintf("\\%v is not supported in a bracket expression", string(c))
- return nil, ParseErr
- default:
- return newToken(tokenKindChar, c), nil
- }
-}
-
-func (l *lexer) nextInCodePoint(c rune) (*token, error) {
- switch c {
- case '{':
- return newToken(tokenKindLBrace, nullChar), nil
- case '}':
- return newToken(tokenKindRBrace, nullChar), nil
- default:
- if !isHexDigit(c) {
- l.errCause = synErrInvalidCodePoint
- return nil, ParseErr
- }
- var b strings.Builder
- fmt.Fprint(&b, string(c))
- n := 1
- for {
- c, eof, err := l.read()
- if err != nil {
- return nil, err
- }
- if eof {
- err := l.restore()
- if err != nil {
- return nil, err
- }
- break
- }
- if c == '}' {
- err := l.restore()
- if err != nil {
- return nil, err
- }
- break
- }
- if !isHexDigit(c) || n >= 6 {
- l.errCause = synErrInvalidCodePoint
- return nil, ParseErr
- }
- fmt.Fprint(&b, string(c))
- n++
- }
- cp := b.String()
- cpLen := len(cp)
- if !(cpLen == 4 || cpLen == 6) {
- l.errCause = synErrInvalidCodePoint
- return nil, ParseErr
- }
- return newCodePointToken(b.String()), nil
- }
-}
-
-func isHexDigit(c rune) bool {
- // A hex digit is 0-9, A-F, or a-f.
- return c >= '0' && c <= '9' || c >= 'A' && c <= 'F' || c >= 'a' && c <= 'f'
-}
-
-func (l *lexer) nextInCharProp(c rune) (*token, error) {
- switch c {
- case '{':
- return newToken(tokenKindLBrace, nullChar), nil
- case '}':
- return newToken(tokenKindRBrace, nullChar), nil
- case '=':
- return newToken(tokenKindEqual, nullChar), nil
- default:
- var b strings.Builder
- fmt.Fprint(&b, string(c))
- n := 1
- for {
- c, eof, err := l.read()
- if err != nil {
- return nil, err
- }
- if eof {
- err := l.restore()
- if err != nil {
- return nil, err
- }
- break
- }
- if c == '}' || c == '=' {
- err := l.restore()
- if err != nil {
- return nil, err
- }
- break
- }
- fmt.Fprint(&b, string(c))
- n++
- }
- sym := strings.TrimSpace(b.String())
- if len(sym) == 0 {
- l.errCause = synErrCharPropInvalidSymbol
- return nil, ParseErr
- }
- return newCharPropSymbolToken(sym), nil
- }
-}
-
-func (l *lexer) nextInFragment(c rune) (*token, error) {
- switch c {
- case '{':
- return newToken(tokenKindLBrace, nullChar), nil
- case '}':
- return newToken(tokenKindRBrace, nullChar), nil
- default:
- var b strings.Builder
- fmt.Fprint(&b, string(c))
- n := 1
- for {
- c, eof, err := l.read()
- if err != nil {
- return nil, err
- }
- if eof {
- err := l.restore()
- if err != nil {
- return nil, err
- }
- break
- }
- if c == '}' {
- err := l.restore()
- if err != nil {
- return nil, err
- }
- break
- }
- fmt.Fprint(&b, string(c))
- n++
- }
- sym := strings.TrimSpace(b.String())
- if len(sym) == 0 {
- l.errCause = SynErrFragmentInvalidSymbol
- return nil, ParseErr
- }
- return newFragmentSymbolToken(sym), nil
- }
-}
-
-func (l *lexer) read() (rune, bool, error) {
- if l.reachedEOF {
- return l.lastChar, l.reachedEOF, nil
- }
- if l.peekChar1 != nullChar || l.peekEOF1 {
- l.prevChar2 = l.prevChar1
- l.prevEOF2 = l.prevEOF1
- l.prevChar1 = l.lastChar
- l.prevEOF1 = l.reachedEOF
- l.lastChar = l.peekChar1
- l.reachedEOF = l.peekEOF1
- l.peekChar1 = l.peekChar2
- l.peekEOF1 = l.peekEOF2
- l.peekChar2 = nullChar
- l.peekEOF2 = false
- return l.lastChar, l.reachedEOF, nil
- }
- c, _, err := l.src.ReadRune()
- if err != nil {
- if err == io.EOF {
- l.prevChar2 = l.prevChar1
- l.prevEOF2 = l.prevEOF1
- l.prevChar1 = l.lastChar
- l.prevEOF1 = l.reachedEOF
- l.lastChar = nullChar
- l.reachedEOF = true
- return l.lastChar, l.reachedEOF, nil
- }
- return nullChar, false, err
- }
- l.prevChar2 = l.prevChar1
- l.prevEOF2 = l.prevEOF1
- l.prevChar1 = l.lastChar
- l.prevEOF1 = l.reachedEOF
- l.lastChar = c
- l.reachedEOF = false
- return l.lastChar, l.reachedEOF, nil
-}
-
-func (l *lexer) restore() error {
- if l.lastChar == nullChar && !l.reachedEOF {
- return fmt.Errorf("failed to call restore() because the last character is null")
- }
- l.peekChar2 = l.peekChar1
- l.peekEOF2 = l.peekEOF1
- l.peekChar1 = l.lastChar
- l.peekEOF1 = l.reachedEOF
- l.lastChar = l.prevChar1
- l.reachedEOF = l.prevEOF1
- l.prevChar1 = l.prevChar2
- l.prevEOF1 = l.prevEOF2
- l.prevChar2 = nullChar
- l.prevEOF2 = false
- return nil
-}
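
The read and restore pair above amounts to a two-rune pushback buffer over a streaming reader: read shifts the peek registers toward lastChar while remembering the two previous runes, and restore undoes one read. The lexer therefore never looks ahead more than two runes, which is exactly what distinguishing "[", "[^", and "[^]" requires. A minimal sketch of the same look-ahead-then-back-up pattern, indexing a rune slice instead of shuffling registers (illustrative names, not this package's API):

package main

import "fmt"

// pushbackReader supports unreading runes, like restore() in the lexer
// above, but by moving an index rather than juggling peek/prev registers
// (the real lexer streams from a bufio.Reader and cannot index).
type pushbackReader struct {
	src []rune
	pos int
}

func (r *pushbackReader) read() (c rune, eof bool) {
	if r.pos >= len(r.src) {
		return 0, true
	}
	c = r.src[r.pos]
	r.pos++
	return c, false
}

func (r *pushbackReader) unread() {
	if r.pos > 0 {
		r.pos--
	}
}

func main() {
	r := &pushbackReader{src: []rune("[^]")}
	r.read() // '['
	if c1, eof := r.read(); !eof && c1 == '^' {
		if c2, eof := r.read(); !eof && c2 == ']' {
			// "[^]" is a bracket expression holding a plain '^', so
			// back up both lookahead runes and emit a plain "[" token.
			r.unread()
			r.unread()
			fmt.Println("plain [ with ^ as an ordinary character")
		}
	}
}
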
diff --git a/grammar/lexical/parser/lexer_test.go b/grammar/lexical/parser/lexer_test.go
deleted file mode 100644
index 055466e..0000000
--- a/grammar/lexical/parser/lexer_test.go
+++ /dev/null
@@ -1,524 +0,0 @@
-package parser
-
-import (
- "strings"
- "testing"
-)
-
-func TestLexer(t *testing.T) {
- tests := []struct {
- caption string
- src string
- tokens []*token
- err error
- }{
- {
- caption: "lexer can recognize ordinaly characters",
- src: "123abcいろは",
- tokens: []*token{
- newToken(tokenKindChar, '1'),
- newToken(tokenKindChar, '2'),
- newToken(tokenKindChar, '3'),
- newToken(tokenKindChar, 'a'),
- newToken(tokenKindChar, 'b'),
- newToken(tokenKindChar, 'c'),
- newToken(tokenKindChar, 'い'),
- newToken(tokenKindChar, 'ろ'),
- newToken(tokenKindChar, 'は'),
- newToken(tokenKindEOF, nullChar),
- },
- },
- {
- caption: "lexer can recognize the special characters in default mode",
- src: ".*+?|()[\\u",
- tokens: []*token{
- newToken(tokenKindAnyChar, nullChar),
- newToken(tokenKindRepeat, nullChar),
- newToken(tokenKindRepeatOneOrMore, nullChar),
- newToken(tokenKindOption, nullChar),
- newToken(tokenKindAlt, nullChar),
- newToken(tokenKindGroupOpen, nullChar),
- newToken(tokenKindGroupClose, nullChar),
- newToken(tokenKindBExpOpen, nullChar),
- newToken(tokenKindCodePointLeader, nullChar),
- newToken(tokenKindEOF, nullChar),
- },
- },
- {
- caption: "lexer can recognize the escape sequences in default mode",
- src: "\\\\\\.\\*\\+\\?\\|\\(\\)\\[",
- tokens: []*token{
- newToken(tokenKindChar, '\\'),
- newToken(tokenKindChar, '.'),
- newToken(tokenKindChar, '*'),
- newToken(tokenKindChar, '+'),
- newToken(tokenKindChar, '?'),
- newToken(tokenKindChar, '|'),
- newToken(tokenKindChar, '('),
- newToken(tokenKindChar, ')'),
- newToken(tokenKindChar, '['),
- newToken(tokenKindEOF, nullChar),
- },
- },
- {
- caption: "], {, and } are treated as an ordinary character in default mode",
- src: "]{}",
- tokens: []*token{
- newToken(tokenKindChar, ']'),
- newToken(tokenKindChar, '{'),
- newToken(tokenKindChar, '}'),
- newToken(tokenKindEOF, nullChar),
- },
- },
- {
- caption: "lexer can recognize the special characters in bracket expression mode",
- src: "[a-z\\u{09AF}][^a-z\\u{09abcf}]",
- tokens: []*token{
- newToken(tokenKindBExpOpen, nullChar),
- newToken(tokenKindChar, 'a'),
- newToken(tokenKindCharRange, nullChar),
- newToken(tokenKindChar, 'z'),
- newToken(tokenKindCodePointLeader, nullChar),
- newToken(tokenKindLBrace, nullChar),
- newCodePointToken("09AF"),
- newToken(tokenKindRBrace, nullChar),
- newToken(tokenKindBExpClose, nullChar),
- newToken(tokenKindInverseBExpOpen, nullChar),
- newToken(tokenKindChar, 'a'),
- newToken(tokenKindCharRange, nullChar),
- newToken(tokenKindChar, 'z'),
- newToken(tokenKindCodePointLeader, nullChar),
- newToken(tokenKindLBrace, nullChar),
- newCodePointToken("09abcf"),
- newToken(tokenKindRBrace, nullChar),
- newToken(tokenKindBExpClose, nullChar),
- newToken(tokenKindEOF, nullChar),
- },
- },
- {
- caption: "lexer can recognize the escape sequences in bracket expression mode",
- src: "[\\^a\\-z]",
- tokens: []*token{
- newToken(tokenKindBExpOpen, nullChar),
- newToken(tokenKindChar, '^'),
- newToken(tokenKindChar, 'a'),
- newToken(tokenKindChar, '-'),
- newToken(tokenKindChar, 'z'),
- newToken(tokenKindBExpClose, nullChar),
- newToken(tokenKindEOF, nullChar),
- },
- },
- {
- caption: "in a bracket expression, the special characters are also handled as normal characters",
- src: "[\\\\.*+?|()[",
- tokens: []*token{
- newToken(tokenKindBExpOpen, nullChar),
- newToken(tokenKindChar, '\\'),
- newToken(tokenKindChar, '.'),
- newToken(tokenKindChar, '*'),
- newToken(tokenKindChar, '+'),
- newToken(tokenKindChar, '?'),
- newToken(tokenKindChar, '|'),
- newToken(tokenKindChar, '('),
- newToken(tokenKindChar, ')'),
- newToken(tokenKindChar, '['),
- newToken(tokenKindEOF, nullChar),
- },
- },
- {
- caption: "hyphen symbols that appear in bracket expressions are handled as the character range symbol or ordinary characters",
- // [...-...][...-][-...][-]
- // ~~~~~~~ ~ ~ ~
- // ^ ^ ^ ^
- // | | | `-- Ordinary Character (b)
- // | | `-- Ordinary Character (b)
- // | `-- Ordinary Character (b)
- // `-- Character Range (a)
- //
- // a. *-* is handled as a character-range expression.
- // b. *-, -*, or - are handled as ordinary characters (a standalone sketch of this rule follows this file's diff).
- src: "[a-z][a-][-z][-][--][---][^a-z][^a-][^-z][^-][^--][^---]",
- tokens: []*token{
- newToken(tokenKindBExpOpen, nullChar),
- newToken(tokenKindChar, 'a'),
- newToken(tokenKindCharRange, nullChar),
- newToken(tokenKindChar, 'z'),
- newToken(tokenKindBExpClose, nullChar),
- newToken(tokenKindBExpOpen, nullChar),
- newToken(tokenKindChar, 'a'),
- newToken(tokenKindChar, '-'),
- newToken(tokenKindBExpClose, nullChar),
- newToken(tokenKindBExpOpen, nullChar),
- newToken(tokenKindChar, '-'),
- newToken(tokenKindChar, 'z'),
- newToken(tokenKindBExpClose, nullChar),
- newToken(tokenKindBExpOpen, nullChar),
- newToken(tokenKindChar, '-'),
- newToken(tokenKindBExpClose, nullChar),
- newToken(tokenKindBExpOpen, nullChar),
- newToken(tokenKindChar, '-'),
- newToken(tokenKindChar, '-'),
- newToken(tokenKindBExpClose, nullChar),
- newToken(tokenKindBExpOpen, nullChar),
- newToken(tokenKindChar, '-'),
- newToken(tokenKindCharRange, nullChar),
- newToken(tokenKindChar, '-'),
- newToken(tokenKindBExpClose, nullChar),
-
- newToken(tokenKindInverseBExpOpen, nullChar),
- newToken(tokenKindChar, 'a'),
- newToken(tokenKindCharRange, nullChar),
- newToken(tokenKindChar, 'z'),
- newToken(tokenKindBExpClose, nullChar),
- newToken(tokenKindInverseBExpOpen, nullChar),
- newToken(tokenKindChar, 'a'),
- newToken(tokenKindChar, '-'),
- newToken(tokenKindBExpClose, nullChar),
- newToken(tokenKindInverseBExpOpen, nullChar),
- newToken(tokenKindChar, '-'),
- newToken(tokenKindChar, 'z'),
- newToken(tokenKindBExpClose, nullChar),
- newToken(tokenKindInverseBExpOpen, nullChar),
- newToken(tokenKindChar, '-'),
- newToken(tokenKindBExpClose, nullChar),
- newToken(tokenKindInverseBExpOpen, nullChar),
- newToken(tokenKindChar, '-'),
- newToken(tokenKindChar, '-'),
- newToken(tokenKindBExpClose, nullChar),
- newToken(tokenKindInverseBExpOpen, nullChar),
- newToken(tokenKindChar, '-'),
- newToken(tokenKindCharRange, nullChar),
- newToken(tokenKindChar, '-'),
- newToken(tokenKindBExpClose, nullChar),
-
- newToken(tokenKindEOF, nullChar),
- },
- },
- {
- caption: "caret symbols that appear in bracket expressions are handled as the logical inverse symbol or ordinary characters",
- // [^...^...][^]
- // ~~ ~ ~~
- // ^ ^ ^^
- // | | |`-- Ordinary Character (c)
- // | | `-- Bracket Expression
- // | `-- Ordinary Character (b)
- // `-- Inverse Bracket Expression (a)
- //
- // a. Bracket expressions that have a caret symbol at the beginning are handled as logical inverse expressions.
- // b. Caret symbols that appear as the second or any subsequent symbol are handled as ordinary characters.
- // c. When a bracket expression has just one symbol, a caret symbol at the beginning is handled as an ordinary character.
- src: "[^^][^]",
- tokens: []*token{
- newToken(tokenKindInverseBExpOpen, nullChar),
- newToken(tokenKindChar, '^'),
- newToken(tokenKindBExpClose, nullChar),
- newToken(tokenKindBExpOpen, nullChar),
- newToken(tokenKindChar, '^'),
- newToken(tokenKindBExpClose, nullChar),
- newToken(tokenKindEOF, nullChar),
- },
- },
- {
- caption: "lexer raises an error when an invalid escape sequence appears",
- src: "\\@",
- err: synErrInvalidEscSeq,
- },
- {
- caption: "lexer raises an error when the incomplete escape sequence (EOF following \\) appears",
- src: "\\",
- err: synErrIncompletedEscSeq,
- },
- {
- caption: "lexer raises an error when an invalid escape sequence appears",
- src: "[\\@",
- tokens: []*token{
- newToken(tokenKindBExpOpen, nullChar),
- },
- err: synErrInvalidEscSeq,
- },
- {
- caption: "lexer raises an error when the incomplete escape sequence (EOF following \\) appears",
- src: "[\\",
- tokens: []*token{
- newToken(tokenKindBExpOpen, nullChar),
- },
- err: synErrIncompletedEscSeq,
- },
- {
- caption: "lexer can recognize the special characters and code points in code point expression mode",
- src: "\\u{0123}\\u{4567}\\u{89abcd}\\u{efAB}\\u{CDEF01}[\\u{0123}\\u{4567}\\u{89abcd}\\u{efAB}\\u{CDEF01}][^\\u{0123}\\u{4567}\\u{89abcd}\\u{efAB}\\u{CDEF01}]",
- tokens: []*token{
- newToken(tokenKindCodePointLeader, nullChar),
- newToken(tokenKindLBrace, nullChar),
- newCodePointToken("0123"),
- newToken(tokenKindRBrace, nullChar),
- newToken(tokenKindCodePointLeader, nullChar),
- newToken(tokenKindLBrace, nullChar),
- newCodePointToken("4567"),
- newToken(tokenKindRBrace, nullChar),
- newToken(tokenKindCodePointLeader, nullChar),
- newToken(tokenKindLBrace, nullChar),
- newCodePointToken("89abcd"),
- newToken(tokenKindRBrace, nullChar),
- newToken(tokenKindCodePointLeader, nullChar),
- newToken(tokenKindLBrace, nullChar),
- newCodePointToken("efAB"),
- newToken(tokenKindRBrace, nullChar),
- newToken(tokenKindCodePointLeader, nullChar),
- newToken(tokenKindLBrace, nullChar),
- newCodePointToken("CDEF01"),
- newToken(tokenKindRBrace, nullChar),
-
- newToken(tokenKindBExpOpen, nullChar),
- newToken(tokenKindCodePointLeader, nullChar),
- newToken(tokenKindLBrace, nullChar),
- newCodePointToken("0123"),
- newToken(tokenKindRBrace, nullChar),
- newToken(tokenKindCodePointLeader, nullChar),
- newToken(tokenKindLBrace, nullChar),
- newCodePointToken("4567"),
- newToken(tokenKindRBrace, nullChar),
- newToken(tokenKindCodePointLeader, nullChar),
- newToken(tokenKindLBrace, nullChar),
- newCodePointToken("89abcd"),
- newToken(tokenKindRBrace, nullChar),
- newToken(tokenKindCodePointLeader, nullChar),
- newToken(tokenKindLBrace, nullChar),
- newCodePointToken("efAB"),
- newToken(tokenKindRBrace, nullChar),
- newToken(tokenKindCodePointLeader, nullChar),
- newToken(tokenKindLBrace, nullChar),
- newCodePointToken("CDEF01"),
- newToken(tokenKindRBrace, nullChar),
- newToken(tokenKindBExpClose, nullChar),
-
- newToken(tokenKindInverseBExpOpen, nullChar),
- newToken(tokenKindCodePointLeader, nullChar),
- newToken(tokenKindLBrace, nullChar),
- newCodePointToken("0123"),
- newToken(tokenKindRBrace, nullChar),
- newToken(tokenKindCodePointLeader, nullChar),
- newToken(tokenKindLBrace, nullChar),
- newCodePointToken("4567"),
- newToken(tokenKindRBrace, nullChar),
- newToken(tokenKindCodePointLeader, nullChar),
- newToken(tokenKindLBrace, nullChar),
- newCodePointToken("89abcd"),
- newToken(tokenKindRBrace, nullChar),
- newToken(tokenKindCodePointLeader, nullChar),
- newToken(tokenKindLBrace, nullChar),
- newCodePointToken("efAB"),
- newToken(tokenKindRBrace, nullChar),
- newToken(tokenKindCodePointLeader, nullChar),
- newToken(tokenKindLBrace, nullChar),
- newCodePointToken("CDEF01"),
- newToken(tokenKindRBrace, nullChar),
- newToken(tokenKindBExpClose, nullChar),
-
- newToken(tokenKindEOF, nullChar),
- },
- },
- {
- caption: "a one digit hex string isn't a valid code point",
- src: "\\u{0",
- tokens: []*token{
- newToken(tokenKindCodePointLeader, nullChar),
- newToken(tokenKindLBrace, nullChar),
- },
- err: synErrInvalidCodePoint,
- },
- {
- caption: "a two digits hex string isn't a valid code point",
- src: "\\u{01",
- tokens: []*token{
- newToken(tokenKindCodePointLeader, nullChar),
- newToken(tokenKindLBrace, nullChar),
- },
- err: synErrInvalidCodePoint,
- },
- {
- caption: "a three digits hex string isn't a valid code point",
- src: "\\u{012",
- tokens: []*token{
- newToken(tokenKindCodePointLeader, nullChar),
- newToken(tokenKindLBrace, nullChar),
- },
- err: synErrInvalidCodePoint,
- },
- {
- caption: "a four digits hex string is a valid code point",
- src: "\\u{0123}",
- tokens: []*token{
- newToken(tokenKindCodePointLeader, nullChar),
- newToken(tokenKindLBrace, nullChar),
- newCodePointToken("0123"),
- newToken(tokenKindRBrace, nullChar),
- },
- },
- {
- caption: "a five digits hex string isn't a valid code point",
- src: "\\u{01234",
- tokens: []*token{
- newToken(tokenKindCodePointLeader, nullChar),
- newToken(tokenKindLBrace, nullChar),
- },
- err: synErrInvalidCodePoint,
- },
- {
- caption: "a six digits hex string is a valid code point",
- src: "\\u{012345}",
- tokens: []*token{
- newToken(tokenKindCodePointLeader, nullChar),
- newToken(tokenKindLBrace, nullChar),
- newCodePointToken("012345"),
- newToken(tokenKindRBrace, nullChar),
- },
- },
- {
- caption: "a seven digits hex string isn't a valid code point",
- src: "\\u{0123456",
- tokens: []*token{
- newToken(tokenKindCodePointLeader, nullChar),
- newToken(tokenKindLBrace, nullChar),
- },
- err: synErrInvalidCodePoint,
- },
- {
- caption: "a code point must be hex digits",
- src: "\\u{g",
- tokens: []*token{
- newToken(tokenKindCodePointLeader, nullChar),
- newToken(tokenKindLBrace, nullChar),
- },
- err: synErrInvalidCodePoint,
- },
- {
- caption: "a code point must be hex digits",
- src: "\\u{G",
- tokens: []*token{
- newToken(tokenKindCodePointLeader, nullChar),
- newToken(tokenKindLBrace, nullChar),
- },
- err: synErrInvalidCodePoint,
- },
- {
- caption: "lexer can recognize the special characters and symbols in character property expression mode",
- src: "\\p{Letter}\\p{General_Category=Letter}[\\p{Letter}\\p{General_Category=Letter}][^\\p{Letter}\\p{General_Category=Letter}]",
- tokens: []*token{
- newToken(tokenKindCharPropLeader, nullChar),
- newToken(tokenKindLBrace, nullChar),
- newCharPropSymbolToken("Letter"),
- newToken(tokenKindRBrace, nullChar),
- newToken(tokenKindCharPropLeader, nullChar),
- newToken(tokenKindLBrace, nullChar),
- newCharPropSymbolToken("General_Category"),
- newToken(tokenKindEqual, nullChar),
- newCharPropSymbolToken("Letter"),
- newToken(tokenKindRBrace, nullChar),
-
- newToken(tokenKindBExpOpen, nullChar),
- newToken(tokenKindCharPropLeader, nullChar),
- newToken(tokenKindLBrace, nullChar),
- newCharPropSymbolToken("Letter"),
- newToken(tokenKindRBrace, nullChar),
- newToken(tokenKindCharPropLeader, nullChar),
- newToken(tokenKindLBrace, nullChar),
- newCharPropSymbolToken("General_Category"),
- newToken(tokenKindEqual, nullChar),
- newCharPropSymbolToken("Letter"),
- newToken(tokenKindRBrace, nullChar),
- newToken(tokenKindBExpClose, nullChar),
-
- newToken(tokenKindInverseBExpOpen, nullChar),
- newToken(tokenKindCharPropLeader, nullChar),
- newToken(tokenKindLBrace, nullChar),
- newCharPropSymbolToken("Letter"),
- newToken(tokenKindRBrace, nullChar),
- newToken(tokenKindCharPropLeader, nullChar),
- newToken(tokenKindLBrace, nullChar),
- newCharPropSymbolToken("General_Category"),
- newToken(tokenKindEqual, nullChar),
- newCharPropSymbolToken("Letter"),
- newToken(tokenKindRBrace, nullChar),
- newToken(tokenKindBExpClose, nullChar),
-
- newToken(tokenKindEOF, nullChar),
- },
- },
- {
- caption: "lexer can recognize the special characters and symbols in fragment expression mode",
- src: "\\f{integer}",
- tokens: []*token{
- newToken(tokenKindFragmentLeader, nullChar),
- newToken(tokenKindLBrace, nullChar),
- newFragmentSymbolToken("integer"),
- newToken(tokenKindRBrace, nullChar),
-
- newToken(tokenKindEOF, nullChar),
- },
- },
- {
- caption: "a fragment expression is not supported in a bracket expression",
- src: "[\\f",
- tokens: []*token{
- newToken(tokenKindBExpOpen, nullChar),
- },
- err: synErrInvalidEscSeq,
- },
- {
- caption: "a fragment expression is not supported in an inverse bracket expression",
- src: "[^\\f",
- tokens: []*token{
- newToken(tokenKindInverseBExpOpen, nullChar),
- },
- err: synErrInvalidEscSeq,
- },
- }
- for _, tt := range tests {
- t.Run(tt.caption, func(t *testing.T) {
- lex := newLexer(strings.NewReader(tt.src))
- var err error
- var tok *token
- i := 0
- for {
- tok, err = lex.next()
- if err != nil {
- break
- }
- if i >= len(tt.tokens) {
- break
- }
- eTok := tt.tokens[i]
- i++
- testToken(t, tok, eTok)
-
- if tok.kind == tokenKindEOF {
- break
- }
- }
- if tt.err != nil {
- if err != ParseErr {
- t.Fatalf("unexpected error: want: %v, got: %v", ParseErr, err)
- }
- detail, cause := lex.error()
- if cause != tt.err {
- t.Fatalf("unexpected error: want: %v, got: %v (%v)", tt.err, cause, detail)
- }
- } else {
- if err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
- }
- if i < len(tt.tokens) {
- t.Fatalf("expecte more tokens")
- }
- })
- }
-}
-
-func testToken(t *testing.T, a, e *token) {
- t.Helper()
- if e.kind != a.kind || e.char != a.char || e.codePoint != a.codePoint {
- t.Fatalf("unexpected token: want: %+v, got: %+v", e, a)
- }
-}
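
Every hyphen case in TestLexer above follows one rule: '-' acts as the range operator only when the lexer has just read a range initiator and the next character is not ']'; any other '-' is an ordinary character, and the character that terminates a range resets the state machine. A standalone sketch of that three-state rule (illustrative code; it simplifies by ignoring \u and \p elements, which also count as initiators and terminators):

package main

import "fmt"

// States mirror the rangeState values in the removed lexer.
const (
	ready = iota
	readInitiator
	expectTerminator
)

// classifyHyphens reports the role of each '-' in a bracket-expression
// body, i.e. the text between "[" (or "[^") and "]".
func classifyHyphens(body []rune) []string {
	var roles []string
	state := ready
	for i, c := range body {
		if c == '-' && state == readInitiator && i < len(body)-1 {
			roles = append(roles, "range operator")
			state = expectTerminator
			continue
		}
		if c == '-' {
			roles = append(roles, "ordinary '-'")
		}
		switch state {
		case ready:
			state = readInitiator // this character may start a range
		case expectTerminator:
			state = ready // this character terminated a range
		}
	}
	return roles
}

func main() {
	fmt.Println(classifyHyphens([]rune("a-z")))   // [range operator]
	fmt.Println(classifyHyphens([]rune("a-")))    // [ordinary '-']
	fmt.Println(classifyHyphens([]rune("-z")))    // [ordinary '-']
	fmt.Println(classifyHyphens([]rune("---")))   // [ordinary '-' range operator ordinary '-']
	fmt.Println(classifyHyphens([]rune("a-z-0"))) // [range operator ordinary '-']
}
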
diff --git a/grammar/lexical/parser/parser.go b/grammar/lexical/parser/parser.go
deleted file mode 100644
index d1a08a2..0000000
--- a/grammar/lexical/parser/parser.go
+++ /dev/null
@@ -1,531 +0,0 @@
-package parser
-
-import (
- "bytes"
- "fmt"
- "io"
- "strconv"
-
- spec "spec/grammar"
- "ucd"
-)
-
-type PatternEntry struct {
- ID spec.LexModeKindID
- Pattern []byte
-}
-
-type parser struct {
- kind spec.LexKindName
- lex *lexer
- peekedTok *token
- lastTok *token
-
- // If and only if isContributoryPropertyExposed is true, the parser interprets contributory properties that
- // appear in property expressions.
- //
- // The contributory properties are not exposed, and users cannot use those properties because the parser
- // follows [UAX #44 5.13 Property APIs]. For instance, \p{Other_Alphabetic} is invalid.
- //
- // isContributoryPropertyExposed is set to true when the parser instantiates itself recursively (as
- // parseCharProp does). The nested parser must interpret derived properties internally because a derived
- // property expands into other properties that may include contributory ones.
- //
- // [UAX #44 5.13 Property APIs] says:
- // > The following subtypes of Unicode character properties should generally not be exposed in APIs,
- // > except in limited circumstances. They may not be useful, particularly in public API collections,
- // > and may instead prove misleading to the users of such API collections.
- // > * Contributory properties are not recommended for public APIs.
- // > ...
- // https://unicode.org/reports/tr44/#Property_APIs
- isContributoryPropertyExposed bool
-
- errCause error
- errDetail string
-}
-
-func NewParser(kind spec.LexKindName, src io.Reader) *parser {
- return &parser{
- kind: kind,
- lex: newLexer(src),
- isContributoryPropertyExposed: false,
- }
-}
-
-func (p *parser) exposeContributoryProperty() {
- p.isContributoryPropertyExposed = true
-}
-
-func (p *parser) Error() (string, error) {
- return p.errDetail, p.errCause
-}
-
-func (p *parser) Parse() (root CPTree, retErr error) {
- defer func() {
- err := recover()
- if err != nil {
- var ok bool
- retErr, ok = err.(error)
- if !ok {
- panic(err)
- }
- return
- }
- }()
-
- return newRootNode(p.kind, p.parseRegexp()), nil
-}
-
-func (p *parser) parseRegexp() CPTree {
- alt := p.parseAlt()
- if alt == nil {
- if p.consume(tokenKindGroupClose) {
- p.raiseParseError(synErrGroupNoInitiator, "")
- }
- p.raiseParseError(synErrNullPattern, "")
- }
- if p.consume(tokenKindGroupClose) {
- p.raiseParseError(synErrGroupNoInitiator, "")
- }
- p.expect(tokenKindEOF)
- return alt
-}
-
-func (p *parser) parseAlt() CPTree {
- left := p.parseConcat()
- if left == nil {
- if p.consume(tokenKindAlt) {
- p.raiseParseError(synErrAltLackOfOperand, "")
- }
- return nil
- }
- for {
- if !p.consume(tokenKindAlt) {
- break
- }
- right := p.parseConcat()
- if right == nil {
- p.raiseParseError(synErrAltLackOfOperand, "")
- }
- left = newAltNode(left, right)
- }
- return left
-}
-
-func (p *parser) parseConcat() CPTree {
- left := p.parseRepeat()
- for {
- right := p.parseRepeat()
- if right == nil {
- break
- }
- left = newConcatNode(left, right)
- }
- return left
-}
-
-func (p *parser) parseRepeat() CPTree {
- group := p.parseGroup()
- if group == nil {
- if p.consume(tokenKindRepeat) {
- p.raiseParseError(synErrRepNoTarget, "* needs an operand")
- }
- if p.consume(tokenKindRepeatOneOrMore) {
- p.raiseParseError(synErrRepNoTarget, "+ needs an operand")
- }
- if p.consume(tokenKindOption) {
- p.raiseParseError(synErrRepNoTarget, "? needs an operand")
- }
- return nil
- }
- if p.consume(tokenKindRepeat) {
- return newRepeatNode(group)
- }
- if p.consume(tokenKindRepeatOneOrMore) {
- return newRepeatOneOrMoreNode(group)
- }
- if p.consume(tokenKindOption) {
- return newOptionNode(group)
- }
- return group
-}
-
-func (p *parser) parseGroup() CPTree {
- if p.consume(tokenKindGroupOpen) {
- alt := p.parseAlt()
- if alt == nil {
- if p.consume(tokenKindEOF) {
- p.raiseParseError(synErrGroupUnclosed, "")
- }
- p.raiseParseError(synErrGroupNoElem, "")
- }
- if p.consume(tokenKindEOF) {
- p.raiseParseError(synErrGroupUnclosed, "")
- }
- if !p.consume(tokenKindGroupClose) {
- p.raiseParseError(synErrGroupInvalidForm, "")
- }
- return alt
- }
- return p.parseSingleChar()
-}
-
-func (p *parser) parseSingleChar() CPTree {
- if p.consume(tokenKindAnyChar) {
- return genAnyCharAST()
- }
- if p.consume(tokenKindBExpOpen) {
- left := p.parseBExpElem()
- if left == nil {
- if p.consume(tokenKindEOF) {
- p.raiseParseError(synErrBExpUnclosed, "")
- }
- p.raiseParseError(synErrBExpNoElem, "")
- }
- for {
- right := p.parseBExpElem()
- if right == nil {
- break
- }
- left = newAltNode(left, right)
- }
- if p.consume(tokenKindEOF) {
- p.raiseParseError(synErrBExpUnclosed, "")
- }
- p.expect(tokenKindBExpClose)
- return left
- }
- if p.consume(tokenKindInverseBExpOpen) {
- elem := p.parseBExpElem()
- if elem == nil {
- if p.consume(tokenKindEOF) {
- p.raiseParseError(synErrBExpUnclosed, "")
- }
- p.raiseParseError(synErrBExpNoElem, "")
- }
- inverse := exclude(elem, genAnyCharAST())
- if inverse == nil {
- p.raiseParseError(synErrUnmatchablePattern, "")
- }
- for {
- elem := p.parseBExpElem()
- if elem == nil {
- break
- }
- inverse = exclude(elem, inverse)
- if inverse == nil {
- p.raiseParseError(synErrUnmatchablePattern, "")
- }
- }
- if p.consume(tokenKindEOF) {
- p.raiseParseError(synErrBExpUnclosed, "")
- }
- p.expect(tokenKindBExpClose)
- return inverse
- }
- if p.consume(tokenKindCodePointLeader) {
- return p.parseCodePoint()
- }
- if p.consume(tokenKindCharPropLeader) {
- return p.parseCharProp()
- }
- if p.consume(tokenKindFragmentLeader) {
- return p.parseFragment()
- }
- c := p.parseNormalChar()
- if c == nil {
- if p.consume(tokenKindBExpClose) {
- p.raiseParseError(synErrBExpInvalidForm, "")
- }
- return nil
- }
- return c
-}
-
-func (p *parser) parseBExpElem() CPTree {
- var left CPTree
- switch {
- case p.consume(tokenKindCodePointLeader):
- left = p.parseCodePoint()
- case p.consume(tokenKindCharPropLeader):
- left = p.parseCharProp()
- if p.consume(tokenKindCharRange) {
- p.raiseParseError(synErrRangePropIsUnavailable, "")
- }
- default:
- left = p.parseNormalChar()
- }
- if left == nil {
- return nil
- }
- if !p.consume(tokenKindCharRange) {
- return left
- }
- var right CPTree
- switch {
- case p.consume(tokenKindCodePointLeader):
- right = p.parseCodePoint()
- case p.consume(tokenKindCharPropLeader):
- p.raiseParseError(synErrRangePropIsUnavailable, "")
- default:
- right = p.parseNormalChar()
- }
- if right == nil {
- p.raiseParseError(synErrRangeInvalidForm, "")
- }
- from, _, _ := left.Range()
- _, to, _ := right.Range()
- if !isValidOrder(from, to) {
- p.raiseParseError(synErrRangeInvalidOrder, fmt.Sprintf("%X..%X", from, to))
- }
- return newRangeSymbolNode(from, to)
-}
-
-func (p *parser) parseCodePoint() CPTree {
- if !p.consume(tokenKindLBrace) {
- p.raiseParseError(synErrCPExpInvalidForm, "")
- }
- if !p.consume(tokenKindCodePoint) {
- p.raiseParseError(synErrCPExpInvalidForm, "")
- }
-
- n, err := strconv.ParseInt(p.lastTok.codePoint, 16, 64)
- if err != nil {
- panic(fmt.Errorf("failed to decode a code point (%v) into a int: %v", p.lastTok.codePoint, err))
- }
- if n < 0x0000 || n > 0x10FFFF {
- p.raiseParseError(synErrCPExpOutOfRange, "")
- }
-
- sym := newSymbolNode(rune(n))
-
- if !p.consume(tokenKindRBrace) {
- p.raiseParseError(synErrCPExpInvalidForm, "")
- }
-
- return sym
-}
-
-func (p *parser) parseCharProp() CPTree {
- if !p.consume(tokenKindLBrace) {
- p.raiseParseError(synErrCharPropExpInvalidForm, "")
- }
- var sym1, sym2 string
- if !p.consume(tokenKindCharPropSymbol) {
- p.raiseParseError(synErrCharPropExpInvalidForm, "")
- }
- sym1 = p.lastTok.propSymbol
- if p.consume(tokenKindEqual) {
- if !p.consume(tokenKindCharPropSymbol) {
- p.raiseParseError(synErrCharPropExpInvalidForm, "")
- }
- sym2 = p.lastTok.propSymbol
- }
-
- var alt CPTree
- var propName, propVal string
- if sym2 != "" {
- propName = sym1
- propVal = sym2
- } else {
- propName = ""
- propVal = sym1
- }
- if !p.isContributoryPropertyExposed && ucd.IsContributoryProperty(propName) {
- p.raiseParseError(synErrCharPropUnsupported, propName)
- }
- pat, err := ucd.NormalizeCharacterProperty(propName, propVal)
- if err != nil {
- p.raiseParseError(synErrCharPropUnsupported, err.Error())
- }
- if pat != "" {
- p := NewParser(p.kind, bytes.NewReader([]byte(pat)))
- p.exposeContributoryProperty()
- ast, err := p.Parse()
- if err != nil {
- panic(err)
- }
- alt = ast
- } else {
- cpRanges, inverse, err := ucd.FindCodePointRanges(propName, propVal)
- if err != nil {
- p.raiseParseError(synErrCharPropUnsupported, err.Error())
- }
- if inverse {
- r := cpRanges[0]
- alt = exclude(newRangeSymbolNode(r.From, r.To), genAnyCharAST())
- if alt == nil {
- p.raiseParseError(synErrUnmatchablePattern, "")
- }
- for _, r := range cpRanges[1:] {
- alt = exclude(newRangeSymbolNode(r.From, r.To), alt)
- if alt == nil {
- p.raiseParseError(synErrUnmatchablePattern, "")
- }
- }
- } else {
- for _, r := range cpRanges {
- alt = genAltNode(
- alt,
- newRangeSymbolNode(r.From, r.To),
- )
- }
- }
- }
-
- if !p.consume(tokenKindRBrace) {
- p.raiseParseError(synErrCharPropExpInvalidForm, "")
- }
-
- return alt
-}
-
-func (p *parser) parseFragment() CPTree {
- if !p.consume(tokenKindLBrace) {
- p.raiseParseError(synErrFragmentExpInvalidForm, "")
- }
- if !p.consume(tokenKindFragmentSymbol) {
- p.raiseParseError(synErrFragmentExpInvalidForm, "")
- }
- sym := p.lastTok.fragmentSymbol
-
- if !p.consume(tokenKindRBrace) {
- p.raiseParseError(synErrFragmentExpInvalidForm, "")
- }
-
- return newFragmentNode(spec.LexKindName(sym), nil)
-}
-
-func (p *parser) parseNormalChar() CPTree {
- if !p.consume(tokenKindChar) {
- return nil
- }
- return newSymbolNode(p.lastTok.char)
-}
-
-func exclude(symbol, base CPTree) CPTree {
- if left, right, ok := symbol.Alternatives(); ok {
- return exclude(right, exclude(left, base))
- }
-
- if left, right, ok := base.Alternatives(); ok {
- return genAltNode(
- exclude(symbol, left),
- exclude(symbol, right),
- )
- }
-
- if bFrom, bTo, ok := base.Range(); ok {
- sFrom, sTo, ok := symbol.Range()
- if !ok {
- panic(fmt.Errorf("invalid symbol tree: %T", symbol))
- }
-
- switch {
- case sFrom > bFrom && sTo < bTo:
- return genAltNode(
- newRangeSymbolNode(bFrom, sFrom-1),
- newRangeSymbolNode(sTo+1, bTo),
- )
- case sFrom <= bFrom && sTo >= bFrom && sTo < bTo:
- return newRangeSymbolNode(sTo+1, bTo)
- case sFrom > bFrom && sFrom <= bTo && sTo >= bTo:
- return newRangeSymbolNode(bFrom, sFrom-1)
- case sFrom <= bFrom && sTo >= bTo:
- return nil
- default:
- return base
- }
- }
-
- panic(fmt.Errorf("invalid base tree: %T", base))
-}
-
-func genAnyCharAST() CPTree {
- return newRangeSymbolNode(0x0, 0x10FFFF)
-}
-
-func isValidOrder(from, to rune) bool {
- return from <= to
-}
-
-func genConcatNode(cs ...CPTree) CPTree {
- nonNilNodes := []CPTree{}
- for _, c := range cs {
- if c == nil {
- continue
- }
- nonNilNodes = append(nonNilNodes, c)
- }
- if len(nonNilNodes) == 0 {
- return nil
- }
- if len(nonNilNodes) == 1 {
- return nonNilNodes[0]
- }
- concat := newConcatNode(nonNilNodes[0], nonNilNodes[1])
- for _, c := range nonNilNodes[2:] {
- concat = newConcatNode(concat, c)
- }
- return concat
-}
-
-func genAltNode(cs ...CPTree) CPTree {
- nonNilNodes := []CPTree{}
- for _, c := range cs {
- if c == nil {
- continue
- }
- nonNilNodes = append(nonNilNodes, c)
- }
- if len(nonNilNodes) == 0 {
- return nil
- }
- if len(nonNilNodes) == 1 {
- return nonNilNodes[0]
- }
- alt := newAltNode(nonNilNodes[0], nonNilNodes[1])
- for _, c := range nonNilNodes[2:] {
- alt = newAltNode(alt, c)
- }
- return alt
-}
-
-func (p *parser) expect(expected tokenKind) {
- if !p.consume(expected) {
- tok := p.peekedTok
- p.raiseParseError(synErrUnexpectedToken, fmt.Sprintf("expected: %v, actual: %v", expected, tok.kind))
- }
-}
-
-func (p *parser) consume(expected tokenKind) bool {
- var tok *token
- var err error
- if p.peekedTok != nil {
- tok = p.peekedTok
- p.peekedTok = nil
- } else {
- tok, err = p.lex.next()
- if err != nil {
- if err == ParseErr {
- detail, cause := p.lex.error()
- p.raiseParseError(cause, detail)
- }
- panic(err)
- }
- }
- p.lastTok = tok
- if tok.kind == expected {
- return true
- }
- p.peekedTok = tok
- p.lastTok = nil
-
- return false
-}
-
-func (p *parser) raiseParseError(err error, detail string) {
- p.errCause = err
- p.errDetail = detail
- panic(ParseErr)
-}
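
The exclude function above implements code-point set subtraction: it distributes over alternation trees on both sides, and at the leaves it subtracts one inclusive range from another, which is what the four overlap cases plus the default compute. The same leaf-level arithmetic as a self-contained sketch (cpRange and subtract are illustrative names, not this package's API):

package main

import "fmt"

// cpRange is an inclusive range of Unicode code points.
type cpRange struct{ from, to rune }

// subtract removes s from b and returns the zero, one, or two ranges
// that remain; the cases mirror the switch in exclude() above.
func subtract(s, b cpRange) []cpRange {
	switch {
	case s.from > b.from && s.to < b.to:
		// s lies strictly inside b: b splits in two.
		return []cpRange{{b.from, s.from - 1}, {s.to + 1, b.to}}
	case s.from <= b.from && s.to >= b.from && s.to < b.to:
		// s covers b's head: keep the tail.
		return []cpRange{{s.to + 1, b.to}}
	case s.from > b.from && s.from <= b.to && s.to >= b.to:
		// s covers b's tail: keep the head.
		return []cpRange{{b.from, s.from - 1}}
	case s.from <= b.from && s.to >= b.to:
		// s covers b entirely: nothing remains.
		return nil
	default:
		// No overlap: b is untouched.
		return []cpRange{b}
	}
}

func main() {
	anyChar := cpRange{0x0, 0x10FFFF}
	// The effect of [^a-z]: subtract 'a'..'z' from the full range.
	fmt.Printf("%x\n", subtract(cpRange{'a', 'z'}, anyChar))
	// Output: [{0 60} {7b 10ffff}]
}

This is also how the parser handles an inverse bracket expression: it starts from the full range U+0000..U+10FFFF (genAnyCharAST) and subtracts each element in turn, raising synErrUnmatchablePattern when nothing remains.
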
diff --git a/grammar/lexical/parser/parser_test.go b/grammar/lexical/parser/parser_test.go
deleted file mode 100644
index e876d3b..0000000
--- a/grammar/lexical/parser/parser_test.go
+++ /dev/null
@@ -1,1389 +0,0 @@
-package parser
-
-import (
- "fmt"
- "reflect"
- "strings"
- "testing"
-
- spec "spec/grammar"
- "ucd"
-)
-
-func TestParse(t *testing.T) {
- tests := []struct {
- pattern string
- fragments map[spec.LexKindName]string
- ast CPTree
- syntaxError error
-
- // When an AST is large, as with patterns containing a character property expression, this test checks
- // only that the pattern is parsable. The validity of such an AST is verified separately by matching
- // input against it using the driver.
- skipTestAST bool
- }{
- {
- pattern: "a",
- ast: newSymbolNode('a'),
- },
- {
- pattern: "abc",
- ast: genConcatNode(
- newSymbolNode('a'),
- newSymbolNode('b'),
- newSymbolNode('c'),
- ),
- },
- {
- pattern: "a?",
- ast: newOptionNode(
- newSymbolNode('a'),
- ),
- },
- {
- pattern: "[abc]?",
- ast: newOptionNode(
- genAltNode(
- newSymbolNode('a'),
- newSymbolNode('b'),
- newSymbolNode('c'),
- ),
- ),
- },
- {
- pattern: "\\u{3042}?",
- ast: newOptionNode(
- newSymbolNode('\u3042'),
- ),
- },
- {
- pattern: "\\p{Letter}?",
- skipTestAST: true,
- },
- {
- pattern: "\\f{a2c}?",
- fragments: map[spec.LexKindName]string{
- "a2c": "abc",
- },
- ast: newOptionNode(
- newFragmentNode("a2c",
- genConcatNode(
- newSymbolNode('a'),
- newSymbolNode('b'),
- newSymbolNode('c'),
- ),
- ),
- ),
- },
- {
- pattern: "(a)?",
- ast: newOptionNode(
- newSymbolNode('a'),
- ),
- },
- {
- pattern: "((a?)?)?",
- ast: newOptionNode(
- newOptionNode(
- newOptionNode(
- newSymbolNode('a'),
- ),
- ),
- ),
- },
- {
- pattern: "(abc)?",
- ast: newOptionNode(
- genConcatNode(
- newSymbolNode('a'),
- newSymbolNode('b'),
- newSymbolNode('c'),
- ),
- ),
- },
- {
- pattern: "(a|b)?",
- ast: newOptionNode(
- genAltNode(
- newSymbolNode('a'),
- newSymbolNode('b'),
- ),
- ),
- },
- {
- pattern: "?",
- syntaxError: synErrRepNoTarget,
- },
- {
- pattern: "(?)",
- syntaxError: synErrRepNoTarget,
- },
- {
- pattern: "a|?",
- syntaxError: synErrRepNoTarget,
- },
- {
- pattern: "?|b",
- syntaxError: synErrRepNoTarget,
- },
- {
- pattern: "a??",
- syntaxError: synErrRepNoTarget,
- },
- {
- pattern: "a*",
- ast: newRepeatNode(
- newSymbolNode('a'),
- ),
- },
- {
- pattern: "[abc]*",
- ast: newRepeatNode(
- genAltNode(
- newSymbolNode('a'),
- newSymbolNode('b'),
- newSymbolNode('c'),
- ),
- ),
- },
- {
- pattern: "\\u{3042}*",
- ast: newRepeatNode(
- newSymbolNode('\u3042'),
- ),
- },
- {
- pattern: "\\p{Letter}*",
- skipTestAST: true,
- },
- {
- pattern: "\\f{a2c}*",
- fragments: map[spec.LexKindName]string{
- "a2c": "abc",
- },
- ast: newRepeatNode(
- newFragmentNode("a2c",
- genConcatNode(
- newSymbolNode('a'),
- newSymbolNode('b'),
- newSymbolNode('c'),
- ),
- ),
- ),
- },
- {
- pattern: "((a*)*)*",
- ast: newRepeatNode(
- newRepeatNode(
- newRepeatNode(
- newSymbolNode('a'),
- ),
- ),
- ),
- },
- {
- pattern: "(abc)*",
- ast: newRepeatNode(
- genConcatNode(
- newSymbolNode('a'),
- newSymbolNode('b'),
- newSymbolNode('c'),
- ),
- ),
- },
- {
- pattern: "(a|b)*",
- ast: newRepeatNode(
- genAltNode(
- newSymbolNode('a'),
- newSymbolNode('b'),
- ),
- ),
- },
- {
- pattern: "*",
- syntaxError: synErrRepNoTarget,
- },
- {
- pattern: "(*)",
- syntaxError: synErrRepNoTarget,
- },
- {
- pattern: "a|*",
- syntaxError: synErrRepNoTarget,
- },
- {
- pattern: "*|b",
- syntaxError: synErrRepNoTarget,
- },
- {
- pattern: "a**",
- syntaxError: synErrRepNoTarget,
- },
- {
- pattern: "a+",
- ast: genConcatNode(
- newSymbolNode('a'),
- newRepeatNode(
- newSymbolNode('a'),
- ),
- ),
- },
- {
- pattern: "[abc]+",
- ast: genConcatNode(
- genAltNode(
- newSymbolNode('a'),
- newSymbolNode('b'),
- newSymbolNode('c'),
- ),
- newRepeatNode(
- genAltNode(
- newSymbolNode('a'),
- newSymbolNode('b'),
- newSymbolNode('c'),
- ),
- ),
- ),
- },
- {
- pattern: "\\u{3042}+",
- ast: genConcatNode(
- newSymbolNode('\u3042'),
- newRepeatNode(
- newSymbolNode('\u3042'),
- ),
- ),
- },
- {
- pattern: "\\p{Letter}+",
- skipTestAST: true,
- },
- {
- pattern: "\\f{a2c}+",
- fragments: map[spec.LexKindName]string{
- "a2c": "abc",
- },
- ast: genConcatNode(
- newFragmentNode("a2c",
- genConcatNode(
- newSymbolNode('a'),
- newSymbolNode('b'),
- newSymbolNode('c'),
- ),
- ),
- newRepeatNode(
- newFragmentNode("a2c",
- genConcatNode(
- newSymbolNode('a'),
- newSymbolNode('b'),
- newSymbolNode('c'),
- ),
- ),
- ),
- ),
- },
- {
- pattern: "((a+)+)+",
- ast: genConcatNode(
- genConcatNode(
- genConcatNode(
- genConcatNode(
- newSymbolNode('a'),
- newRepeatNode(
- newSymbolNode('a'),
- ),
- ),
- newRepeatNode(
- genConcatNode(
- newSymbolNode('a'),
- newRepeatNode(
- newSymbolNode('a'),
- ),
- ),
- ),
- ),
- newRepeatNode(
- genConcatNode(
- genConcatNode(
- newSymbolNode('a'),
- newRepeatNode(
- newSymbolNode('a'),
- ),
- ),
- newRepeatNode(
- genConcatNode(
- newSymbolNode('a'),
- newRepeatNode(
- newSymbolNode('a'),
- ),
- ),
- ),
- ),
- ),
- ),
- ),
- },
- {
- pattern: "(abc)+",
- ast: genConcatNode(
- genConcatNode(
- newSymbolNode('a'),
- newSymbolNode('b'),
- newSymbolNode('c'),
- ),
- newRepeatNode(
- genConcatNode(
- newSymbolNode('a'),
- newSymbolNode('b'),
- newSymbolNode('c'),
- ),
- ),
- ),
- },
- {
- pattern: "(a|b)+",
- ast: genConcatNode(
- genAltNode(
- newSymbolNode('a'),
- newSymbolNode('b'),
- ),
- newRepeatNode(
- genAltNode(
- newSymbolNode('a'),
- newSymbolNode('b'),
- ),
- ),
- ),
- },
- {
- pattern: "+",
- syntaxError: synErrRepNoTarget,
- },
- {
- pattern: "(+)",
- syntaxError: synErrRepNoTarget,
- },
- {
- pattern: "a|+",
- syntaxError: synErrRepNoTarget,
- },
- {
- pattern: "+|b",
- syntaxError: synErrRepNoTarget,
- },
- {
- pattern: "a++",
- syntaxError: synErrRepNoTarget,
- },
- {
- pattern: ".",
- ast: newRangeSymbolNode(0x00, 0x10FFFF),
- },
- {
- pattern: "[a]",
- ast: newSymbolNode('a'),
- },
- {
- pattern: "[abc]",
- ast: genAltNode(
- newSymbolNode('a'),
- newSymbolNode('b'),
- newSymbolNode('c'),
- ),
- },
- {
- pattern: "[a-z]",
- ast: newRangeSymbolNode('a', 'z'),
- },
- {
- pattern: "[A-Za-z]",
- ast: genAltNode(
- newRangeSymbolNode('A', 'Z'),
- newRangeSymbolNode('a', 'z'),
- ),
- },
- {
- pattern: "[\\u{004E}]",
- ast: newSymbolNode('N'),
- },
- {
- pattern: "[\\u{0061}-\\u{007A}]",
- ast: newRangeSymbolNode('a', 'z'),
- },
- {
- pattern: "[\\p{Lu}]",
- skipTestAST: true,
- },
- {
- pattern: "[a-\\p{Lu}]",
- syntaxError: synErrRangePropIsUnavailable,
- },
- {
- pattern: "[\\p{Lu}-z]",
- syntaxError: synErrRangePropIsUnavailable,
- },
- {
- pattern: "[\\p{Lu}-\\p{Ll}]",
- syntaxError: synErrRangePropIsUnavailable,
- },
- {
- pattern: "[z-a]",
- syntaxError: synErrRangeInvalidOrder,
- },
- {
- pattern: "a[]",
- syntaxError: synErrBExpNoElem,
- },
- {
- pattern: "[]a",
- syntaxError: synErrBExpNoElem,
- },
- {
- pattern: "[]",
- syntaxError: synErrBExpNoElem,
- },
- {
- pattern: "[^\\u{004E}]",
- ast: genAltNode(
- newRangeSymbolNode(0x00, '\u004E'-1),
- newRangeSymbolNode('\u004E'+1, 0x10FFFF),
- ),
- },
- {
- pattern: "[^\\u{0061}-\\u{007A}]",
- ast: genAltNode(
- newRangeSymbolNode(0x00, '\u0061'-1),
- newRangeSymbolNode('\u007A'+1, 0x10FFFF),
- ),
- },
- {
- pattern: "[^\\p{Lu}]",
- skipTestAST: true,
- },
- {
- pattern: "[^a-\\p{Lu}]",
- syntaxError: synErrRangePropIsUnavailable,
- },
- {
- pattern: "[^\\p{Lu}-z]",
- syntaxError: synErrRangePropIsUnavailable,
- },
- {
- pattern: "[^\\p{Lu}-\\p{Ll}]",
- syntaxError: synErrRangePropIsUnavailable,
- },
- {
- pattern: "[^\\u{0000}-\\u{10FFFF}]",
- syntaxError: synErrUnmatchablePattern,
- },
- {
- pattern: "[^\\u{0000}-\\u{FFFF}\\u{010000}-\\u{10FFFF}]",
- syntaxError: synErrUnmatchablePattern,
- },
- {
- pattern: "[^]",
- ast: newSymbolNode('^'),
- },
- {
- pattern: "[",
- syntaxError: synErrBExpUnclosed,
- },
- {
- pattern: "([",
- syntaxError: synErrBExpUnclosed,
- },
- {
- pattern: "[a",
- syntaxError: synErrBExpUnclosed,
- },
- {
- pattern: "([a",
- syntaxError: synErrBExpUnclosed,
- },
- {
- pattern: "[a-",
- syntaxError: synErrBExpUnclosed,
- },
- {
- pattern: "([a-",
- syntaxError: synErrBExpUnclosed,
- },
- {
- pattern: "[^",
- syntaxError: synErrBExpUnclosed,
- },
- {
- pattern: "([^",
- syntaxError: synErrBExpUnclosed,
- },
- {
- pattern: "[^a",
- syntaxError: synErrBExpUnclosed,
- },
- {
- pattern: "([^a",
- syntaxError: synErrBExpUnclosed,
- },
- {
- pattern: "[^a-",
- syntaxError: synErrBExpUnclosed,
- },
- {
- pattern: "([^a-",
- syntaxError: synErrBExpUnclosed,
- },
- {
- pattern: "]",
- ast: newSymbolNode(']'),
- },
- {
- pattern: "(]",
- syntaxError: synErrGroupUnclosed,
- },
- {
- pattern: "a]",
- ast: genConcatNode(
- newSymbolNode('a'),
- newSymbolNode(']'),
- ),
- },
- {
- pattern: "(a]",
- syntaxError: synErrGroupUnclosed,
- },
- {
- pattern: "([)",
- syntaxError: synErrBExpUnclosed,
- },
- {
- pattern: "([a)",
- syntaxError: synErrBExpUnclosed,
- },
- {
- pattern: "[a-]",
- ast: genAltNode(
- newSymbolNode('a'),
- newSymbolNode('-'),
- ),
- },
- {
- pattern: "[^a-]",
- ast: genAltNode(
- newRangeSymbolNode(0x00, 0x2C),
- newRangeSymbolNode(0x2E, 0x60),
- newRangeSymbolNode(0x62, 0x10FFFF),
- ),
- },
- {
- pattern: "[-z]",
- ast: genAltNode(
- newSymbolNode('-'),
- newSymbolNode('z'),
- ),
- },
- {
- pattern: "[^-z]",
- ast: newAltNode(
- newRangeSymbolNode(0x00, 0x2C),
- newAltNode(
- newRangeSymbolNode(0x2E, 0x79),
- newRangeSymbolNode(0x7B, 0x10FFFF),
- ),
- ),
- },
- {
- pattern: "[-]",
- ast: newSymbolNode('-'),
- },
- {
- pattern: "[^-]",
- ast: genAltNode(
- newRangeSymbolNode(0x00, 0x2C),
- newRangeSymbolNode(0x2E, 0x10FFFF),
- ),
- },
- {
- pattern: "[^01]",
- ast: genAltNode(
- newRangeSymbolNode(0x00, '0'-1),
- newRangeSymbolNode('1'+1, 0x10FFFF),
- ),
- },
- {
- pattern: "[^10]",
- ast: genAltNode(
- newRangeSymbolNode(0x00, '0'-1),
- newRangeSymbolNode('1'+1, 0x10FFFF),
- ),
- },
- {
- pattern: "[^a-z]",
- ast: genAltNode(
- newRangeSymbolNode(0x00, 'a'-1),
- newRangeSymbolNode('z'+1, 0x10FFFF),
- ),
- },
- {
- pattern: "[^az]",
- ast: genAltNode(
- newRangeSymbolNode(0x00, 'a'-1),
- genAltNode(
- newRangeSymbolNode('a'+1, 'z'-1),
- newRangeSymbolNode('z'+1, 0x10FFFF),
- ),
- ),
- },
- {
- pattern: "\\u{006E}",
- ast: newSymbolNode('\u006E'),
- },
- {
- pattern: "\\u{03BD}",
- ast: newSymbolNode('\u03BD'),
- },
- {
- pattern: "\\u{306B}",
- ast: newSymbolNode('\u306B'),
- },
- {
- pattern: "\\u{01F638}",
- ast: newSymbolNode('\U0001F638'),
- },
- {
- pattern: "\\u{0000}",
- ast: newSymbolNode('\u0000'),
- },
- {
- pattern: "\\u{10FFFF}",
- ast: newSymbolNode('\U0010FFFF'),
- },
- {
- pattern: "\\u{110000}",
- syntaxError: synErrCPExpOutOfRange,
- },
- {
- pattern: "\\u",
- syntaxError: synErrCPExpInvalidForm,
- },
- {
- pattern: "\\u{",
- syntaxError: synErrCPExpInvalidForm,
- },
- {
- pattern: "\\u{03BD",
- syntaxError: synErrCPExpInvalidForm,
- },
- {
- pattern: "\\u{}",
- syntaxError: synErrCPExpInvalidForm,
- },
- {
- pattern: "\\p{Letter}",
- skipTestAST: true,
- },
- {
- pattern: "\\p{General_Category=Letter}",
- skipTestAST: true,
- },
- {
- pattern: "\\p{ Letter }",
- skipTestAST: true,
- },
- {
- pattern: "\\p{ General_Category = Letter }",
- skipTestAST: true,
- },
- {
- pattern: "\\p",
- syntaxError: synErrCharPropExpInvalidForm,
- },
- {
- pattern: "\\p{",
- syntaxError: synErrCharPropExpInvalidForm,
- },
- {
- pattern: "\\p{Letter",
- syntaxError: synErrCharPropExpInvalidForm,
- },
- {
- pattern: "\\p{General_Category=}",
- syntaxError: synErrCharPropExpInvalidForm,
- },
- {
- pattern: "\\p{General_Category= }",
- syntaxError: synErrCharPropInvalidSymbol,
- },
- {
- pattern: "\\p{=Letter}",
- syntaxError: synErrCharPropExpInvalidForm,
- },
- {
- pattern: "\\p{ =Letter}",
- syntaxError: synErrCharPropInvalidSymbol,
- },
- {
- pattern: "\\p{=}",
- syntaxError: synErrCharPropExpInvalidForm,
- },
- {
- pattern: "\\p{}",
- syntaxError: synErrCharPropExpInvalidForm,
- },
- {
- pattern: "\\f{a2c}",
- fragments: map[spec.LexKindName]string{
- "a2c": "abc",
- },
- ast: newFragmentNode("a2c",
- genConcatNode(
- newSymbolNode('a'),
- newSymbolNode('b'),
- newSymbolNode('c'),
- ),
- ),
- },
- {
- pattern: "\\f{ a2c }",
- fragments: map[spec.LexKindName]string{
- "a2c": "abc",
- },
- ast: newFragmentNode("a2c",
- genConcatNode(
- newSymbolNode('a'),
- newSymbolNode('b'),
- newSymbolNode('c'),
- ),
- ),
- },
- {
- pattern: "\\f",
- syntaxError: synErrFragmentExpInvalidForm,
- },
- {
- pattern: "\\f{",
- syntaxError: synErrFragmentExpInvalidForm,
- },
- {
- pattern: "\\f{a2c",
- fragments: map[spec.LexKindName]string{
- "a2c": "abc",
- },
- syntaxError: synErrFragmentExpInvalidForm,
- },
- {
- pattern: "(a)",
- ast: newSymbolNode('a'),
- },
- {
- pattern: "(((a)))",
- ast: newSymbolNode('a'),
- },
- {
- pattern: "a()",
- syntaxError: synErrGroupNoElem,
- },
- {
- pattern: "()a",
- syntaxError: synErrGroupNoElem,
- },
- {
- pattern: "()",
- syntaxError: synErrGroupNoElem,
- },
- {
- pattern: "(",
- syntaxError: synErrGroupUnclosed,
- },
- {
- pattern: "a(",
- syntaxError: synErrGroupUnclosed,
- },
- {
- pattern: "(a",
- syntaxError: synErrGroupUnclosed,
- },
- {
- pattern: "((",
- syntaxError: synErrGroupUnclosed,
- },
- {
- pattern: "((a)",
- syntaxError: synErrGroupUnclosed,
- },
- {
- pattern: ")",
- syntaxError: synErrGroupNoInitiator,
- },
- {
- pattern: "a)",
- syntaxError: synErrGroupNoInitiator,
- },
- {
- pattern: ")a",
- syntaxError: synErrGroupNoInitiator,
- },
- {
- pattern: "))",
- syntaxError: synErrGroupNoInitiator,
- },
- {
- pattern: "(a))",
- syntaxError: synErrGroupNoInitiator,
- },
- {
- pattern: "Mulder|Scully",
- ast: genAltNode(
- genConcatNode(
- newSymbolNode('M'),
- newSymbolNode('u'),
- newSymbolNode('l'),
- newSymbolNode('d'),
- newSymbolNode('e'),
- newSymbolNode('r'),
- ),
- genConcatNode(
- newSymbolNode('S'),
- newSymbolNode('c'),
- newSymbolNode('u'),
- newSymbolNode('l'),
- newSymbolNode('l'),
- newSymbolNode('y'),
- ),
- ),
- },
- {
- pattern: "Langly|Frohike|Byers",
- ast: genAltNode(
- genConcatNode(
- newSymbolNode('L'),
- newSymbolNode('a'),
- newSymbolNode('n'),
- newSymbolNode('g'),
- newSymbolNode('l'),
- newSymbolNode('y'),
- ),
- genConcatNode(
- newSymbolNode('F'),
- newSymbolNode('r'),
- newSymbolNode('o'),
- newSymbolNode('h'),
- newSymbolNode('i'),
- newSymbolNode('k'),
- newSymbolNode('e'),
- ),
- genConcatNode(
- newSymbolNode('B'),
- newSymbolNode('y'),
- newSymbolNode('e'),
- newSymbolNode('r'),
- newSymbolNode('s'),
- ),
- ),
- },
- {
- pattern: "|",
- syntaxError: synErrAltLackOfOperand,
- },
- {
- pattern: "||",
- syntaxError: synErrAltLackOfOperand,
- },
- {
- pattern: "Mulder|",
- syntaxError: synErrAltLackOfOperand,
- },
- {
- pattern: "|Scully",
- syntaxError: synErrAltLackOfOperand,
- },
- {
- pattern: "Langly|Frohike|",
- syntaxError: synErrAltLackOfOperand,
- },
- {
- pattern: "Langly||Byers",
- syntaxError: synErrAltLackOfOperand,
- },
- {
- pattern: "|Frohike|Byers",
- syntaxError: synErrAltLackOfOperand,
- },
- {
- pattern: "|Frohike|",
- syntaxError: synErrAltLackOfOperand,
- },
- {
- pattern: "Fox(|)Mulder",
- syntaxError: synErrAltLackOfOperand,
- },
- {
- pattern: "(Fox|)Mulder",
- syntaxError: synErrAltLackOfOperand,
- },
- {
- pattern: "Fox(|Mulder)",
- syntaxError: synErrAltLackOfOperand,
- },
- }
- for i, tt := range tests {
- t.Run(fmt.Sprintf("#%v %v", i, tt.pattern), func(t *testing.T) {
- fragmentTrees := map[spec.LexKindName]CPTree{}
- for kind, pattern := range tt.fragments {
- p := NewParser(kind, strings.NewReader(pattern))
- root, err := p.Parse()
- if err != nil {
- t.Fatal(err)
- }
-
- fragmentTrees[kind] = root
- }
- err := CompleteFragments(fragmentTrees)
- if err != nil {
- t.Fatal(err)
- }
-
- p := NewParser(spec.LexKindName("test"), strings.NewReader(tt.pattern))
- root, err := p.Parse()
- if tt.syntaxError != nil {
- // printCPTree(os.Stdout, root, "", "")
- if err != ParseErr {
- t.Fatalf("unexpected error: want: %v, got: %v", ParseErr, err)
- }
- _, synErr := p.Error()
- if synErr != tt.syntaxError {
- t.Fatalf("unexpected syntax error: want: %v, got: %v", tt.syntaxError, synErr)
- }
- if root != nil {
- t.Fatalf("tree must be nil")
- }
- } else {
- if err != nil {
- detail, cause := p.Error()
- t.Fatalf("%v: %v: %v", err, cause, detail)
- }
- if root == nil {
- t.Fatal("tree must be non-nil")
- }
-
- complete, err := ApplyFragments(root, fragmentTrees)
- if err != nil {
- t.Fatal(err)
- }
- if !complete {
- t.Fatalf("incomplete fragments")
- }
-
- // printCPTree(os.Stdout, root, "", "")
- if !tt.skipTestAST {
- r := root.(*rootNode)
- testAST(t, tt.ast, r.tree)
- }
- }
- })
- }
-}
-
-func TestParse_ContributoryPropertyIsNotExposed(t *testing.T) {
- for _, cProp := range ucd.ContributoryProperties() {
- t.Run(fmt.Sprintf("%v", cProp), func(t *testing.T) {
- p := NewParser(spec.LexKindName("test"), strings.NewReader(fmt.Sprintf(`\p{%v=yes}`, cProp)))
- root, err := p.Parse()
- if err == nil {
- t.Fatalf("expected syntax error: got: nil")
- }
- _, synErr := p.Error()
- if synErr != synErrCharPropUnsupported {
- t.Fatalf("unexpected syntax error: want: %v, got: %v", synErrCharPropUnsupported, synErr)
- }
- if root != nil {
- t.Fatalf("tree is not nil")
- }
- })
- }
-}
-
-func TestExclude(t *testing.T) {
- for _, test := range []struct {
- caption string
- target CPTree
- base CPTree
- result CPTree
- }{
- // t.From > b.From && t.To < b.To
-
- // |t.From - b.From| = 1
- // |b.To - t.To| = 1
- //
- // Target (t): +--+
- // Base (b): +--+--+--+
- // Result (b - t): +--+ +--+
- {
- caption: "|t.From - b.From| = 1 && |b.To - t.To| = 1",
- target: newSymbolNode('1'),
- base: newRangeSymbolNode('0', '2'),
- result: newAltNode(
- newSymbolNode('0'),
- newSymbolNode('2'),
- ),
- },
- // |t.From - b.From| > 1
- // |b.To - t.To| > 1
- //
- // Target (t): +--+
- // Base (b): +--+--+--+--+--+
- // Result (b - t): +--+--+ +--+--+
- {
- caption: "|t.From - b.From| > 1 && |b.To - t.To| > 1",
- target: newSymbolNode('2'),
- base: newRangeSymbolNode('0', '4'),
- result: newAltNode(
- newRangeSymbolNode('0', '1'),
- newRangeSymbolNode('3', '4'),
- ),
- },
-
- // t.From <= b.From && t.To >= b.From && t.To < b.To
-
- // |b.From - t.From| = 0
- // |t.To - b.From| = 0
- // |b.To - t.To| = 1
- //
- // Target (t): +--+
- // Base (b): +--+--+
- // Result (b - t): +--+
- {
- caption: "|b.From - t.From| = 0 && |t.To - b.From| = 0 && |b.To - t.To| = 1",
- target: newSymbolNode('0'),
- base: newRangeSymbolNode('0', '1'),
- result: newSymbolNode('1'),
- },
- // |b.From - t.From| = 0
- // |t.To - b.From| = 0
- // |b.To - t.To| > 1
- //
- // Target (t): +--+
- // Base (b): +--+--+--+
- // Result (b - t): +--+--+
- {
- caption: "|b.From - t.From| = 0 && |t.To - b.From| = 0 && |b.To - t.To| > 1",
- target: newSymbolNode('0'),
- base: newRangeSymbolNode('0', '2'),
- result: newRangeSymbolNode('1', '2'),
- },
- // |b.From - t.From| = 0
- // |t.To - b.From| > 0
- // |b.To - t.To| = 1
- //
- // Target (t): +--+--+
- // Base (b): +--+--+--+
- // Result (b - t): +--+
- {
- caption: "|b.From - t.From| = 0 && |t.To - b.From| > 0 && |b.To - t.To| = 1",
- target: newRangeSymbolNode('0', '1'),
- base: newRangeSymbolNode('0', '2'),
- result: newSymbolNode('2'),
- },
- // |b.From - t.From| = 0
- // |t.To - b.From| > 0
- // |b.To - t.To| > 1
- //
- // Target (t): +--+--+
- // Base (b): +--+--+--+--+
- // Result (b - t): +--+--+
- {
- caption: "|b.From - t.From| = 0 && |t.To - b.From| > 0 && |b.To - t.To| > 1",
- target: newRangeSymbolNode('0', '1'),
- base: newRangeSymbolNode('0', '3'),
- result: newRangeSymbolNode('2', '3'),
- },
- // |b.From - t.From| > 0
- // |t.To - b.From| = 0
- // |b.To - t.To| = 1
- //
- // Target (t): +--+--+
- // Base (b): +--+--+
- // Result (b - t): +--+
- {
- caption: "|b.From - t.From| > 0 && |t.To - b.From| = 0 && |b.To - t.To| = 1",
- target: newRangeSymbolNode('0', '1'),
- base: newRangeSymbolNode('1', '2'),
- result: newSymbolNode('2'),
- },
- // |b.From - t.From| > 0
- // |t.To - b.From| = 0
- // |b.To - t.To| > 1
- //
- // Target (t): +--+--+
- // Base (b): +--+--+--+
- // Result (b - t): +--+--+
- {
- caption: "|b.From - t.From| > 0 && |t.To - b.From| = 0 && |b.To - t.To| > 1",
- target: newRangeSymbolNode('0', '1'),
- base: newRangeSymbolNode('1', '3'),
- result: newRangeSymbolNode('2', '3'),
- },
- // |b.From - t.From| > 0
- // |t.To - b.From| > 0
- // |b.To - t.To| = 1
- //
- // Target (t): +--+--+--+
- // Base (b): +--+--+--+
- // Result (b - t): +--+
- {
- caption: "|b.From - t.From| > 0 && |t.To - b.From| > 0 && |b.To - t.To| = 1",
- target: newRangeSymbolNode('0', '2'),
- base: newRangeSymbolNode('1', '3'),
- result: newSymbolNode('3'),
- },
- // |b.From - t.From| > 0
- // |t.To - b.From| > 0
- // |b.To - t.To| > 1
- //
- // Target (t): +--+--+--+
- // Base (b): +--+--+--+--+
- // Result (b - t): +--+--+
- {
- caption: "|b.From - t.From| > 0 && |t.To - b.From| > 0 && |b.To - t.To| > 1",
- target: newRangeSymbolNode('0', '2'),
- base: newRangeSymbolNode('1', '4'),
- result: newRangeSymbolNode('3', '4'),
- },
-
- // t.From > b.From && t.From <= b.To && t.To >= b.To
-
- // |t.From - b.From| = 1
- // |b.To - t.From| = 0
- // |t.To - b.To| = 0
- //
- // Target (t): +--+
- // Base (b): +--+--+
- // Result (b - t): +--+
- {
- caption: "|t.From - b.From| = 1 && |b.To - t.From| = 0 && |t.To - b.To| = 0",
- target: newSymbolNode('1'),
- base: newRangeSymbolNode('0', '1'),
- result: newSymbolNode('0'),
- },
- // |t.From - b.From| = 1
- // |b.To - t.From| = 0
- // |t.To - b.To| > 0
- //
- // Target (t): +--+--+
- // Base (b): +--+--+
- // Result (b - t): +--+
- {
- caption: "|t.From - b.From| = 1 && |b.To - t.From| = 0 && |t.To - b.To| > 0",
- target: newRangeSymbolNode('1', '2'),
- base: newRangeSymbolNode('0', '1'),
- result: newSymbolNode('0'),
- },
- // |t.From - b.From| = 1
- // |b.To - t.From| > 0
- // |t.To - b.To| = 0
- //
- // Target (t): +--+--+
- // Base (b): +--+--+--+
- // Result (b - t): +--+
- {
- caption: "|t.From - b.From| = 1 && |b.To - t.From| > 0 && |t.To - b.To| = 0",
- target: newRangeSymbolNode('1', '2'),
- base: newRangeSymbolNode('0', '2'),
- result: newSymbolNode('0'),
- },
- // |t.From - b.From| = 1
- // |b.To - t.From| > 0
- // |t.To - b.To| > 0
- //
- // Target (t): +--+--+--+
- // Base (b): +--+--+--+
- // Result (b - t): +--+
- {
- caption: "|t.From - b.From| = 1 && |b.To - t.From| > 0 && |t.To - b.To| > 0",
- target: newRangeSymbolNode('1', '3'),
- base: newRangeSymbolNode('0', '2'),
- result: newSymbolNode('0'),
- },
- // |t.From - b.From| > 1
- // |b.To - t.From| = 0
- // |t.To - b.To| = 0
- //
- // Target (t): +--+
- // Base (b): +--+--+--+
- // Result (b - t): +--+--+
- {
- caption: "|t.From - b.From| > 1 && |b.To - t.From| = 0 && |t.To - b.To| = 0",
- target: newSymbolNode('2'),
- base: newRangeSymbolNode('0', '2'),
- result: newRangeSymbolNode('0', '1'),
- },
- // |t.From - b.From| > 1
- // |b.To - t.From| = 0
- // |t.To - b.To| > 0
- //
- // Target (t): +--+--+
- // Base (b): +--+--+--+
- // Result (b - t): +--+--+
- {
- caption: "|t.From - b.From| > 1 && |b.To - t.From| = 0 && |t.To - b.To| > 0",
- target: newRangeSymbolNode('2', '3'),
- base: newRangeSymbolNode('0', '2'),
- result: newRangeSymbolNode('0', '1'),
- },
- // |t.From - b.From| > 1
- // |b.To - t.From| > 0
- // |t.To - b.To| = 0
- //
- // Target (t): +--+--+
- // Base (b): +--+--+--+--+
- // Result (b - t): +--+--+
- {
- caption: "|t.From - b.From| > 1 && |b.To - t.From| > 0 && |t.To - b.To| = 0",
- target: newRangeSymbolNode('2', '3'),
- base: newRangeSymbolNode('0', '3'),
- result: newRangeSymbolNode('0', '1'),
- },
- // |t.From - b.From| > 1
- // |b.To - t.From| > 0
- // |t.To - b.To| > 0
- //
- // Target (t): +--+--+--+
- // Base (b): +--+--+--+--+
- // Result (b - t): +--+--+
- {
- caption: "|t.From - b.From| > 1 && |b.To - t.From| > 0 && |t.To - b.To| > 0",
- target: newRangeSymbolNode('2', '4'),
- base: newRangeSymbolNode('0', '3'),
- result: newRangeSymbolNode('0', '1'),
- },
-
- // t.From <= b.From && t.To >= b.To
-
- // |b.From - t.From| = 0
- // |t.To - b.To| = 0
- //
- // Target (t): +--+
- // Base (b): +--+
- // Result (b - t): N/A
- {
- caption: "|b.From - t.From| = 0 && |t.To - b.To| = 0",
- target: newSymbolNode('0'),
- base: newSymbolNode('0'),
- result: nil,
- },
- // |b.From - t.From| = 0
- // |t.To - b.To| > 0
- //
- // Target (t): +--+--+
- // Base (b): +--+
- // Result (b - t): N/A
- {
- caption: "|b.From - t.From| = 0 && |t.To - b.To| > 0",
- target: newRangeSymbolNode('0', '1'),
- base: newSymbolNode('0'),
- result: nil,
- },
- // |b.From - t.From| > 0
- // |t.To - b.To| = 0
- //
- // Target (t): +--+--+
- // Base (b): +--+
- // Result (b - t): N/A
- {
- caption: "|b.From - t.From| > 0 && |t.To - b.To| = 0",
- target: newRangeSymbolNode('0', '1'),
- base: newSymbolNode('1'),
- result: nil,
- },
- // |b.From - t.From| > 0
- // |t.To - b.To| > 0
- //
- // Target (t): +--+--+--+
- // Base (b): +--+
- // Result (b - t): N/A
- {
- caption: "|b.From - t.From| > 0 && |t.To - b.To| > 0",
- target: newRangeSymbolNode('0', '2'),
- base: newSymbolNode('1'),
- result: nil,
- },
-
- // Others
-
- // |b.From - t.From| = 1
- //
- // Target (t): +--+
- // Base (b): +--+
- // Result (b - t): +--+
- {
- caption: "|b.From - t.From| = 1",
- target: newSymbolNode('0'),
- base: newSymbolNode('1'),
- result: newSymbolNode('1'),
- },
- // |b.From - t.From| > 1
- //
- // Target (t): +--+
- // Base (b): +--+
- // Result (b - t): +--+
- {
- caption: "|b.From - t.From| > 1",
- target: newSymbolNode('0'),
- base: newSymbolNode('2'),
- result: newSymbolNode('2'),
- },
- // |t.To - b.To| = 1
- //
- // Target (t): +--+
- // Base (b): +--+
- // Result (b - t): +--+
- {
- caption: "|t.To - b.To| = 1",
- target: newSymbolNode('1'),
- base: newSymbolNode('0'),
- result: newSymbolNode('0'),
- },
- // |t.To - b.To| > 1
- //
- // Target (t): +--+
- // Base (b): +--+
- // Result (b - t): +--+
- {
- caption: "|t.To - b.To| > 1",
- target: newSymbolNode('2'),
- base: newSymbolNode('0'),
- result: newSymbolNode('0'),
- },
- } {
- t.Run(test.caption, func(t *testing.T) {
- r := exclude(test.target, test.base)
- testAST(t, test.result, r)
- })
- }
-}
-
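- // The table above pins down exclude's contract: the result is the base
- // range minus the target range, expressed as at most two ranges, or nil
- // when the target covers the base entirely. A minimal sketch of that
- // contract over plain CPRange values (illustrative only: excludeRange is
- // hypothetical, and the production exclude in parser.go operates on
- // CPTree nodes rather than on raw ranges):
- func excludeRange(t, b CPRange) []CPRange {
- var rest []CPRange
- if t.From > b.From {
- // Keep the part of the base that lies below the target.
- to := t.From - 1
- if to > b.To {
- to = b.To
- }
- rest = append(rest, CPRange{From: b.From, To: to})
- }
- if t.To < b.To {
- // Keep the part of the base that lies above the target.
- from := t.To + 1
- if from < b.From {
- from = b.From
- }
- rest = append(rest, CPRange{From: from, To: b.To})
- }
- return rest
- }
-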
-func testAST(t *testing.T, expected, actual CPTree) {
- t.Helper()
-
- aTy := reflect.TypeOf(actual)
- eTy := reflect.TypeOf(expected)
- if eTy != aTy {
- t.Fatalf("unexpected node: want: %+v, got: %+v", eTy, aTy)
- }
-
- if actual == nil {
- return
- }
-
- switch e := expected.(type) {
- case *symbolNode:
- a := actual.(*symbolNode)
- if a.From != e.From || a.To != e.To {
- t.Fatalf("unexpected node: want: %+v, got: %+v", e, a)
- }
- }
- eLeft, eRight := expected.children()
- aLeft, aRight := actual.children()
- testAST(t, eLeft, aLeft)
- testAST(t, eRight, aRight)
-}
diff --git a/grammar/lexical/parser/tree.go b/grammar/lexical/parser/tree.go
deleted file mode 100644
index 0d64e1d..0000000
--- a/grammar/lexical/parser/tree.go
+++ /dev/null
@@ -1,459 +0,0 @@
-package parser
-
-import (
- "fmt"
- "io"
- "sort"
-
- spec "spec/grammar"
-)
-
-type CPRange struct {
- From rune
- To rune
-}
-
-type CPTree interface {
- fmt.Stringer
- Range() (rune, rune, bool)
- Optional() (CPTree, bool)
- Repeatable() (CPTree, bool)
- Concatenation() (CPTree, CPTree, bool)
- Alternatives() (CPTree, CPTree, bool)
- Describe() (spec.LexKindName, []spec.LexKindName, error)
-
- children() (CPTree, CPTree)
- clone() CPTree
-}
-
-var (
- _ CPTree = &rootNode{}
- _ CPTree = &symbolNode{}
- _ CPTree = &concatNode{}
- _ CPTree = &altNode{}
- _ CPTree = &quantifierNode{}
- _ CPTree = &fragmentNode{}
-)
-
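- // Callers are meant to destructure a CPTree through the accessor methods
- // above rather than by type switching on the concrete nodes. A sketch of
- // such a walk, valid inside this package (illustrative only: countSymbols
- // is hypothetical, and the real consumer is the DFA converter under
- // grammar/lexical/dfa):
- func countSymbols(t CPTree) int {
- if t == nil {
- return 0
- }
- if _, _, ok := t.Range(); ok {
- // A leaf covering a code point range counts as one symbol.
- return 1
- }
- l, r := t.children()
- return countSymbols(l) + countSymbols(r)
- }
-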
-type rootNode struct {
- kind spec.LexKindName
- tree CPTree
- fragments map[spec.LexKindName][]*fragmentNode
-}
-
-func newRootNode(kind spec.LexKindName, t CPTree) *rootNode {
- fragments := map[spec.LexKindName][]*fragmentNode{}
- collectFragments(t, fragments)
-
- return &rootNode{
- kind: kind,
- tree: t,
- fragments: fragments,
- }
-}
-
-func collectFragments(n CPTree, fragments map[spec.LexKindName][]*fragmentNode) {
- if n == nil {
- return
- }
-
- if f, ok := n.(*fragmentNode); ok {
- fragments[f.kind] = append(fragments[f.kind], f)
- return
- }
-
- l, r := n.children()
- collectFragments(l, fragments)
- collectFragments(r, fragments)
-}
-
-func (n *rootNode) String() string {
- return fmt.Sprintf("root: %v: %v fragments", n.kind, len(n.fragments))
-}
-
-func (n *rootNode) Range() (rune, rune, bool) {
- return n.tree.Range()
-}
-
-func (n *rootNode) Optional() (CPTree, bool) {
- return n.tree.Optional()
-}
-
-func (n *rootNode) Repeatable() (CPTree, bool) {
- return n.tree.Repeatable()
-}
-
-func (n *rootNode) Concatenation() (CPTree, CPTree, bool) {
- return n.tree.Concatenation()
-}
-
-func (n *rootNode) Alternatives() (CPTree, CPTree, bool) {
- return n.tree.Alternatives()
-}
-
-func (n *rootNode) Describe() (spec.LexKindName, []spec.LexKindName, error) {
- var frags []spec.LexKindName
- for f := range n.fragments {
- frags = append(frags, spec.LexKindName(f))
- }
- sort.Slice(frags, func(i, j int) bool {
- return frags[i] < frags[j]
- })
-
- return n.kind, frags, nil
-}
-
-func (n *rootNode) children() (CPTree, CPTree) {
- return n.tree.children()
-}
-
-func (n *rootNode) clone() CPTree {
- return n.tree.clone()
-}
-
-func (n *rootNode) incomplete() bool {
- return len(n.fragments) > 0
-}
-
-func (n *rootNode) applyFragment(kind spec.LexKindName, fragment CPTree) error {
- root, ok := fragment.(*rootNode)
- if !ok {
- return fmt.Errorf("applyFragment can take only *rootNode: %T", fragment)
- }
- if root.incomplete() {
- return fmt.Errorf("fragment is incomplete")
- }
-
- fs, ok := n.fragments[kind]
- if !ok {
- return nil
- }
- for _, f := range fs {
- f.tree = root.clone()
- }
- delete(n.fragments, kind)
-
- return nil
-}
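-
- // Illustrative use of incomplete and applyFragment (the exported drivers
- // CompleteFragments and ApplyFragments in fragment.go wrap this pattern;
- // parseAll below is hypothetical):
- //
- // root := parseAll(`\f{digit}+`) // references fragment kind "digit"
- // digits := parseAll(`[0-9]`) // fragment tree for kind "digit"
- // err := root.applyFragment("digit", digits) // splices a clone into each reference
- // ok := err == nil && !root.incomplete() // true once no references remain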
-
-type symbolNode struct {
- CPRange
-}
-
-func newSymbolNode(cp rune) *symbolNode {
- return &symbolNode{
- CPRange: CPRange{
- From: cp,
- To: cp,
- },
- }
-}
-
-func newRangeSymbolNode(from, to rune) *symbolNode {
- return &symbolNode{
- CPRange: CPRange{
- From: from,
- To: to,
- },
- }
-}
-
-func (n *symbolNode) String() string {
- return fmt.Sprintf("symbol: %X..%X", n.From, n.To)
-}
-
-func (n *symbolNode) Range() (rune, rune, bool) {
- return n.From, n.To, true
-}
-
-func (n *symbolNode) Optional() (CPTree, bool) {
- return nil, false
-}
-
-func (n *symbolNode) Repeatable() (CPTree, bool) {
- return nil, false
-}
-
-func (n *symbolNode) Concatenation() (CPTree, CPTree, bool) {
- return nil, nil, false
-}
-
-func (n *symbolNode) Alternatives() (CPTree, CPTree, bool) {
- return nil, nil, false
-}
-
-func (n *symbolNode) Describe() (spec.LexKindName, []spec.LexKindName, error) {
- return spec.LexKindNameNil, nil, fmt.Errorf("%T cannot describe", n)
-}
-
-func (n *symbolNode) children() (CPTree, CPTree) {
- return nil, nil
-}
-
-func (n *symbolNode) clone() CPTree {
- return newRangeSymbolNode(n.From, n.To)
-}
-
-type concatNode struct {
- left CPTree
- right CPTree
-}
-
-func newConcatNode(left, right CPTree) *concatNode {
- return &concatNode{
- left: left,
- right: right,
- }
-}
-
-func (n *concatNode) String() string {
- return "concat"
-}
-
-func (n *concatNode) Range() (rune, rune, bool) {
- return 0, 0, false
-}
-
-func (n *concatNode) Optional() (CPTree, bool) {
- return nil, false
-}
-
-func (n *concatNode) Repeatable() (CPTree, bool) {
- return nil, false
-}
-
-func (n *concatNode) Concatenation() (CPTree, CPTree, bool) {
- return n.left, n.right, true
-}
-
-func (n *concatNode) Alternatives() (CPTree, CPTree, bool) {
- return nil, nil, false
-}
-
-func (n *concatNode) Describe() (spec.LexKindName, []spec.LexKindName, error) {
- return spec.LexKindNameNil, nil, fmt.Errorf("%T cannot describe", n)
-}
-
-func (n *concatNode) children() (CPTree, CPTree) {
- return n.left, n.right
-}
-
-func (n *concatNode) clone() CPTree {
- if n == nil {
- return nil
- }
- return newConcatNode(n.left.clone(), n.right.clone())
-}
-
-type altNode struct {
- left CPTree
- right CPTree
-}
-
-func newAltNode(left, right CPTree) *altNode {
- return &altNode{
- left: left,
- right: right,
- }
-}
-
-func (n *altNode) String() string {
- return "alt"
-}
-
-func (n *altNode) Range() (rune, rune, bool) {
- return 0, 0, false
-}
-
-func (n *altNode) Optional() (CPTree, bool) {
- return nil, false
-}
-
-func (n *altNode) Repeatable() (CPTree, bool) {
- return nil, false
-}
-
-func (n *altNode) Concatenation() (CPTree, CPTree, bool) {
- return nil, nil, false
-}
-
-func (n *altNode) Alternatives() (CPTree, CPTree, bool) {
- return n.left, n.right, true
-}
-
-func (n *altNode) Describe() (spec.LexKindName, []spec.LexKindName, error) {
- return spec.LexKindNameNil, nil, fmt.Errorf("%T cannot describe", n)
-}
-
-func (n *altNode) children() (CPTree, CPTree) {
- return n.left, n.right
-}
-
-func (n *altNode) clone() CPTree {
- return newAltNode(n.left.clone(), n.right.clone())
-}
-
-type quantifierNode struct {
- optional bool
- repeatable bool
- tree CPTree
-}
-
-func (n *quantifierNode) String() string {
- switch {
- case n.repeatable:
- return "repeatable (>= 0 times)"
- case n.optional:
- return "optional (0 or 1 times)"
- default:
- return "invalid quantifier"
- }
-}
-
-func newRepeatNode(t CPTree) *quantifierNode {
- return &quantifierNode{
- repeatable: true,
- tree: t,
- }
-}
-
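- // newRepeatOneOrMoreNode desugars t+ into the equivalent pair t t*: the
- // operand concatenated with a repeatable copy of itself. The copy is
- // cloned rather than shared so the two occurrences remain distinct
- // subtrees; each symbol node must be able to carry its own positions when
- // the tree is later converted into a DFA.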
-func newRepeatOneOrMoreNode(t CPTree) *concatNode {
- return newConcatNode(
- t,
- &quantifierNode{
- repeatable: true,
- tree: t.clone(),
- })
-}
-
-func newOptionNode(t CPTree) *quantifierNode {
- return &quantifierNode{
- optional: true,
- tree: t,
- }
-}
-
-func (n *quantifierNode) Range() (rune, rune, bool) {
- return 0, 0, false
-}
-
-func (n *quantifierNode) Optional() (CPTree, bool) {
- return n.tree, n.optional
-}
-
-func (n *quantifierNode) Repeatable() (CPTree, bool) {
- return n.tree, n.repeatable
-}
-
-func (n *quantifierNode) Concatenation() (CPTree, CPTree, bool) {
- return nil, nil, false
-}
-
-func (n *quantifierNode) Alternatives() (CPTree, CPTree, bool) {
- return nil, nil, false
-}
-
-func (n *quantifierNode) Describe() (spec.LexKindName, []spec.LexKindName, error) {
- return spec.LexKindNameNil, nil, fmt.Errorf("%T cannot describe", n)
-}
-
-func (n *quantifierNode) children() (CPTree, CPTree) {
- return n.tree, nil
-}
-
-func (n *quantifierNode) clone() CPTree {
- if n.repeatable {
- return newRepeatNode(n.tree.clone())
- }
- return newOptionNode(n.tree.clone())
-}
-
-type fragmentNode struct {
- kind spec.LexKindName
- tree CPTree
-}
-
-func newFragmentNode(kind spec.LexKindName, t CPTree) *fragmentNode {
- return &fragmentNode{
- kind: kind,
- tree: t,
- }
-}
-
-func (n *fragmentNode) String() string {
- return fmt.Sprintf("fragment: %v", n.kind)
-}
-
-func (n *fragmentNode) Range() (rune, rune, bool) {
- return n.tree.Range()
-}
-
-func (n *fragmentNode) Optional() (CPTree, bool) {
- return n.tree.Optional()
-}
-
-func (n *fragmentNode) Repeatable() (CPTree, bool) {
- return n.tree.Repeatable()
-}
-
-func (n *fragmentNode) Concatenation() (CPTree, CPTree, bool) {
- return n.tree.Concatenation()
-}
-
-func (n *fragmentNode) Alternatives() (CPTree, CPTree, bool) {
- return n.tree.Alternatives()
-}
-
-func (n *fragmentNode) Describe() (spec.LexKindName, []spec.LexKindName, error) {
- return spec.LexKindNameNil, nil, fmt.Errorf("%T cannot describe", n)
-}
-
-func (n *fragmentNode) children() (CPTree, CPTree) {
- return n.tree.children()
-}
-
-func (n *fragmentNode) clone() CPTree {
- if n.tree == nil {
- return newFragmentNode(n.kind, nil)
- }
- return newFragmentNode(n.kind, n.tree.clone())
-}
-
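- // printCPTree renders a tree using the String methods above. For example,
- // the pattern a|b parsed under kind "test" would print as follows (61 and
- // 62 are the hexadecimal code points of 'a' and 'b'):
- //
- // root: test: 0 fragments
- // └─ alt
- //    ├─ symbol: 61..61
- //    └─ symbol: 62..62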
-//nolint:unused
-func printCPTree(w io.Writer, t CPTree, ruledLine string, childRuledLinePrefix string) {
- if t == nil {
- return
- }
- fmt.Fprintf(w, "%v%v\n", ruledLine, t)
- children := []CPTree{}
- switch n := t.(type) {
- case *rootNode:
- children = append(children, n.tree)
- case *fragmentNode:
- children = append(children, n.tree)
- default:
- left, right := t.children()
- if left != nil {
- children = append(children, left)
- }
- if right != nil {
- children = append(children, right)
- }
- }
- num := len(children)
- for i, child := range children {
- line := "└─ "
- if num > 1 {
- if i == 0 {
- line = "├─ "
- } else if i < num-1 {
- line = "│ "
- }
- }
- prefix := "│ "
- if i >= num-1 {
- prefix = " "
- }
- printCPTree(w, child, childRuledLinePrefix+line, childRuledLinePrefix+prefix)
- }
-}