[pypy-svn] r20808 - in pypy/dist/pypy/interpreter/pyparser: . test
adim at codespeak.net
Tue Dec 6 19:12:36 CET 2005
Author: adim
Date: Tue Dec 6 19:12:33 2005
New Revision: 20808
Modified:
pypy/dist/pypy/interpreter/pyparser/ebnfparse.py
pypy/dist/pypy/interpreter/pyparser/grammar.py
pypy/dist/pypy/interpreter/pyparser/test/test_lookahead.py
Log:
fix issue167 (KleenStar becomes KleeneStar)
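Background for the rename: KleeneStar models repetition in grammar rules, '*' meaning zero or more and '+' meaning one or more occurrences of a sub-rule. The snippet below is a minimal sketch of how a rule such as S -> A B* is assembled from the renamed class, using only constructors that appear in this diff; it assumes a PyPy source checkout on sys.path, and the numeric rule/token codes are placeholders chosen for illustration.

    from pypy.interpreter.pyparser.grammar import Sequence, KleeneStar, Token

    A_CODE, B_CODE, BSTAR_CODE, S_CODE = 1, 2, 3, 4   # placeholder codes

    A = Token(A_CODE, 'a')
    B = Token(B_CODE, 'b')

    # B*  ->  zero or more occurrences of B (_max=-1 means unbounded)
    b_star = KleeneStar(BSTAR_CODE, _min=0, rule=B)

    # S -> A B*
    S = Sequence(S_CODE, [A, b_star])
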
Modified: pypy/dist/pypy/interpreter/pyparser/ebnfparse.py
==============================================================================
--- pypy/dist/pypy/interpreter/pyparser/ebnfparse.py (original)
+++ pypy/dist/pypy/interpreter/pyparser/ebnfparse.py Tue Dec 6 19:12:33 2005
@@ -1,6 +1,6 @@
#!/usr/bin/env python
from grammar import BaseGrammarBuilder, Alternative, Sequence, Token, \
- KleenStar, GrammarElement, build_first_sets, EmptyToken
+ KleeneStar, GrammarElement, build_first_sets, EmptyToken
from ebnflexer import GrammarSource
from syntaxtree import AbstractSyntaxVisitor
import pytoken
@@ -182,7 +182,7 @@
def handle_option( self, node ):
rule = node.nodes[1].visit(self)
- return self.new_item( KleenStar( self.new_symbol(), 0, 1, rule ) )
+ return self.new_item( KleeneStar( self.new_symbol(), 0, 1, rule ) )
def handle_group( self, node ):
rule = node.nodes[1].visit(self)
@@ -214,10 +214,10 @@
rule_name = self.new_symbol()
tok = star_opt.nodes[0].nodes[0]
if tok.value == '+':
- item = KleenStar(rule_name, _min=1, rule=myrule)
+ item = KleeneStar(rule_name, _min=1, rule=myrule)
return self.new_item(item)
elif tok.value == '*':
- item = KleenStar(rule_name, _min=0, rule=myrule)
+ item = KleeneStar(rule_name, _min=0, rule=myrule)
return self.new_item(item)
else:
raise SyntaxError("Got symbol star_opt with value='%s'"
@@ -260,7 +260,7 @@
S = g_add_symbol
# star: '*' | '+'
star = Alternative( S("star"), [Token(S('*')), Token(S('+'))] )
- star_opt = KleenStar ( S("star_opt"), 0, 1, rule=star )
+ star_opt = KleeneStar ( S("star_opt"), 0, 1, rule=star )
# rule: SYMBOL ':' alternative
symbol = Sequence( S("symbol"), [Token(S('SYMBOL')), star_opt] )
@@ -269,12 +269,12 @@
rule = Sequence( S("rule"), [symboldef, alternative] )
# grammar: rule+
- grammar = KleenStar( S("grammar"), _min=1, rule=rule )
+ grammar = KleeneStar( S("grammar"), _min=1, rule=rule )
# alternative: sequence ( '|' sequence )*
- sequence = KleenStar( S("sequence"), 1 )
+ sequence = KleeneStar( S("sequence"), 1 )
seq_cont_list = Sequence( S("seq_cont_list"), [Token(S('|')), sequence] )
- sequence_cont = KleenStar( S("sequence_cont"),0, rule=seq_cont_list )
+ sequence_cont = KleeneStar( S("sequence_cont"),0, rule=seq_cont_list )
alternative.args = [ sequence, sequence_cont ]
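
In the EBNF builder above, the '+' and '*' suffixes, as well as the optional-group case handled by handle_option, all map onto the same KleeneStar class and differ only in the _min/_max bounds. The helper below is a hypothetical summary of that mapping, not code from the repository; repeat() and its arguments are made-up names, and the rule code would in practice come from new_symbol().

    from pypy.interpreter.pyparser.grammar import KleeneStar

    def repeat(rule_code, rule, suffix):
        # '+'  ->  at least one repetition
        if suffix == '+':
            return KleeneStar(rule_code, _min=1, rule=rule)
        # '*'  ->  zero or more repetitions
        elif suffix == '*':
            return KleeneStar(rule_code, _min=0, rule=rule)
        # optional group  ->  at most one occurrence
        elif suffix == '?':
            return KleeneStar(rule_code, 0, 1, rule)
        raise SyntaxError("unknown repetition suffix %r" % (suffix,))
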
Modified: pypy/dist/pypy/interpreter/pyparser/grammar.py
==============================================================================
--- pypy/dist/pypy/interpreter/pyparser/grammar.py (original)
+++ pypy/dist/pypy/interpreter/pyparser/grammar.py Tue Dec 6 19:12:33 2005
@@ -4,7 +4,7 @@
the objects of the grammar are :
Alternative : as in S -> A | B | C
Sequence : as in S -> A B C
-KleenStar : as in S -> A* or S -> A+
+KleeneStar : as in S -> A* or S -> A+
Token : a lexer token
"""
@@ -122,7 +122,7 @@
#
# we use the term root for a grammar rule to specify rules that are given a name
# by the grammar
-# a rule like S -> A B* is mapped as Sequence( SCODE, KleenStar(-3, B))
+# a rule like S -> A B* is mapped as Sequence( SCODE, KleeneStar(-3, B))
# so S is a root and the subrule describing B* is not.
# SCODE is the numerical value for rule "S"
@@ -190,9 +190,9 @@
return True
-######################################################################
-# Grammar Elements Classes (Alternative, Sequence, KleenStar, Token) #
-######################################################################
+#######################################################################
+# Grammar Elements Classes (Alternative, Sequence, KleeneStar, Token) #
+#######################################################################
class GrammarElement(object):
"""Base parser class"""
@@ -515,14 +515,14 @@
return True
-class KleenStar(GrammarElement):
- """Represents a KleenStar in a grammar rule as in (S -> A+) or (S -> A*)"""
+class KleeneStar(GrammarElement):
+ """Represents a KleeneStar in a grammar rule as in (S -> A+) or (S -> A*)"""
def __init__(self, name, _min = 0, _max = -1, rule=None):
GrammarElement.__init__( self, name )
self.args = [rule]
self.min = _min
if _max == 0:
- raise ValueError("KleenStar needs max==-1 or max>1")
+ raise ValueError("KleeneStar needs max==-1 or max>1")
self.max = _max
self.star = "x"
if self.min == 0:
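
The constructor shown above takes a minimum repetition count, an optional maximum (-1 meaning unbounded), and the repeated sub-rule, and it rejects _max == 0 with a ValueError. A small sketch of those cases, assuming a PyPy checkout on sys.path; the token and rule codes are placeholders:

    from pypy.interpreter.pyparser.grammar import KleeneStar, Token

    TOK = Token(1, 'tok')                                      # placeholder token

    one_to_three = KleeneStar(10, _min=1, _max=3, rule=TOK)    # 1 to 3 repetitions
    zero_or_more = KleeneStar(11, _min=0, rule=TOK)            # _max defaults to -1 (unbounded)

    try:
        KleeneStar(12, _min=0, _max=0, rule=TOK)
    except ValueError:
        pass                                  # "KleeneStar needs max==-1 or max>1"
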
Modified: pypy/dist/pypy/interpreter/pyparser/test/test_lookahead.py
==============================================================================
--- pypy/dist/pypy/interpreter/pyparser/test/test_lookahead.py (original)
+++ pypy/dist/pypy/interpreter/pyparser/test/test_lookahead.py Tue Dec 6 19:12:33 2005
@@ -1,4 +1,4 @@
-from pypy.interpreter.pyparser.grammar import Alternative, Sequence, KleenStar, \
+from pypy.interpreter.pyparser.grammar import Alternative, Sequence, KleeneStar, \
Token, EmptyToken, build_first_sets
class TestLookAheadBasics:
@@ -32,10 +32,10 @@
def test_basic_kleenstar(self):
tok1, tok2, tok3 = self.tokens
- kstar = KleenStar(self.nextid(), 1, 3, tok1)
+ kstar = KleeneStar(self.nextid(), 1, 3, tok1)
build_first_sets([kstar])
assert kstar.first_set == [tok1]
- kstar = KleenStar(self.nextid(), 0, 3, tok1)
+ kstar = KleeneStar(self.nextid(), 0, 3, tok1)
build_first_sets([kstar])
assert kstar.first_set == [tok1, EmptyToken]
@@ -45,8 +45,8 @@
==> S.first_set = [tok1, tok2, EmptyToken]
"""
tok1, tok2, tok3 = self.tokens
- k1 = KleenStar(self.nextid(), 0, 2, tok1)
- k2 = KleenStar(self.nextid(), 0, 2, tok2)
+ k1 = KleeneStar(self.nextid(), 0, 2, tok1)
+ k2 = KleeneStar(self.nextid(), 0, 2, tok2)
seq = Sequence(self.nextid(), [k1, k2])
build_first_sets([k1, k2, seq])
assert seq.first_set == [tok1, tok2, EmptyToken]
@@ -57,8 +57,8 @@
==> S.first_set = [tok1, tok2]
"""
tok1, tok2, tok3 = self.tokens
- k1 = KleenStar(self.nextid(), 0, 2, tok1)
- k2 = KleenStar(self.nextid(), 1, 2, tok2)
+ k1 = KleeneStar(self.nextid(), 0, 2, tok1)
+ k2 = KleeneStar(self.nextid(), 1, 2, tok2)
seq = Sequence(self.nextid(), [k1, k2])
build_first_sets([k1, k2, seq])
assert seq.first_set == [tok1, tok2]
@@ -83,8 +83,8 @@
self.LOW = Token(LOW, 'low')
self.CAP = Token(CAP ,'cap')
self.A = Alternative(R_A, [])
- k1 = KleenStar(R_k1, 0, rule=self.LOW)
- k2 = KleenStar(R_k2, 0, rule=self.CAP)
+ k1 = KleeneStar(R_k1, 0, rule=self.LOW)
+ k2 = KleeneStar(R_k2, 0, rule=self.CAP)
self.B = Sequence(R_B, [k1, self.A])
self.C = Sequence(R_C, [k2, self.A])
self.A.args = [self.B, self.C]
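
The tests above exercise the first-set computation: a KleeneStar with _min=0 can match nothing, so it either contributes EmptyToken or lets the following element's tokens through, while _min >= 1 makes the element mandatory. Below is a self-contained version of the sequence case, mirroring the test file's imports; the rule and token codes are placeholders.

    from pypy.interpreter.pyparser.grammar import Sequence, KleeneStar, Token, \
         EmptyToken, build_first_sets

    tok1, tok2 = Token(1, 'tok1'), Token(2, 'tok2')

    k1 = KleeneStar(10, 0, 2, tok1)      # optional: 0 to 2 repetitions
    k2 = KleeneStar(11, 1, 2, tok2)      # mandatory: at least one repetition
    seq = Sequence(12, [k1, k2])
    build_first_sets([k1, k2, seq])

    # k1 may be skipped, so tok2 can also start the sequence; EmptyToken is
    # absent because k2 must consume at least one token
    assert seq.first_set == [tok1, tok2]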