[pypy-svn] r48879 - in pypy/branch/dist-future-fixing/pypy/interpreter: pyparser pyparser/test test
ac at codespeak.net
Tue Nov 20 22:03:49 CET 2007
Author: ac
Date: Tue Nov 20 22:03:48 2007
New Revision: 48879
Removed:
pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/test/test_astbuilder_future.py
Modified:
pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/astbuilder.py
pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/ebnfgrammar.py
pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/ebnflexer.py
pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/ebnfparse.py
pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/future.py
pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/grammar.py
pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/pythonlexer.py
pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/pytoken.py
pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/test/test_lookahead.py
pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/test/test_parser.py
pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/test/test_pytokenizer.py
pypy/branch/dist-future-fixing/pypy/interpreter/test/test_compiler.py
Log:
(jacob, arre): More work on making from __future__ support saner.
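
The recurring change below drops the parser argument from Token.__init__: tokens no longer keep a back-reference to the parser that created them, and Token.display() falls back to the raw codename instead of parser.symbol_repr(). A minimal sketch of the new shape, condensed from the grammar.py hunk further down (the usage line at the end is illustrative, not taken from the diff):

    class GrammarElement(object):
        def __init__(self, codename):
            self.codename = codename

    class Token(GrammarElement):
        """Represents a Token in a grammar rule (a lexer token)."""
        def __init__(self, codename, value=None):
            # Old signature was Token(parser, codename, value=None);
            # the parser back-reference is gone after this commit.
            GrammarElement.__init__(self, codename)
            self.value = value
            self.first_set = [self]

        def display(self, level=0):
            # Without self.parser there is no symbol_repr() lookup,
            # so display shows the codename directly.
            if self.value is None:
                return "<%s>" % self.codename
            return "<%s>=='%s'" % (self.codename, self.value)

    # Illustrative usage: call sites now pass only codename (and value).
    tok = Token('NAME', 'x')
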
Modified: pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/astbuilder.py
==============================================================================
--- pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/astbuilder.py (original)
+++ pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/astbuilder.py Tue Nov 20 22:03:48 2007
@@ -764,7 +764,6 @@
else_ = atoms[6]
builder.push(ast.While(test, body, else_, atoms[0].lineno))
-
def build_with_stmt(builder, nb):
"""with_stmt: 'with' test [ NAME expr ] ':' suite"""
@@ -783,7 +782,6 @@
body = atoms[5]
builder.push(ast.With(test, body, var, atoms[0].lineno))
-
def build_import_name(builder, nb):
"""import_name: 'import' dotted_as_names
@@ -838,7 +836,6 @@
import_as_name: NAME [NAME NAME]
"""
atoms = get_atoms(builder, nb)
-
index = 1
incr, from_name = parse_dotted_names(atoms[index:], builder)
index += (incr + 1) # skip 'import'
@@ -882,42 +879,6 @@
builder.push(ast.From(from_name, names, atoms[0].lineno))
-def build_future_import_feature(builder, nb):
- """
- future_import_feature: NAME [('as'|NAME) NAME]
-
- Enables python language future imports. Called once per feature imported,
- no matter how you got to this one particular feature.
- """
-
- atoms = peek_atoms(builder, nb)
-
- feature_name = atoms[0].get_value()
- assert type(feature_name) is str
- space = builder.space
- feature_code = space.unwrap(space.appexec([space.wrap(feature_name)],
- """(feature):
- import __future__ as f
- feature = getattr(f, feature, None)
- return feature and feature.compiler_flag or 0
- """))
-
- # We will call a method on the parser (the method exists only in unit
- # tests).
- if feature_code == consts.CO_FUTURE_WITH_STATEMENT:
- rules = """
- compound_stmt: (if_stmt | while_stmt | for_stmt | try_stmt |
- funcdef | classdef | with_stmt)
- with_stmt: 'with' test [ 'as' expr ] ':' suite
- """
- builder.insert_grammar_rule(rules, {
- 'with_stmt': build_with_stmt})
-
- # We need to keep the rule on the stack so we can share atoms
- # with a later rule
- return True
-
-
def build_yield_stmt(builder, nb):
atoms = get_atoms(builder, nb)
builder.push(ast.Yield(atoms[1], atoms[0].lineno))
@@ -1089,6 +1050,7 @@
'exprlist' : build_exprlist,
'decorator' : build_decorator,
'eval_input' : build_eval_input,
+ 'with_stmt' : build_with_stmt,
}
@@ -1110,11 +1072,6 @@
## self.with_enabled = False
self.build_rules = ASTRULES_Template
self.user_build_rules = {}
- if grammar_version >= "2.5":
- self.build_rules.update({
- 'future_import_feature': build_future_import_feature,
- 'import_from_future': build_import_from,
- })
## def enable_with(self):
## if self.with_enabled:
@@ -1157,7 +1114,9 @@
self.push(astnode)
else:
builder_func = self.build_rules.get(rulename, None)
- if not builder_func or builder_func(self, 1):
+ if builder_func:
+ builder_func(self, 1)
+ else:
self.push_rule(rule.codename, 1, source)
else:
self.push_rule(rule.codename, 1, source)
@@ -1177,7 +1136,9 @@
self.push(astnode)
else:
builder_func = self.build_rules.get(rulename, None)
- if not builder_func or builder_func(self, elts_number):
+ if builder_func:
+ builder_func(self, elts_number)
+ else:
self.push_rule(rule.codename, elts_number, source)
else:
self.push_rule(rule.codename, elts_number, source)
@@ -1228,14 +1189,6 @@
else:
return None
- def insert_grammar_rule(self, rule, buildfuncs):
- """Inserts new grammar rules for the builder
- This allows to change the rules during the parsing
- """
- self.parser.insert_rule(rule)
- self.build_rules.update(buildfuncs)
-
-
def show_stack(before, after):
"""debugging helper function"""
size1 = len(before)
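
For context on the dispatch change above: before this commit a build rule could return a true value to ask the builder to keep the rule on the stack (the now-deleted build_future_import_feature relied on this), so the old test read "if not builder_func or builder_func(self, n): self.push_rule(...)". After the commit a builder's return value is ignored and push_rule() runs only when no builder is registered. A condensed sketch of the new dispatch, with n standing in for 1 or elts_number:

    builder_func = self.build_rules.get(rulename, None)
    if builder_func:
        # Builders now always consume their atoms; returning True
        # no longer keeps the rule on the stack.
        builder_func(self, n)
    else:
        self.push_rule(rule.codename, n, source)
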
Modified: pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/ebnfgrammar.py
==============================================================================
--- pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/ebnfgrammar.py (original)
+++ pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/ebnfgrammar.py Tue Nov 20 22:03:48 2007
@@ -27,7 +27,7 @@
group: '(' alternative ')' star?
"""
p = GRAMMAR_GRAMMAR
- p.add_token(Token('EOF','EOF'))
+ p.add_token(Token('EOF'))
# star: '*' | '+'
star = p.Alternative_n( "star", [p.Token_n('TOK_STAR', '*'), p.Token_n('TOK_ADD', '+')] )
Modified: pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/ebnflexer.py
==============================================================================
--- pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/ebnflexer.py (original)
+++ pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/ebnflexer.py Tue Nov 20 22:03:48 2007
@@ -136,7 +136,7 @@
end = len(self.input)
pos = self.skip_empty_lines(inp,pos,end)
if pos==end:
- return Token(_p, _p.EOF, None)
+ return Token(_p.EOF, None)
# at this point nextchar is not a white space nor \n
nextchr = inp[pos]
@@ -148,22 +148,22 @@
self.pos = npos
_endpos = npos - 1
assert _endpos>=0
- return Token(_p, _p.TOK_STRING, inp[pos+1:_endpos])
+ return Token(_p.TOK_STRING, inp[pos+1:_endpos])
else:
npos = match_symbol( inp, pos, end)
if npos!=pos:
self.pos = npos
if npos!=end and inp[npos]==":":
self.pos += 1
- return Token(_p, _p.TOK_SYMDEF, inp[pos:npos])
+ return Token(_p.TOK_SYMDEF, inp[pos:npos])
else:
- return Token(_p, _p.TOK_SYMBOL, inp[pos:npos])
+ return Token(_p.TOK_SYMBOL, inp[pos:npos])
# we still have pos!=end here
chr = inp[pos]
if chr in "[]()*+|":
self.pos = pos+1
- return Token(_p, _p.tok_values[chr], chr)
+ return Token(_p.tok_values[chr], chr)
self.RaiseError( "Unknown token" )
def peek(self):
Modified: pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/ebnfparse.py
==============================================================================
--- pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/ebnfparse.py (original)
+++ pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/ebnfparse.py Tue Nov 20 22:03:48 2007
@@ -41,7 +41,7 @@
class NameToken(Token):
"""A token that is not a keyword"""
def __init__(self, parser, keywords=None):
- Token.__init__(self, parser, parser.tokens['NAME'])
+ Token.__init__(self, parser.tokens['NAME'])
self.keywords = keywords
def match(self, source, builder, level=0):
@@ -105,7 +105,7 @@
self.current_rule_name = ""
self.tokens = {}
self.keywords = []
- NAME = dest_parser.add_token(Token(dest_parser, 'NAME'))
+ NAME = dest_parser.add_token(Token('NAME'))
# NAME = dest_parser.tokens['NAME']
self.tokens[NAME] = NameToken(dest_parser, keywords=self.keywords)
@@ -158,7 +158,7 @@
"""Returns a new or existing Token"""
if codename in self.tokens:
return self.tokens[codename]
- token = self.tokens[codename] = Token(self.parser, codename, None)
+ token = self.tokens[codename] = Token(codename, None)
return token
def get_symbolcode(self, name):
@@ -273,12 +273,12 @@
if value in self.parser.tok_values:
# punctuation
tokencode = self.parser.tok_values[value]
- tok = Token(self.parser, tokencode, None)
+ tok = Token(tokencode, None)
else:
if not is_py_name(value):
raise RuntimeError("Unknown STRING value ('%s')" % value)
# assume a keyword
- tok = Token(self.parser, self.parser.tokens['NAME'], value)
+ tok = Token(self.parser.tokens['NAME'], value)
if value not in self.keywords:
self.keywords.append(value)
self.rule_stack.append(tok)
Modified: pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/future.py
==============================================================================
--- pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/future.py (original)
+++ pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/future.py Tue Nov 20 22:03:48 2007
@@ -276,7 +276,7 @@
# and that we have a hacked __future__ module.
from pypy.config.pypyoption import get_pypy_config
config = get_pypy_config(translating=False)
-if config.objspace.pyversion == '2.4':
+if config.objspace.pyversion == '2.4' and False:
futureFlags = FutureFlags((2, 4, 4, 'final', 0))
else:
futureFlags = FutureFlags((2, 5, 0, 'final', 0))
Modified: pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/grammar.py
==============================================================================
--- pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/grammar.py (original)
+++ pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/grammar.py Tue Nov 20 22:03:48 2007
@@ -655,9 +655,8 @@
class Token(GrammarElement):
"""Represents a Token in a grammar rule (a lexer token)"""
- def __init__(self, parser, codename, value=None):
+ def __init__(self, codename, value=None):
GrammarElement.__init__(self, codename)
- self.parser = parser
self.value = value
self.first_set = [self]
# self.first_set = {self: 1}
@@ -693,11 +692,10 @@
return 0
def display(self, level=0):
- name = self.parser.symbol_repr( self.codename )
if self.value is None:
- return "<%s>" % name
+ return "<%s>" % self.codename
else:
- return "<%s>=='%s'" % (name, self.value)
+ return "<%s>=='%s'" % (self.codename, self.value)
def match_token(self, builder, other):
"""convenience '==' implementation, this is *not* a *real* equality test
@@ -738,7 +736,7 @@
return False
-EmptyToken = Token(None, -1, None)
+EmptyToken = Token(-1, None)
class Parser(object):
def __init__(self):
@@ -875,9 +873,9 @@
# XXX What is the significance of the name_id? Needs to be found
# out for full refactoring of this code.
- t = Token(self, name, value)
+ t = Token(name, value)
name_id = self.add_token(t)
- return Token(self, name_id, value)
+ return Token(name_id, value)
# Debugging functions
def show_rules(self, name):
Modified: pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/pythonlexer.py
==============================================================================
--- pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/pythonlexer.py (original)
+++ pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/pythonlexer.py Tue Nov 20 22:03:48 2007
@@ -121,7 +121,7 @@
endmatch = endDFA.recognize(line)
if endmatch >= 0:
pos = end = endmatch
- tok = Token(parser, parser.tokens['STRING'], contstr + line[:end])
+ tok = Token(parser.tokens['STRING'], contstr + line[:end])
token_list.append((tok, line, lnum, pos))
last_comment = ''
# token_list.append((STRING, contstr + line[:end],
@@ -130,7 +130,7 @@
contline = None
elif (needcont and not line.endswith('\\\n') and
not line.endswith('\\\r\n')):
- tok = Token(parser, parser.tokens['ERRORTOKEN'], contstr + line)
+ tok = Token(parser.tokens['ERRORTOKEN'], contstr + line)
token_list.append((tok, line, lnum, pos))
last_comment = ''
# token_list.append((ERRORTOKEN, contstr + line,
@@ -161,10 +161,10 @@
if line[pos] in '#\r\n': # skip comments or blank lines
if line[pos] == '#':
- tok = Token(parser, parser.tokens['COMMENT'], line[pos:])
+ tok = Token(parser.tokens['COMMENT'], line[pos:])
last_comment = line[pos:]
else:
- tok = Token(parser, parser.tokens['NL'], line[pos:])
+ tok = Token(parser.tokens['NL'], line[pos:])
last_comment = ''
# XXX Skip NL and COMMENT Tokens
# token_list.append((tok, line, lnum, pos))
@@ -172,12 +172,12 @@
if column > indents[-1]: # count indents or dedents
indents.append(column)
- tok = Token(parser, parser.tokens['INDENT'], line[:pos])
+ tok = Token(parser.tokens['INDENT'], line[:pos])
token_list.append((tok, line, lnum, pos))
last_comment = ''
while column < indents[-1]:
indents = indents[:-1]
- tok = Token(parser, parser.tokens['DEDENT'], '')
+ tok = Token(parser.tokens['DEDENT'], '')
token_list.append((tok, line, lnum, pos))
last_comment = ''
else: # continued statement
@@ -204,22 +204,22 @@
token, initial = line[start:end], line[start]
if initial in numchars or \
(initial == '.' and token != '.'): # ordinary number
- tok = Token(parser, parser.tokens['NUMBER'], token)
+ tok = Token(parser.tokens['NUMBER'], token)
token_list.append((tok, line, lnum, pos))
last_comment = ''
elif initial in '\r\n':
if parenlev > 0:
- tok = Token(parser, parser.tokens['NL'], token)
+ tok = Token(parser.tokens['NL'], token)
last_comment = ''
# XXX Skip NL
else:
- tok = Token(parser, parser.tokens['NEWLINE'], token)
+ tok = Token(parser.tokens['NEWLINE'], token)
# XXX YUCK !
tok.value = last_comment
token_list.append((tok, line, lnum, pos))
last_comment = ''
elif initial == '#':
- tok = Token(parser, parser.tokens['COMMENT'], token)
+ tok = Token(parser.tokens['COMMENT'], token)
last_comment = token
# XXX Skip # token_list.append((tok, line, lnum, pos))
# token_list.append((COMMENT, token, spos, epos, line))
@@ -229,7 +229,7 @@
if endmatch >= 0: # all on one line
pos = endmatch
token = line[start:pos]
- tok = Token(parser, parser.tokens['STRING'], token)
+ tok = Token(parser.tokens['STRING'], token)
token_list.append((tok, line, lnum, pos))
last_comment = ''
else:
@@ -246,11 +246,11 @@
contline = line
break
else: # ordinary string
- tok = Token(parser, parser.tokens['STRING'], token)
+ tok = Token(parser.tokens['STRING'], token)
token_list.append((tok, line, lnum, pos))
last_comment = ''
elif initial in namechars: # ordinary name
- tok = Token(parser, parser.tokens['NAME'], token)
+ tok = Token(parser.tokens['NAME'], token)
token_list.append((tok, line, lnum, pos))
last_comment = ''
elif initial == '\\': # continued stmt
@@ -266,9 +266,9 @@
(lnum-1, 0), token_list)
if token in parser.tok_values:
punct = parser.tok_values[token]
- tok = Token(parser, punct, None)
+ tok = Token(punct, None)
else:
- tok = Token(parser, parser.tokens['OP'], token)
+ tok = Token(parser.tokens['OP'], token)
token_list.append((tok, line, lnum, pos))
last_comment = ''
else:
@@ -279,7 +279,7 @@
raise TokenError(
'EOL while scanning single-quoted string', line,
x(lnum, start), token_list)
- tok = Token(parser, parser.tokens['ERRORTOKEN'], line[pos])
+ tok = Token(parser.tokens['ERRORTOKEN'], line[pos])
token_list.append((tok, line, lnum, pos))
last_comment = ''
pos = pos + 1
@@ -289,17 +289,17 @@
if (token_list and
token_list[-1][0].codename != parser.tokens['NEWLINE']):
token_list.append(
- (Token(parser, parser.tokens['NEWLINE'], ''), '\n', lnum, 0))
+ (Token(parser.tokens['NEWLINE'], ''), '\n', lnum, 0))
for indent in indents[1:]: # pop remaining indent levels
- tok = Token(parser, parser.tokens['DEDENT'], '')
+ tok = Token(parser.tokens['DEDENT'], '')
token_list.append((tok, line, lnum, pos))
#if token_list and token_list[-1][0].codename != pytoken.NEWLINE:
token_list.append(
- (Token(parser, parser.tokens['NEWLINE'], ''), '\n', lnum, 0))
+ (Token(parser.tokens['NEWLINE'], ''), '\n', lnum, 0))
token_list.append(
- (Token(parser, parser.tokens['ENDMARKER'], '',), line, lnum, pos))
+ (Token(parser.tokens['ENDMARKER'], '',), line, lnum, pos))
#for t in token_list:
# print '%20s %-25s %d' % (pytoken.tok_name.get(t[0].codename, '?'), t[0], t[-2])
#print '----------------------------------------- pyparser/pythonlexer.py'
Modified: pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/pytoken.py
==============================================================================
--- pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/pytoken.py (original)
+++ pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/pytoken.py Tue Nov 20 22:03:48 2007
@@ -17,62 +17,62 @@
# global tok_rpunct
# For compatibility, this produces the same constant values as Python 2.4.
from grammar import Token
- parser.add_token(Token(parser, 'ENDMARKER' ))
- parser.add_token(Token(parser, 'NAME' ))
- parser.add_token(Token(parser, 'NUMBER' ))
- parser.add_token(Token(parser, 'STRING' ))
- parser.add_token(Token(parser, 'NEWLINE' ))
- parser.add_token(Token(parser, 'INDENT' ))
- parser.add_token(Token(parser, 'DEDENT' ))
- parser.add_token(Token(parser, 'LPAR', "(" ))
- parser.add_token(Token(parser, 'RPAR', ")" ))
- parser.add_token(Token(parser, 'LSQB', "[" ))
- parser.add_token(Token(parser, 'RSQB', "]" ))
- parser.add_token(Token(parser, 'COLON', ":" ))
- parser.add_token(Token(parser, 'COMMA', "," ))
- parser.add_token(Token(parser, 'SEMI', ";" ))
- parser.add_token(Token(parser, 'PLUS', "+" ))
- parser.add_token(Token(parser, 'MINUS', "-" ))
- parser.add_token(Token(parser, 'STAR', "*" ))
- parser.add_token(Token(parser, 'SLASH', "/" ))
- parser.add_token(Token(parser, 'VBAR', "|" ))
- parser.add_token(Token(parser, 'AMPER', "&" ))
- parser.add_token(Token(parser, 'LESS', "<" ))
- parser.add_token(Token(parser, 'GREATER', ">" ))
- parser.add_token(Token(parser, 'EQUAL', "=" ))
- parser.add_token(Token(parser, 'DOT', "." ))
- parser.add_token(Token(parser, 'PERCENT', "%" ))
- parser.add_token(Token(parser, 'BACKQUOTE', "`" ))
- parser.add_token(Token(parser, 'LBRACE', "{" ))
- parser.add_token(Token(parser, 'RBRACE', "}" ))
- parser.add_token(Token(parser, 'EQEQUAL', "==" ))
- ne = parser.add_token(Token(parser, 'NOTEQUAL', "!=" ))
+ parser.add_token(Token('ENDMARKER' ))
+ parser.add_token(Token('NAME' ))
+ parser.add_token(Token('NUMBER' ))
+ parser.add_token(Token('STRING' ))
+ parser.add_token(Token('NEWLINE' ))
+ parser.add_token(Token('INDENT' ))
+ parser.add_token(Token('DEDENT' ))
+ parser.add_token(Token('LPAR', "(" ))
+ parser.add_token(Token('RPAR', ")" ))
+ parser.add_token(Token('LSQB', "[" ))
+ parser.add_token(Token('RSQB', "]" ))
+ parser.add_token(Token('COLON', ":" ))
+ parser.add_token(Token('COMMA', "," ))
+ parser.add_token(Token('SEMI', ";" ))
+ parser.add_token(Token('PLUS', "+" ))
+ parser.add_token(Token('MINUS', "-" ))
+ parser.add_token(Token('STAR', "*" ))
+ parser.add_token(Token('SLASH', "/" ))
+ parser.add_token(Token('VBAR', "|" ))
+ parser.add_token(Token('AMPER', "&" ))
+ parser.add_token(Token('LESS', "<" ))
+ parser.add_token(Token('GREATER', ">" ))
+ parser.add_token(Token('EQUAL', "=" ))
+ parser.add_token(Token('DOT', "." ))
+ parser.add_token(Token('PERCENT', "%" ))
+ parser.add_token(Token('BACKQUOTE', "`" ))
+ parser.add_token(Token('LBRACE', "{" ))
+ parser.add_token(Token('RBRACE', "}" ))
+ parser.add_token(Token('EQEQUAL', "==" ))
+ ne = parser.add_token(Token('NOTEQUAL', "!=" ))
parser.tok_values["<>"] = ne
- parser.add_token(Token(parser, 'LESSEQUAL', "<=" ))
- parser.add_token(Token(parser, 'GREATEREQUAL', ">=" ))
- parser.add_token(Token(parser, 'TILDE', "~" ))
- parser.add_token(Token(parser, 'CIRCUMFLEX', "^" ))
- parser.add_token(Token(parser, 'LEFTSHIFT', "<<" ))
- parser.add_token(Token(parser, 'RIGHTSHIFT', ">>" ))
- parser.add_token(Token(parser, 'DOUBLESTAR', "**" ))
- parser.add_token(Token(parser, 'PLUSEQUAL', "+=" ))
- parser.add_token(Token(parser, 'MINEQUAL', "-=" ))
- parser.add_token(Token(parser, 'STAREQUAL', "*=" ))
- parser.add_token(Token(parser, 'SLASHEQUAL', "/=" ))
- parser.add_token(Token(parser, 'PERCENTEQUAL', "%=" ))
- parser.add_token(Token(parser, 'AMPEREQUAL', "&=" ))
- parser.add_token(Token(parser, 'VBAREQUAL', "|=" ))
- parser.add_token(Token(parser, 'CIRCUMFLEXEQUAL', "^=" ))
- parser.add_token(Token(parser, 'LEFTSHIFTEQUAL', "<<=" ))
- parser.add_token(Token(parser, 'RIGHTSHIFTEQUAL', ">>=" ))
- parser.add_token(Token(parser, 'DOUBLESTAREQUAL', "**=" ))
- parser.add_token(Token(parser, 'DOUBLESLASH', "//" ))
- parser.add_token(Token(parser, 'DOUBLESLASHEQUAL',"//=" ))
- parser.add_token(Token(parser, 'AT', "@" ))
- parser.add_token(Token(parser, 'OP' ))
- parser.add_token(Token(parser, 'ERRORTOKEN' ))
+ parser.add_token(Token('LESSEQUAL', "<=" ))
+ parser.add_token(Token('GREATEREQUAL', ">=" ))
+ parser.add_token(Token('TILDE', "~" ))
+ parser.add_token(Token('CIRCUMFLEX', "^" ))
+ parser.add_token(Token('LEFTSHIFT', "<<" ))
+ parser.add_token(Token('RIGHTSHIFT', ">>" ))
+ parser.add_token(Token('DOUBLESTAR', "**" ))
+ parser.add_token(Token('PLUSEQUAL', "+=" ))
+ parser.add_token(Token('MINEQUAL', "-=" ))
+ parser.add_token(Token('STAREQUAL', "*=" ))
+ parser.add_token(Token('SLASHEQUAL', "/=" ))
+ parser.add_token(Token('PERCENTEQUAL', "%=" ))
+ parser.add_token(Token('AMPEREQUAL', "&=" ))
+ parser.add_token(Token('VBAREQUAL', "|=" ))
+ parser.add_token(Token('CIRCUMFLEXEQUAL', "^=" ))
+ parser.add_token(Token('LEFTSHIFTEQUAL', "<<=" ))
+ parser.add_token(Token('RIGHTSHIFTEQUAL', ">>=" ))
+ parser.add_token(Token('DOUBLESTAREQUAL', "**=" ))
+ parser.add_token(Token('DOUBLESLASH', "//" ))
+ parser.add_token(Token('DOUBLESLASHEQUAL',"//=" ))
+ parser.add_token(Token('AT', "@" ))
+ parser.add_token(Token('OP' ))
+ parser.add_token(Token('ERRORTOKEN' ))
# extra PyPy-specific tokens
- parser.add_token(Token(parser, "COMMENT" ))
- parser.add_token(Token(parser, "NL" ))
+ parser.add_token(Token("COMMENT" ))
+ parser.add_token(Token("NL" ))
Modified: pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/test/test_lookahead.py
==============================================================================
--- pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/test/test_lookahead.py (original)
+++ pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/test/test_lookahead.py Tue Nov 20 22:03:48 2007
@@ -86,7 +86,7 @@
p = self.parser
LOW = p.tokens['LOW']
CAP = p.tokens['CAP']
- for s in [Token(p, LOW, 'low'), EmptyToken, Token(p, CAP, 'cap')]:
+ for s in [Token(LOW, 'low'), EmptyToken, Token(CAP, 'cap')]:
assert s in self.A.first_set
assert s in self.B.first_set
assert s in self.C.first_set
Modified: pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/test/test_parser.py
==============================================================================
--- pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/test/test_parser.py (original)
+++ pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/test/test_parser.py Tue Nov 20 22:03:48 2007
@@ -7,7 +7,7 @@
def test_symbols():
p = Parser()
x1 = p.add_symbol('sym')
- x2 = p.add_token(Token(p, 'tok'))
+ x2 = p.add_token(Token('tok'))
x3 = p.add_anon_symbol(':sym')
x4 = p.add_anon_symbol(':sym1')
# test basic numbering assumption
@@ -20,7 +20,7 @@
assert x3 < 0
y1 = p.add_symbol('sym')
assert y1 == x1
- y2 = p.add_token(Token(p, 'tok'))
+ y2 = p.add_token(Token('tok'))
assert y2 == x2
y3 = p.add_symbol(':sym')
assert y3 == x3
@@ -50,11 +50,7 @@
def __init__(self, *args, **kw):
self.trace = []
- self.exclude_rules = [
- 'dotted_name', 'dotted_as_name', 'dotted_as_names',
- 'import_stmt', 'small_stmt', 'simple_stmt', 'stmt',
- 'single_input', 'file_input', 'future_import_list',
- 'import_from_future', 'future_import_as_names']
+ self.exclude_rules = []
def __getitem__(self, attr):
if attr in self.exclude_rules:
@@ -76,7 +72,7 @@
self.build_rules = RuleTracer()
-class TestFuture(object):
+class XTestFuture(object):
_grammar_ver = '2.5a'
Modified: pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/test/test_pytokenizer.py
==============================================================================
--- pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/test/test_pytokenizer.py (original)
+++ pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/test/test_pytokenizer.py Tue Nov 20 22:03:48 2007
@@ -20,7 +20,7 @@
"""returns list of parsed tokens"""
lexer = Source( P, source.splitlines(True))
tokens = []
- last_token = Token( P, NULLTOKEN, None)
+ last_token = Token(NULLTOKEN, None)
while last_token.codename != ENDMARKER:
last_token = lexer.next()
tokens.append(last_token)
@@ -58,24 +58,24 @@
s = """['a'
]"""
tokens = parse_source(s)
- assert tokens[:4] == [Token(P, LSQB, None), Token(P, STRING, "'a'"),
- Token(P, RSQB, None), Token(P, NEWLINE, '')]
+ assert tokens[:4] == [Token(LSQB, None), Token(STRING, "'a'"),
+ Token(RSQB, None), Token(NEWLINE, '')]
def test_numbers():
"""make sure all kind of numbers are correctly parsed"""
for number in NUMBERS:
- assert parse_source(number)[0] == Token(P, NUMBER, number)
+ assert parse_source(number)[0] == Token(NUMBER, number)
neg = '-%s' % number
- assert parse_source(neg)[:2] == [Token(P, MINUS, None),
- Token(P, NUMBER, number)]
+ assert parse_source(neg)[:2] == [Token(MINUS, None),
+ Token(NUMBER, number)]
for number in BAD_NUMBERS:
- assert parse_source(number)[0] != Token(P, NUMBER, number)
+ assert parse_source(number)[0] != Token(NUMBER, number)
def test_hex_number():
"""basic pasrse"""
tokens = parse_source("a = 0x12L")
- assert tokens[:4] == [Token(P, NAME, 'a'), Token(P, EQUAL, None),
- Token(P, NUMBER, '0x12L'), Token(P, NEWLINE, '')]
+ assert tokens[:4] == [Token(NAME, 'a'), Token(EQUAL, None),
+ Token(NUMBER, '0x12L'), Token(NEWLINE, '')]
def test_punct():
"""make sure each punctuation is correctly parsed"""
Modified: pypy/branch/dist-future-fixing/pypy/interpreter/test/test_compiler.py
==============================================================================
--- pypy/branch/dist-future-fixing/pypy/interpreter/test/test_compiler.py (original)
+++ pypy/branch/dist-future-fixing/pypy/interpreter/test/test_compiler.py Tue Nov 20 22:03:48 2007
@@ -588,7 +588,7 @@
source2 = "with = 3"
- code = self.compiler.compile(source, '<filename2>', 'exec', 0)
+ code = self.compiler.compile(source2, '<filename2>', 'exec', 0)
assert isinstance(code, PyCode)
assert code.co_filename == '<filename2>'