[pypy-svn] r14184 - in pypy/branch/dist-2.4.1: lib-python/modified-2.3.4 pypy/annotation pypy/bin pypy/documentation pypy/interpreter pypy/interpreter/astcompiler pypy/interpreter/pyparser pypy/interpreter/pyparser/data pypy/interpreter/pyparser/test pypy/interpreter/pyparser/test/samples pypy/interpreter/test pypy/lib pypy/module/recparser pypy/module/recparser/compiler pypy/module/recparser/data pypy/module/recparser/test pypy/objspace/std pypy/objspace/std/test pypy/rpython pypy/rpython/test pypy/tool pypy/translator pypy/translator/goal pypy/translator/llvm2 pypy/translator/llvm2/test pypy/translator/test pypy/translator/tool/pygame
arigo at codespeak.net
arigo at codespeak.net
Sun Jul 3 20:36:09 CEST 2005
Author: arigo
Date: Sun Jul 3 20:36:05 2005
New Revision: 14184
Added:
pypy/branch/dist-2.4.1/lib-python/modified-2.3.4/sre_constants.py
- copied unchanged from r14183, pypy/dist/lib-python/modified-2.3.4/sre_constants.py
pypy/branch/dist-2.4.1/pypy/documentation/ext-functions-draft.txt
- copied unchanged from r14183, pypy/dist/pypy/documentation/ext-functions-draft.txt
pypy/branch/dist-2.4.1/pypy/interpreter/astcompiler/ (props changed)
- copied from r14183, pypy/dist/pypy/interpreter/astcompiler/
pypy/branch/dist-2.4.1/pypy/interpreter/astcompiler/__init__.py
- copied unchanged from r14183, pypy/dist/pypy/interpreter/astcompiler/__init__.py
pypy/branch/dist-2.4.1/pypy/interpreter/astcompiler/ast.py
- copied unchanged from r14183, pypy/dist/pypy/interpreter/astcompiler/ast.py
pypy/branch/dist-2.4.1/pypy/interpreter/astcompiler/consts.py
- copied unchanged from r14183, pypy/dist/pypy/interpreter/astcompiler/consts.py
pypy/branch/dist-2.4.1/pypy/interpreter/astcompiler/future.py
- copied unchanged from r14183, pypy/dist/pypy/interpreter/astcompiler/future.py
pypy/branch/dist-2.4.1/pypy/interpreter/astcompiler/misc.py
- copied unchanged from r14183, pypy/dist/pypy/interpreter/astcompiler/misc.py
pypy/branch/dist-2.4.1/pypy/interpreter/astcompiler/pyassem.py
- copied unchanged from r14183, pypy/dist/pypy/interpreter/astcompiler/pyassem.py
pypy/branch/dist-2.4.1/pypy/interpreter/astcompiler/pycodegen.py
- copied unchanged from r14183, pypy/dist/pypy/interpreter/astcompiler/pycodegen.py
pypy/branch/dist-2.4.1/pypy/interpreter/astcompiler/symbols.py
- copied unchanged from r14183, pypy/dist/pypy/interpreter/astcompiler/symbols.py
pypy/branch/dist-2.4.1/pypy/interpreter/astcompiler/syntax.py
- copied unchanged from r14183, pypy/dist/pypy/interpreter/astcompiler/syntax.py
pypy/branch/dist-2.4.1/pypy/interpreter/astcompiler/transformer.py
- copied unchanged from r14183, pypy/dist/pypy/interpreter/astcompiler/transformer.py
pypy/branch/dist-2.4.1/pypy/interpreter/astcompiler/visitor.py
- copied unchanged from r14183, pypy/dist/pypy/interpreter/astcompiler/visitor.py
pypy/branch/dist-2.4.1/pypy/interpreter/pycompiler.py
- copied unchanged from r14183, pypy/dist/pypy/interpreter/pycompiler.py
pypy/branch/dist-2.4.1/pypy/interpreter/pyparser/
- copied from r14183, pypy/dist/pypy/interpreter/pyparser/
pypy/branch/dist-2.4.1/pypy/interpreter/pyparser/__init__.py
- copied unchanged from r14183, pypy/dist/pypy/interpreter/pyparser/__init__.py
pypy/branch/dist-2.4.1/pypy/interpreter/pyparser/automata.py
- copied unchanged from r14183, pypy/dist/pypy/interpreter/pyparser/automata.py
pypy/branch/dist-2.4.1/pypy/interpreter/pyparser/data/
- copied from r14183, pypy/dist/pypy/interpreter/pyparser/data/
pypy/branch/dist-2.4.1/pypy/interpreter/pyparser/data/Grammar2.3
- copied unchanged from r14183, pypy/dist/pypy/interpreter/pyparser/data/Grammar2.3
pypy/branch/dist-2.4.1/pypy/interpreter/pyparser/data/Grammar2.4
- copied unchanged from r14183, pypy/dist/pypy/interpreter/pyparser/data/Grammar2.4
pypy/branch/dist-2.4.1/pypy/interpreter/pyparser/ebnflexer.py
- copied unchanged from r14183, pypy/dist/pypy/interpreter/pyparser/ebnflexer.py
pypy/branch/dist-2.4.1/pypy/interpreter/pyparser/ebnfparse.py
- copied unchanged from r14183, pypy/dist/pypy/interpreter/pyparser/ebnfparse.py
pypy/branch/dist-2.4.1/pypy/interpreter/pyparser/grammar.py
- copied unchanged from r14183, pypy/dist/pypy/interpreter/pyparser/grammar.py
pypy/branch/dist-2.4.1/pypy/interpreter/pyparser/pythonlexer.py
- copied unchanged from r14183, pypy/dist/pypy/interpreter/pyparser/pythonlexer.py
pypy/branch/dist-2.4.1/pypy/interpreter/pyparser/pythonparse.py
- copied unchanged from r14183, pypy/dist/pypy/interpreter/pyparser/pythonparse.py
pypy/branch/dist-2.4.1/pypy/interpreter/pyparser/pythonutil.py
- copied unchanged from r14183, pypy/dist/pypy/interpreter/pyparser/pythonutil.py
pypy/branch/dist-2.4.1/pypy/interpreter/pyparser/pytokenize.py
- copied unchanged from r14183, pypy/dist/pypy/interpreter/pyparser/pytokenize.py
pypy/branch/dist-2.4.1/pypy/interpreter/pyparser/syntaxtree.py
- copied unchanged from r14183, pypy/dist/pypy/interpreter/pyparser/syntaxtree.py
pypy/branch/dist-2.4.1/pypy/interpreter/pyparser/test/ (props changed)
- copied from r14183, pypy/dist/pypy/interpreter/pyparser/test/
pypy/branch/dist-2.4.1/pypy/interpreter/pyparser/test/samples/ (props changed)
- copied from r14183, pypy/dist/pypy/interpreter/pyparser/test/samples/
pypy/branch/dist-2.4.1/pypy/interpreter/pyparser/test/samples/snippet_1.py
- copied unchanged from r14183, pypy/dist/pypy/interpreter/pyparser/test/samples/snippet_1.py
pypy/branch/dist-2.4.1/pypy/interpreter/pyparser/test/samples/snippet_2.py
- copied unchanged from r14183, pypy/dist/pypy/interpreter/pyparser/test/samples/snippet_2.py
pypy/branch/dist-2.4.1/pypy/interpreter/pyparser/test/samples/snippet_3.py
- copied unchanged from r14183, pypy/dist/pypy/interpreter/pyparser/test/samples/snippet_3.py
pypy/branch/dist-2.4.1/pypy/interpreter/pyparser/test/samples/snippet_4.py
- copied unchanged from r14183, pypy/dist/pypy/interpreter/pyparser/test/samples/snippet_4.py
pypy/branch/dist-2.4.1/pypy/interpreter/pyparser/test/samples/snippet_comment.py
- copied unchanged from r14183, pypy/dist/pypy/interpreter/pyparser/test/samples/snippet_comment.py
pypy/branch/dist-2.4.1/pypy/interpreter/pyparser/test/samples/snippet_encoding_declaration.py
- copied unchanged from r14183, pypy/dist/pypy/interpreter/pyparser/test/samples/snippet_encoding_declaration.py
pypy/branch/dist-2.4.1/pypy/interpreter/pyparser/test/samples/snippet_encoding_declaration2.py
- copied unchanged from r14183, pypy/dist/pypy/interpreter/pyparser/test/samples/snippet_encoding_declaration2.py
pypy/branch/dist-2.4.1/pypy/interpreter/pyparser/test/samples/snippet_encoding_declaration3.py
- copied unchanged from r14183, pypy/dist/pypy/interpreter/pyparser/test/samples/snippet_encoding_declaration3.py
pypy/branch/dist-2.4.1/pypy/interpreter/pyparser/test/samples/snippet_function_calls.py
- copied unchanged from r14183, pypy/dist/pypy/interpreter/pyparser/test/samples/snippet_function_calls.py
pypy/branch/dist-2.4.1/pypy/interpreter/pyparser/test/samples/snippet_generator.py
- copied unchanged from r14183, pypy/dist/pypy/interpreter/pyparser/test/samples/snippet_generator.py
pypy/branch/dist-2.4.1/pypy/interpreter/pyparser/test/samples/snippet_import_statements.py
- copied unchanged from r14183, pypy/dist/pypy/interpreter/pyparser/test/samples/snippet_import_statements.py
pypy/branch/dist-2.4.1/pypy/interpreter/pyparser/test/samples/snippet_list_comps.py
- copied unchanged from r14183, pypy/dist/pypy/interpreter/pyparser/test/samples/snippet_list_comps.py
pypy/branch/dist-2.4.1/pypy/interpreter/pyparser/test/samples/snippet_numbers.py
- copied unchanged from r14183, pypy/dist/pypy/interpreter/pyparser/test/samples/snippet_numbers.py
pypy/branch/dist-2.4.1/pypy/interpreter/pyparser/test/samples/snippet_only_one_comment.DISABLED
- copied unchanged from r14183, pypy/dist/pypy/interpreter/pyparser/test/samples/snippet_only_one_comment.DISABLED
pypy/branch/dist-2.4.1/pypy/interpreter/pyparser/test/samples/snippet_redirected_prints.py
- copied unchanged from r14183, pypy/dist/pypy/interpreter/pyparser/test/samples/snippet_redirected_prints.py
pypy/branch/dist-2.4.1/pypy/interpreter/pyparser/test/samples/snippet_samples.py
- copied unchanged from r14183, pypy/dist/pypy/interpreter/pyparser/test/samples/snippet_samples.py
pypy/branch/dist-2.4.1/pypy/interpreter/pyparser/test/samples/snippet_simple_assignment.py
- copied unchanged from r14183, pypy/dist/pypy/interpreter/pyparser/test/samples/snippet_simple_assignment.py
pypy/branch/dist-2.4.1/pypy/interpreter/pyparser/test/samples/snippet_simple_class.py
- copied unchanged from r14183, pypy/dist/pypy/interpreter/pyparser/test/samples/snippet_simple_class.py
pypy/branch/dist-2.4.1/pypy/interpreter/pyparser/test/samples/snippet_simple_for_loop.py
- copied unchanged from r14183, pypy/dist/pypy/interpreter/pyparser/test/samples/snippet_simple_for_loop.py
pypy/branch/dist-2.4.1/pypy/interpreter/pyparser/test/samples/snippet_simple_in_expr.py
- copied unchanged from r14183, pypy/dist/pypy/interpreter/pyparser/test/samples/snippet_simple_in_expr.py
pypy/branch/dist-2.4.1/pypy/interpreter/pyparser/test/samples/snippet_slice.py
- copied unchanged from r14183, pypy/dist/pypy/interpreter/pyparser/test/samples/snippet_slice.py
pypy/branch/dist-2.4.1/pypy/interpreter/pyparser/test/samples/snippet_whitespaces.py
- copied unchanged from r14183, pypy/dist/pypy/interpreter/pyparser/test/samples/snippet_whitespaces.py
pypy/branch/dist-2.4.1/pypy/interpreter/pyparser/test/test_lookahead.py
- copied unchanged from r14183, pypy/dist/pypy/interpreter/pyparser/test/test_lookahead.py
pypy/branch/dist-2.4.1/pypy/interpreter/pyparser/test/test_pytokenizer.py
- copied unchanged from r14183, pypy/dist/pypy/interpreter/pyparser/test/test_pytokenizer.py
pypy/branch/dist-2.4.1/pypy/interpreter/pyparser/test/test_samples.py
- copied unchanged from r14183, pypy/dist/pypy/interpreter/pyparser/test/test_samples.py
pypy/branch/dist-2.4.1/pypy/interpreter/pyparser/test/unittest_samples.py
- copied unchanged from r14183, pypy/dist/pypy/interpreter/pyparser/test/unittest_samples.py
pypy/branch/dist-2.4.1/pypy/interpreter/pyparser/tuplebuilder.py
- copied unchanged from r14183, pypy/dist/pypy/interpreter/pyparser/tuplebuilder.py
pypy/branch/dist-2.4.1/pypy/lib/binascii.py
- copied unchanged from r14183, pypy/dist/pypy/lib/binascii.py
pypy/branch/dist-2.4.1/pypy/module/recparser/app_class.py
- copied unchanged from r14183, pypy/dist/pypy/module/recparser/app_class.py
pypy/branch/dist-2.4.1/pypy/module/recparser/astbuilder.py
- copied unchanged from r14183, pypy/dist/pypy/module/recparser/astbuilder.py
pypy/branch/dist-2.4.1/pypy/module/recparser/codegen.py
- copied unchanged from r14183, pypy/dist/pypy/module/recparser/codegen.py
pypy/branch/dist-2.4.1/pypy/module/recparser/compiler/
- copied from r14183, pypy/dist/pypy/module/recparser/compiler/
pypy/branch/dist-2.4.1/pypy/module/recparser/compiler/__init__.py
- copied unchanged from r14183, pypy/dist/pypy/module/recparser/compiler/__init__.py
pypy/branch/dist-2.4.1/pypy/module/recparser/compiler/ast.py
- copied unchanged from r14183, pypy/dist/pypy/module/recparser/compiler/ast.py
pypy/branch/dist-2.4.1/pypy/module/recparser/compiler/ast.txt
- copied unchanged from r14183, pypy/dist/pypy/module/recparser/compiler/ast.txt
pypy/branch/dist-2.4.1/pypy/module/recparser/compiler/astfactory.py
- copied unchanged from r14183, pypy/dist/pypy/module/recparser/compiler/astfactory.py
pypy/branch/dist-2.4.1/pypy/module/recparser/compiler/astgen.py
- copied unchanged from r14183, pypy/dist/pypy/module/recparser/compiler/astgen.py
pypy/branch/dist-2.4.1/pypy/module/recparser/compiler/bytecode.py
- copied unchanged from r14183, pypy/dist/pypy/module/recparser/compiler/bytecode.py
pypy/branch/dist-2.4.1/pypy/module/recparser/compiler/consts.py
- copied unchanged from r14183, pypy/dist/pypy/module/recparser/compiler/consts.py
pypy/branch/dist-2.4.1/pypy/module/recparser/compiler/future.py
- copied unchanged from r14183, pypy/dist/pypy/module/recparser/compiler/future.py
pypy/branch/dist-2.4.1/pypy/module/recparser/compiler/misc.py
- copied unchanged from r14183, pypy/dist/pypy/module/recparser/compiler/misc.py
pypy/branch/dist-2.4.1/pypy/module/recparser/compiler/pyassem.py
- copied unchanged from r14183, pypy/dist/pypy/module/recparser/compiler/pyassem.py
pypy/branch/dist-2.4.1/pypy/module/recparser/compiler/pycodegen.py
- copied unchanged from r14183, pypy/dist/pypy/module/recparser/compiler/pycodegen.py
pypy/branch/dist-2.4.1/pypy/module/recparser/compiler/symbols.py
- copied unchanged from r14183, pypy/dist/pypy/module/recparser/compiler/symbols.py
pypy/branch/dist-2.4.1/pypy/module/recparser/compiler/syntax.py
- copied unchanged from r14183, pypy/dist/pypy/module/recparser/compiler/syntax.py
pypy/branch/dist-2.4.1/pypy/module/recparser/compiler/transformer.py
- copied unchanged from r14183, pypy/dist/pypy/module/recparser/compiler/transformer.py
pypy/branch/dist-2.4.1/pypy/module/recparser/compiler/visitor.py
- copied unchanged from r14183, pypy/dist/pypy/module/recparser/compiler/visitor.py
pypy/branch/dist-2.4.1/pypy/rpython/remptydict.py
- copied unchanged from r14183, pypy/dist/pypy/rpython/remptydict.py
pypy/branch/dist-2.4.1/pypy/rpython/test/test_remptydict.py
- copied unchanged from r14183, pypy/dist/pypy/rpython/test/test_remptydict.py
pypy/branch/dist-2.4.1/pypy/translator/goal/targetparser.py
- copied unchanged from r14183, pypy/dist/pypy/translator/goal/targetparser.py
pypy/branch/dist-2.4.1/pypy/translator/llvm2/arraynode.py
- copied unchanged from r14183, pypy/dist/pypy/translator/llvm2/arraynode.py
pypy/branch/dist-2.4.1/pypy/translator/llvm2/node.py
- copied unchanged from r14183, pypy/dist/pypy/translator/llvm2/node.py
pypy/branch/dist-2.4.1/pypy/translator/test/test_backendoptimization.py
- copied unchanged from r14183, pypy/dist/pypy/translator/test/test_backendoptimization.py
Removed:
pypy/branch/dist-2.4.1/pypy/interpreter/compiler.py
pypy/branch/dist-2.4.1/pypy/lib/inprogress_binascii.py
pypy/branch/dist-2.4.1/pypy/module/recparser/data/
pypy/branch/dist-2.4.1/pypy/module/recparser/ebnflexer.py
pypy/branch/dist-2.4.1/pypy/module/recparser/ebnfparse.py
pypy/branch/dist-2.4.1/pypy/module/recparser/grammar.py
pypy/branch/dist-2.4.1/pypy/module/recparser/pythonlexer.py
pypy/branch/dist-2.4.1/pypy/module/recparser/pythonparse.py
pypy/branch/dist-2.4.1/pypy/module/recparser/pythonutil.py
pypy/branch/dist-2.4.1/pypy/module/recparser/syntaxtree.py
pypy/branch/dist-2.4.1/pypy/module/recparser/test/
pypy/branch/dist-2.4.1/pypy/module/recparser/tuplebuilder.py
pypy/branch/dist-2.4.1/pypy/translator/llvm2/cfgtransform.py
Modified:
pypy/branch/dist-2.4.1/pypy/annotation/classdef.py
pypy/branch/dist-2.4.1/pypy/annotation/unaryop.py
pypy/branch/dist-2.4.1/pypy/bin/translator.py
pypy/branch/dist-2.4.1/pypy/documentation/architecture.txt
pypy/branch/dist-2.4.1/pypy/documentation/objspace.txt
pypy/branch/dist-2.4.1/pypy/interpreter/baseobjspace.py
pypy/branch/dist-2.4.1/pypy/interpreter/eval.py
pypy/branch/dist-2.4.1/pypy/interpreter/executioncontext.py
pypy/branch/dist-2.4.1/pypy/interpreter/gateway.py
pypy/branch/dist-2.4.1/pypy/interpreter/pyopcode.py
pypy/branch/dist-2.4.1/pypy/interpreter/test/test_compiler.py
pypy/branch/dist-2.4.1/pypy/interpreter/typedef.py
pypy/branch/dist-2.4.1/pypy/lib/_formatting.py
pypy/branch/dist-2.4.1/pypy/module/recparser/__init__.py
pypy/branch/dist-2.4.1/pypy/module/recparser/compat.py
pypy/branch/dist-2.4.1/pypy/module/recparser/pyparser.py
pypy/branch/dist-2.4.1/pypy/objspace/std/fake.py
pypy/branch/dist-2.4.1/pypy/objspace/std/longobject.py
pypy/branch/dist-2.4.1/pypy/objspace/std/stringobject.py
pypy/branch/dist-2.4.1/pypy/objspace/std/test/test_longobject.py
pypy/branch/dist-2.4.1/pypy/objspace/std/test/test_stringobject.py
pypy/branch/dist-2.4.1/pypy/rpython/lltype.py
pypy/branch/dist-2.4.1/pypy/rpython/rclass.py
pypy/branch/dist-2.4.1/pypy/rpython/rconstantdict.py
pypy/branch/dist-2.4.1/pypy/rpython/rdict.py
pypy/branch/dist-2.4.1/pypy/rpython/rpbc.py
pypy/branch/dist-2.4.1/pypy/rpython/rstr.py
pypy/branch/dist-2.4.1/pypy/rpython/rtyper.py
pypy/branch/dist-2.4.1/pypy/rpython/test/test_lltype.py
pypy/branch/dist-2.4.1/pypy/rpython/test/test_rclass.py
pypy/branch/dist-2.4.1/pypy/rpython/test/test_rconstantdict.py
pypy/branch/dist-2.4.1/pypy/rpython/test/test_rpbc.py
pypy/branch/dist-2.4.1/pypy/rpython/test/test_rstr.py
pypy/branch/dist-2.4.1/pypy/tool/option.py
pypy/branch/dist-2.4.1/pypy/translator/backendoptimization.py
pypy/branch/dist-2.4.1/pypy/translator/goal/translate_pypy.py
pypy/branch/dist-2.4.1/pypy/translator/goal/unixcheckpoint.py
pypy/branch/dist-2.4.1/pypy/translator/llvm2/build_llvm_module.py
pypy/branch/dist-2.4.1/pypy/translator/llvm2/codewriter.py
pypy/branch/dist-2.4.1/pypy/translator/llvm2/database.py
pypy/branch/dist-2.4.1/pypy/translator/llvm2/funcnode.py
pypy/branch/dist-2.4.1/pypy/translator/llvm2/genllvm.py
pypy/branch/dist-2.4.1/pypy/translator/llvm2/pyxwrapper.py
pypy/branch/dist-2.4.1/pypy/translator/llvm2/structnode.py
pypy/branch/dist-2.4.1/pypy/translator/llvm2/test/test_genllvm.py
pypy/branch/dist-2.4.1/pypy/translator/tool/pygame/drawgraph.py
Log:
Merged the trunk into the dist-2.4.1 branch:
svn merge -r14032:14183 http://codespeak.net/svn/pypy/dist
Modified: pypy/branch/dist-2.4.1/pypy/annotation/classdef.py
==============================================================================
--- pypy/branch/dist-2.4.1/pypy/annotation/classdef.py (original)
+++ pypy/branch/dist-2.4.1/pypy/annotation/classdef.py Sun Jul 3 20:36:05 2005
@@ -315,7 +315,7 @@
return None
return None
- def matching(self, pbc, name):
+ def matching(self, pbc, name=None):
d = {}
uplookup = None
upfunc = None
@@ -341,7 +341,7 @@
# PBC dictionary to track more precisely with which 'self' the
# method is called.
d[upfunc] = self
- elif meth:
+ elif meth and name is not None:
self.check_missing_attribute_update(name)
if d:
return SomePBC(d)
Modified: pypy/branch/dist-2.4.1/pypy/annotation/unaryop.py
==============================================================================
--- pypy/branch/dist-2.4.1/pypy/annotation/unaryop.py (original)
+++ pypy/branch/dist-2.4.1/pypy/annotation/unaryop.py Sun Jul 3 20:36:05 2005
@@ -301,6 +301,9 @@
def method_endswith(str, frag):
return SomeBool()
+ def method_find(str, frag):
+ return SomeInteger()
+
def method_join(str, s_list):
getbookkeeper().count("str_join", str)
s_item = s_list.listdef.read_item()
@@ -324,6 +327,12 @@
def method_replace(str, s1, s2):
return SomeString()
+ def method_lower(str):
+ return SomeString()
+
+ def method_upper(str):
+ return SomeString()
+
class __extend__(SomeChar):
Modified: pypy/branch/dist-2.4.1/pypy/bin/translator.py
==============================================================================
--- pypy/branch/dist-2.4.1/pypy/bin/translator.py (original)
+++ pypy/branch/dist-2.4.1/pypy/bin/translator.py Sun Jul 3 20:36:05 2005
@@ -226,8 +226,11 @@
import atexit
atexit.register(readline.write_history_file, histfile)
-
if __name__ == '__main__':
+ try:
+ setup_readline()
+ except ImportError, err:
+ print "Disabling readline support (%s)" % err
from pypy.translator.test import snippet as test
from pypy.translator.llvm.test import llvmsnippet as test2
from pypy.rpython.rtyper import RPythonTyper
Modified: pypy/branch/dist-2.4.1/pypy/documentation/architecture.txt
==============================================================================
--- pypy/branch/dist-2.4.1/pypy/documentation/architecture.txt (original)
+++ pypy/branch/dist-2.4.1/pypy/documentation/architecture.txt Sun Jul 3 20:36:05 2005
@@ -105,7 +105,7 @@
Please note that we are using the term *interpreter* most often in
reference to the *plain interpreter* which just knows enough to read,
dispatch and implement *bytecodes* thus shuffling objects around on the
-stack and between namespaces. The (plain) interpreter is completly
+stack and between namespaces. The (plain) interpreter is completely
ignorant of how to access, modify or construct objects and their
structure and thus delegates such operations to a so called `Object Space`_.
Modified: pypy/branch/dist-2.4.1/pypy/documentation/objspace.txt
==============================================================================
--- pypy/branch/dist-2.4.1/pypy/documentation/objspace.txt (original)
+++ pypy/branch/dist-2.4.1/pypy/documentation/objspace.txt Sun Jul 3 20:36:05 2005
@@ -354,7 +354,7 @@
:args: list of arguments. Each one is a Constant or a Variable seen previously in the basic block.
:result: a *new* Variable into which the result is to be stored.
- Note that operations usually cannot implicitely raise exceptions at run-time; so for example, code generators can assume that a ``getitem`` operation on a list is safe and can be performed without bound checking. The exceptions to this rule are: (1) if the operation is the last in the block, which ends with ``exitswitch == Constant(last_exception)``, then the implicit exceptions must be checked for, generated, and caught appropriately; (2) calls to other functions, as per ``simple_call`` or ``call_args``, can always raise whatever the called function can raise --- and such exceptions must be passed through to the parent unless they are caught as above.
+ Note that operations usually cannot implicitly raise exceptions at run-time; so for example, code generators can assume that a ``getitem`` operation on a list is safe and can be performed without bound checking. The exceptions to this rule are: (1) if the operation is the last in the block, which ends with ``exitswitch == Constant(last_exception)``, then the implicit exceptions must be checked for, generated, and caught appropriately; (2) calls to other functions, as per ``simple_call`` or ``call_args``, can always raise whatever the called function can raise --- and such exceptions must be passed through to the parent unless they are caught as above.
Variable
Modified: pypy/branch/dist-2.4.1/pypy/interpreter/baseobjspace.py
==============================================================================
--- pypy/branch/dist-2.4.1/pypy/interpreter/baseobjspace.py (original)
+++ pypy/branch/dist-2.4.1/pypy/interpreter/baseobjspace.py Sun Jul 3 20:36:05 2005
@@ -1,9 +1,12 @@
from pypy.interpreter.executioncontext import ExecutionContext
from pypy.interpreter.error import OperationError
from pypy.interpreter.argument import Arguments
+from pypy.interpreter.pycompiler import CPythonCompiler
+from pypy.interpreter.pycompiler import PythonCompiler, PyPyCompiler
from pypy.interpreter.miscutils import ThreadLocals
from pypy.tool.cache import Cache
from pypy.rpython.rarithmetic import r_uint
+import pypy.tool.option
__all__ = ['ObjSpace', 'OperationError', 'Wrappable', 'BaseWrappable',
'W_Root']
@@ -92,12 +95,17 @@
full_exceptions = True # full support for exceptions (normalization & more)
- def __init__(self):
+ def __init__(self, options=None):
"NOT_RPYTHON: Basic initialization of objects."
self.fromcache = InternalSpaceCache(self).getorbuild
self.threadlocals = ThreadLocals()
# set recursion limit
# sets all the internal descriptors
+
+ # XXX: Options in option.py is replaced by a function so
+ # it's not really clean to do a from option import Options
+ # since changing import order can change the Options object
+ self.options = options or pypy.tool.option.Options()
self.initialize()
def __repr__(self):
@@ -134,9 +142,11 @@
#self.setbuiltinmodule('_codecs')
# XXX we need to resolve unwrapping issues to
# make this the default _sre module
- #self.setbuiltinmodule("_sre", "_sre_pypy")
-
- # XXX disabled: self.setbuiltinmodule('parser')
+ #self.setbuiltinmodule("_sre", "_sre_pypy")
+ if self.options.useparsermodule == "recparser":
+ self.setbuiltinmodule('parser', 'recparser')
+ elif self.options.useparsermodule == "parser":
+ self.setbuiltinmodule('parser')
# initialize with "bootstrap types" from objspace (e.g. w_None)
for name, value in self.__dict__.items():
@@ -173,6 +183,20 @@
"Factory function for execution contexts."
return ExecutionContext(self)
+ def createcompiler(self):
+ "Factory function creating a compiler object."
+ if self.options.parser == 'recparser':
+ if self.options.compiler == 'cpython':
+ return PythonCompiler(self)
+ else:
+ return PyPyCompiler(self)
+ elif self.options.compiler == 'pyparse':
+ # <=> options.parser == 'cpython'
+ return PythonCompiler(self)
+ else:
+ # <=> options.compiler == 'cpython' and options.parser == 'cpython'
+ return CPythonCompiler(self)
+
# Following is a friendly interface to common object space operations
# that can be defined in term of more primitive ones. Subclasses
# may also override specific functions for performance.
Deleted: /pypy/branch/dist-2.4.1/pypy/interpreter/compiler.py
==============================================================================
--- /pypy/branch/dist-2.4.1/pypy/interpreter/compiler.py Sun Jul 3 20:36:05 2005
+++ (empty file)
@@ -1,165 +0,0 @@
-"""
-General classes for bytecode compilers.
-Compiler instances are stored into 'space.getexecutioncontext().compiler'.
-"""
-from codeop import PyCF_DONT_IMPLY_DEDENT
-from pypy.interpreter.error import OperationError
-
-
-class Compiler:
- """Abstract base class for a bytecode compiler."""
-
- # The idea is to grow more methods here over the time,
- # e.g. to handle .pyc files in various ways if we have multiple compilers.
-
- def __init__(self, space):
- self.space = space
-
- def compile(self, source, filename, mode, flags):
- """Compile and return an pypy.interpreter.eval.Code instance."""
- raise NotImplementedError
-
- def getcodeflags(self, code):
- """Return the __future__ compiler flags that were used to compile
- the given code object."""
- return 0
-
- def compile_command(self, source, filename, mode, flags):
- """Same as compile(), but tries to compile a possibly partial
- interactive input. If more input is needed, it returns None.
- """
- # Hackish default implementation based on the stdlib 'codeop' module.
- # See comments over there.
- space = self.space
- flags |= PyCF_DONT_IMPLY_DEDENT
- # Check for source consisting of only blank lines and comments
- if mode != "eval":
- in_comment = False
- for c in source:
- if c in ' \t\f\v': # spaces
- pass
- elif c == '#':
- in_comment = True
- elif c in '\n\r':
- in_comment = False
- elif not in_comment:
- break # non-whitespace, non-comment character
- else:
- source = "pass" # Replace it with a 'pass' statement
-
- try:
- code = self.compile(source, filename, mode, flags)
- return code # success
- except OperationError, err:
- if not err.match(space, space.w_SyntaxError):
- raise
-
- try:
- self.compile(source + "\n", filename, mode, flags)
- return None # expect more
- except OperationError, err1:
- if not err1.match(space, space.w_SyntaxError):
- raise
-
- try:
- self.compile(source + "\n\n", filename, mode, flags)
- raise # uh? no error with \n\n. re-raise the previous error
- except OperationError, err2:
- if not err2.match(space, space.w_SyntaxError):
- raise
-
- if space.eq_w(err1.w_value, err2.w_value):
- raise # twice the same error, re-raise
-
- return None # two different errors, expect more
-
-
-# ____________________________________________________________
-# faked compiler
-
-import warnings
-import __future__
-compiler_flags = 0
-for fname in __future__.all_feature_names:
- compiler_flags |= getattr(__future__, fname).compiler_flag
-
-
-class CPythonCompiler(Compiler):
- """Faked implementation of a compiler, using the underlying compile()."""
-
- def compile(self, source, filename, mode, flags):
- flags |= __future__.generators.compiler_flag # always on (2.2 compat)
- space = self.space
- try:
- old = self.setup_warn_explicit(warnings)
- try:
- c = compile(source, filename, mode, flags, True)
- finally:
- self.restore_warn_explicit(warnings, old)
- # It would be nice to propagate all exceptions to app level,
- # but here we only propagate the 'usual' ones, until we figure
- # out how to do it generically.
- except SyntaxError,e:
- w_synerr = space.newtuple([space.wrap(e.msg),
- space.newtuple([space.wrap(e.filename),
- space.wrap(e.lineno),
- space.wrap(e.offset),
- space.wrap(e.text)])])
- raise OperationError(space.w_SyntaxError, w_synerr)
- except ValueError,e:
- raise OperationError(space.w_ValueError,space.wrap(str(e)))
- except TypeError,e:
- raise OperationError(space.w_TypeError,space.wrap(str(e)))
- from pypy.interpreter.pycode import PyCode
- return space.wrap(PyCode(space)._from_code(c))
- compile._annspecialcase_ = "override:cpy_compile"
-
- def getcodeflags(self, code):
- from pypy.interpreter.pycode import PyCode
- if isinstance(code, PyCode):
- return code.co_flags & compiler_flags
- else:
- return 0
-
- def _warn_explicit(self, message, category, filename, lineno,
- module=None, registry=None):
- if hasattr(category, '__bases__') and \
- issubclass(category, SyntaxWarning):
- assert isinstance(message, str)
- space = self.space
- w_mod = space.sys.getmodule('warnings')
- if w_mod is not None:
- w_dict = w_mod.getdict()
- w_reg = space.call_method(w_dict, 'setdefault',
- space.wrap("__warningregistry__"),
- space.newdict([]))
- try:
- space.call_method(w_mod, 'warn_explicit',
- space.wrap(message),
- space.w_SyntaxWarning,
- space.wrap(filename),
- space.wrap(lineno),
- space.w_None,
- space.w_None)
- except OperationError, e:
- if e.match(space, space.w_SyntaxWarning):
- raise OperationError(
- space.w_SyntaxError,
- space.wrap(message))
- raise
-
- def setup_warn_explicit(self, warnings):
- """
- this is a hack until we have our own parsing/compiling
- in place: we bridge certain warnings to the applevel
- warnings module to let it decide what to do with
- a syntax warning ...
- """
- # there is a hack to make the flow space happy:
- # 'warnings' should not look like a Constant
- old_warn_explicit = warnings.warn_explicit
- warnings.warn_explicit = self._warn_explicit
- return old_warn_explicit
-
- def restore_warn_explicit(self, warnings, old_warn_explicit):
- warnings.warn_explicit = old_warn_explicit
Modified: pypy/branch/dist-2.4.1/pypy/interpreter/eval.py
==============================================================================
--- pypy/branch/dist-2.4.1/pypy/interpreter/eval.py (original)
+++ pypy/branch/dist-2.4.1/pypy/interpreter/eval.py Sun Jul 3 20:36:05 2005
@@ -9,6 +9,7 @@
class Code(Wrappable):
"""A code is a compiled version of some source code.
Abstract base class."""
+ hidden_applevel = False
def __init__(self, co_name):
self.co_name = co_name
Modified: pypy/branch/dist-2.4.1/pypy/interpreter/executioncontext.py
==============================================================================
--- pypy/branch/dist-2.4.1/pypy/interpreter/executioncontext.py (original)
+++ pypy/branch/dist-2.4.1/pypy/interpreter/executioncontext.py Sun Jul 3 20:36:05 2005
@@ -1,7 +1,6 @@
import sys
from pypy.interpreter.miscutils import Stack
from pypy.interpreter.error import OperationError
-from pypy.interpreter.compiler import CPythonCompiler
class ExecutionContext:
"""An ExecutionContext holds the state of an execution thread
@@ -13,7 +12,7 @@
self.w_tracefunc = None
self.w_profilefunc = None
self.is_tracing = 0
- self.compiler = CPythonCompiler(space)
+ self.compiler = space.createcompiler()
def enter(self, frame):
if self.framestack.depth() > self.space.sys.recursionlimit:
Modified: pypy/branch/dist-2.4.1/pypy/interpreter/gateway.py
==============================================================================
--- pypy/branch/dist-2.4.1/pypy/interpreter/gateway.py (original)
+++ pypy/branch/dist-2.4.1/pypy/interpreter/gateway.py Sun Jul 3 20:36:05 2005
@@ -332,6 +332,7 @@
class BuiltinCode(eval.Code):
"The code object implementing a built-in (interpreter-level) hook."
+ hidden_applevel = True
# When a BuiltinCode is stored in a Function object,
# you get the functionality of CPython's built-in function type.
Modified: pypy/branch/dist-2.4.1/pypy/interpreter/pyopcode.py
==============================================================================
--- pypy/branch/dist-2.4.1/pypy/interpreter/pyopcode.py (original)
+++ pypy/branch/dist-2.4.1/pypy/interpreter/pyopcode.py Sun Jul 3 20:36:05 2005
@@ -53,7 +53,7 @@
oparg = self.nextarg()
fn(self, oparg)
else:
- fn = self.dispatch_table_no_arg[opcode]
+ fn = self.dispatch_table_no_arg[opcode]
fn(self)
def nextop(self):
Modified: pypy/branch/dist-2.4.1/pypy/interpreter/test/test_compiler.py
==============================================================================
--- pypy/branch/dist-2.4.1/pypy/interpreter/test/test_compiler.py (original)
+++ pypy/branch/dist-2.4.1/pypy/interpreter/test/test_compiler.py Sun Jul 3 20:36:05 2005
@@ -1,13 +1,13 @@
import __future__
import autopath
import py
-from pypy.interpreter.compiler import CPythonCompiler, Compiler
+from pypy.interpreter.pycompiler import CPythonCompiler, PythonCompiler
from pypy.interpreter.pycode import PyCode
-class TestCompiler:
+class BaseTestCompiler:
def setup_method(self, method):
- self.compiler = CPythonCompiler(self.space)
+ self.compiler = self.space.createcompiler()
def test_compile(self):
code = self.compiler.compile('6*7', '<hello>', 'eval', 0)
@@ -37,6 +37,7 @@
'if 1:\n x x', '?', 'exec', 0)
def test_getcodeflags(self):
+ py.test.skip("flags don't work correctly when using the compiler package")
code = self.compiler.compile('from __future__ import division\n',
'<hello>', 'exec', 0)
flags = self.compiler.getcodeflags(code)
@@ -48,6 +49,18 @@
assert flags == flags2
-class TestECCompiler(TestCompiler):
+class TestECCompiler(BaseTestCompiler):
def setup_method(self, method):
self.compiler = self.space.getexecutioncontext().compiler
+
+class TestPyCCompiler(BaseTestCompiler):
+ def setup_method(self, method):
+ self.compiler = CPythonCompiler(self.space)
+
+class TestPurePythonCompiler(BaseTestCompiler):
+ def setup_method(self, method):
+ self.compiler = PythonCompiler(self.space)
+
+class SkippedForNowTestPyPyCompiler(BaseTestCompiler):
+ def setup_method(self, method):
+ self.compiler = PyPyCompiler(self.space)
Modified: pypy/branch/dist-2.4.1/pypy/interpreter/typedef.py
==============================================================================
--- pypy/branch/dist-2.4.1/pypy/interpreter/typedef.py (original)
+++ pypy/branch/dist-2.4.1/pypy/interpreter/typedef.py Sun Jul 3 20:36:05 2005
@@ -39,6 +39,7 @@
return get_unique_interplevel_NoDictWithSlots(cls)
else:
return get_unique_interplevel_NoDictNoSlots(cls)
+get_unique_interplevel_subclass._annspecialcase_ = "specialize:arg0"
for hasdict in False, True:
for wants_slots in False, True:
Modified: pypy/branch/dist-2.4.1/pypy/lib/_formatting.py
==============================================================================
--- pypy/branch/dist-2.4.1/pypy/lib/_formatting.py (original)
+++ pypy/branch/dist-2.4.1/pypy/lib/_formatting.py Sun Jul 3 20:36:05 2005
@@ -453,9 +453,12 @@
try:
f = format_registry[t[0]]
except KeyError:
+ char = t[0]
+ if isinstance(char, unicode):
+ char = char.encode(sys.getdefaultencoding(), 'replace')
raise ValueError("unsupported format character "
"'%s' (0x%x) at index %d"
- %(t[0], ord(t[0]), fmtiter.i-1))
+ % (char, ord(t[0]), fmtiter.i - 1))
# Trying to translate this using the flow space.
# Currently, star args give a problem there,
# so let's be explicit about the args:
@@ -467,12 +470,7 @@
# Switch to using the unicode formatters and retry.
do_unicode = True
format_registry = unicode_format_registry
- try:
- f = format_registry[t[0]]
- except KeyError:
- raise ValueError("unsupported format character "
- "'%s' (0x%x) at index %d"
- %(t[0], ord(t[0]), fmtiter.i-1))
+ f = format_registry[t[0]]
r.append(f(char, flags, width, prec, value).format())
else:
Deleted: /pypy/branch/dist-2.4.1/pypy/lib/inprogress_binascii.py
==============================================================================
--- /pypy/branch/dist-2.4.1/pypy/lib/inprogress_binascii.py Sun Jul 3 20:36:05 2005
+++ (empty file)
@@ -1,299 +0,0 @@
-class Error(Exception):
- pass
-
-class Incomplete(Exception):
- pass
-
-def a2b_uu(s):
- length = (ord(s[0]) - 0x20) % 64
- a = quadruplets(s[1:].rstrip())
- try:
- result = [''.join(
- [chr((A - 0x20) << 2 | (((B - 0x20) >> 4) & 0x3)),
- chr(((B - 0x20) & 0xF) << 4 | (((C - 0x20) >> 2) & 0xF)),
- chr(((C - 0x20) & 0x3) << 6 | ((D - 0x20) & 0x3F))
- ]) for A, B, C, D in a]
- except ValueError:
- raise Error, 'Illegal char'
- result = ''.join(result)
- trailingdata = result[length:]
- if trailingdata.strip('\x00'):
- raise Error, 'Trailing garbage'
- result = result[:length]
- if len(result) < length:
- result += ((length - len(result)) * '\x00')
- return result
-
-def quadruplets(s):
- while s:
- try:
- a, b, c, d = s[0], s[1], s[2], s[3]
- except IndexError:
- s += ' '
- yield ord(s[0]), ord(s[1]), ord(s[2]), ord(s[3])
- return
- s = s[4:]
- yield ord(a), ord(b), ord(c), ord(d)
-
-def b2a_uu(s):
- length = len(s)
- if length > 45:
- raise Error, 'At most 45 bytes at once'
-
- a = triples(s)
- result = [''.join(
- [chr(0x20 + (( A >> 2 ) & 0x3F)),
- chr(0x20 + (((A << 4) | ((B >> 4) & 0xF)) & 0x3F)),
- chr(0x20 + (((B << 2) | ((C >> 6) & 0x3)) & 0x3F)),
- chr(0x20 + (( C ) & 0x3F))]) for A, B, C in a]
- return chr(ord(' ') + (length & 077)) + ''.join(result) + '\n'
-
-def triples(s):
- while s:
- try:
- a, b, c = s[0], s[1], s[2]
- except IndexError:
- s += '\0\0'
- yield ord(s[0]), ord(s[1]), ord(s[2])
- return
- s = s[3:]
- yield ord(a), ord(b), ord(c)
-
-
-table_a2b_base64 = {
- 'A': 0,
- 'B': 1,
- 'C': 2,
- 'D': 3,
- 'E': 4,
- 'F': 5,
- 'G': 6,
- 'H': 7,
- 'I': 8,
- 'J': 9,
- 'K': 10,
- 'L': 11,
- 'M': 12,
- 'N': 13,
- 'O': 14,
- 'P': 15,
- 'Q': 16,
- 'R': 17,
- 'S': 18,
- 'T': 19,
- 'U': 20,
- 'V': 21,
- 'W': 22,
- 'X': 23,
- 'Y': 24,
- 'Z': 25,
- 'a': 26,
- 'b': 27,
- 'c': 28,
- 'd': 29,
- 'e': 30,
- 'f': 31,
- 'g': 32,
- 'h': 33,
- 'i': 34,
- 'j': 35,
- 'k': 36,
- 'l': 37,
- 'm': 38,
- 'n': 39,
- 'o': 40,
- 'p': 41,
- 'q': 42,
- 'r': 43,
- 's': 44,
- 't': 45,
- 'u': 46,
- 'v': 47,
- 'w': 48,
- 'x': 49,
- 'y': 50,
- 'z': 51,
- '0': 52,
- '1': 53,
- '2': 54,
- '3': 55,
- '4': 56,
- '5': 57,
- '6': 58,
- '7': 59,
- '8': 60,
- '9': 61,
- '+': 62,
- '/': 63,
-}
-
-def quadruplets_base64(s):
- while s:
- a, b, c, d = table_a2b_base64[s[0]], table_a2b_base64[s[1]], table_a2b_base64[s[2]], table_a2b_base64[s[3]]
- s = s[4:]
- yield a, b, c, d
-
-def a2b_base64(s):
- s = s.rstrip()
- # clean out all invalid characters, this also strips the final '=' padding
- clean_s = []
- for item in s:
- if item in table_a2b_base64:
- clean_s.append(item)
- s = ''.join(clean_s)
- # Add '=' padding back into the string
- if len(s) % 4:
- s = s + ('=' * (4 - len(s) % 4))
-
- a = quadruplets_base64(s[:-4])
- result = [
- chr(A << 2 | ((B >> 4) & 0x3)) +
- chr((B & 0xF) << 4 | ((C >> 2 ) & 0xF)) +
- chr((C & 0x3) << 6 | D )
- for A, B, C, D in a]
-
- if s:
- final = s[-4:]
- if final[2] == '=':
- A = table_a2b_base64[final[0]]
- B = table_a2b_base64[final[1]]
- snippet = chr(A << 2 | ((B >> 4) & 0x3))
- elif final[3] == '=':
- A = table_a2b_base64[final[0]]
- B = table_a2b_base64[final[1]]
- C = table_a2b_base64[final[2]]
- snippet = chr(A << 2 | ((B >> 4) & 0x3)) + \
- chr((B & 0xF) << 4 | ((C >> 2 ) & 0xF))
- else:
- A = table_a2b_base64[final[0]]
- B = table_a2b_base64[final[1]]
- C = table_a2b_base64[final[2]]
- D = table_a2b_base64[final[3]]
- snippet = chr(A << 2 | ((B >> 4) & 0x3)) + \
- chr((B & 0xF) << 4 | ((C >> 2 ) & 0xF)) + \
- chr((C & 0x3) << 6 | D )
- result.append(snippet)
-
- return ''.join(result)
-
-table_b2a_base64 = \
-"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
-
-def b2a_base64(s):
- length = len(s)
- final_length = length % 3
-
- a = triples(s[ :length - final_length])
-
- result = [''.join(
- [table_b2a_base64[( A >> 2 ) & 0x3F],
- table_b2a_base64[((A << 4) | ((B >> 4) & 0xF)) & 0x3F],
- table_b2a_base64[((B << 2) | ((C >> 6) & 0x3)) & 0x3F],
- table_b2a_base64[( C ) & 0x3F]])
- for A, B, C in a]
-
- final = s[length - final_length:]
- if final_length == 0:
- snippet = ''
- elif final_length == 1:
- a = ord(final[0])
- snippet = table_b2a_base64[(a >> 2 ) & 0x3F] + \
- table_b2a_base64[(a << 4 ) & 0x3F] + '=='
- else:
- a = ord(final[0])
- b = ord(final[1])
- snippet = table_b2a_base64[(a >> 2) & 0x3F] + \
- table_b2a_base64[((a << 4) | (b >> 4) & 0xF) & 0x3F] + \
- table_b2a_base64[(b << 2) & 0x3F] + '='
- return ''.join(result) + snippet + '\n'
-
-def a2b_qp(s):
- parts = s.rstrip().split('=')
-
- # Change the parts in-place
- for index, part in enumerate(parts[1:]):
- if len(part) > 1 and part[0] in hex_numbers and part[1] in hex_numbers:
- parts[index + 1] = chr(strhex_to_int(part[0:2])) + part[2:]
- elif index == len(parts) - 2 and len(part) < 2:
- parts[index + 1] = ''
- else:
- parts[index + 1] = '=' + parts[index + 1]
-
- return ''.join(parts)
-
-def b2a_qp(s):
- """ In this implementation, we are quoting all spaces and tab character.
- This is ok by the standard, though it slightly reduces the
- readability of the quoted string. The CPython implementation
- preserves internal whitespace, which is a different way of
- implementing the standard. The reason we are doing things differently
- is that it greatly simplifies the code.
-
- The CPython implementation does not escape CR and LF characters
- and does not encode newlines as CRLF sequences. This seems to be
- non-standard, and we copy this behaviour.
- """
- crlf = s.find('\r\n')
- lf = s.find('\n')
- linebreak = None
- if crlf >= 0 and crlf <= lf:
- linebreak = '\r\n'
- elif lf > 0:
- linebreak = '\n'
-
- if linebreak:
- s = s.replace('\r\n', '\n')
-
- lines = s.split('\n')
-
- result = []
- for line in lines:
- charlist = []
- count = 0
- for c in line:
- if '!' <= c <= '<' or '>' <= c <= '~' or c in '\n\r':
- if count >= 75:
- charlist.append('=\r\n')
- count = 0
- charlist.append(c)
- count += 1
- else:
- if count >= 72:
- charlist.append('=\r\n')
- count = 0
- snippet = '=' + two_hex_digits(ord(c))
- count += len(snippet)
- charlist.append(snippet)
- result.append(''.join(charlist))
- return linebreak.join(result)
-
-hex_numbers = '0123456789ABCDEF'
-def hex(n):
- if n == 0:
- return '0'
-
- if n < 0:
- n = -n
- sign = '-'
- else:
- sign = ''
- arr = []
- for nibble in hexgen(n):
- arr = [hex_numbers[nibble]] + arr
- return sign + ''.join(arr)
-
-def two_hex_digits(n):
- return hex_numbers[n / 0x10] + hex_numbers[n % 0x10]
-
-def hexgen(n):
- """ Yield a nibble at a time. """
- while n:
- remainder = n % 0x10
- n = n / 0x10
- yield remainder
-
-def strhex_to_int(s):
- i = 0
- for c in s:
- i = i * 0x10 + hex_numbers.index(c)
- return i
Modified: pypy/branch/dist-2.4.1/pypy/module/recparser/__init__.py
==============================================================================
--- pypy/branch/dist-2.4.1/pypy/module/recparser/__init__.py (original)
+++ pypy/branch/dist-2.4.1/pypy/module/recparser/__init__.py Sun Jul 3 20:36:05 2005
@@ -1,44 +1,44 @@
from pypy.interpreter.error import OperationError, debug_print
-from pypy.interpreter import module
-from pypy.interpreter.mixedmodule import MixedModule
-
+import pypy.interpreter.pyparser.pythonparse
-import pythonutil
+from pypy.interpreter.mixedmodule import MixedModule
-debug_print( "Loading grammar %s" % pythonutil.PYTHON_GRAMMAR )
-PYTHON_PARSER = pythonutil.python_grammar()
+# Forward imports so they run at startup time
+import pyparser
+import pypy.interpreter.pyparser.pythonlexer
+import pypy.interpreter.pyparser.pythonparse
class Module(MixedModule):
- """The builtin parser module.
- """
+ """The builtin parser module.
+ """
- appleveldefs = {
- # 'ParserError' : 'app_class.ParserError',
- }
- interpleveldefs = {
- '__name__' : '(space.wrap("parser"))',
- '__doc__' : '(space.wrap("parser (recparser version) module"))',
-
- 'suite' : 'pyparser.suite',
- 'expr' : 'pyparser.expr',
- 'STType' : 'pyparser.STType',
- 'ast2tuple' : 'pyparser.ast2tuple',
-# 'ASTType' : 'pyparser.STType',
- # 'sequence2st' : 'pyparser.sequence2st',
- #'eval_input' : 'pyparser.eval_input',
- #'file_input' : 'pyparser.file_input',
- #'compileast' : 'pyparser.compileast',
- #'st2tuple' : 'pyparser.st2tuple',
- #'st2list' : 'pyparser.st2list',
- #'issuite' : 'pyparser.issuite',
- #'ast2tuple' : 'pyparser.ast2tuple',
- #'tuple2st' : 'pyparser.tuple2st',
- #'isexpr' : 'pyparser.isexpr',
- #'ast2list' : 'pyparser.ast2list',
- #'sequence2ast' : 'pyparser.sequence2ast',
- #'tuple2ast' : 'pyparser.tuple2ast',
- #'_pickler' : 'pyparser._pickler',
- #'compilest' : 'pyparser.compilest',
- }
+ appleveldefs = {
+ 'ParserError' : 'app_class.ParserError',
+ }
+ interpleveldefs = {
+ '__name__' : '(space.wrap("parser"))',
+ '__doc__' : '(space.wrap("parser (recparser version) module"))',
+
+ 'suite' : 'pyparser.suite',
+ 'expr' : 'pyparser.expr',
+ 'STType' : 'pyparser.STType',
+ 'ast2tuple' : 'pyparser.ast2tuple',
+## # 'ASTType' : 'pyparser.STType',
+## # 'sequence2st' : 'pyparser.sequence2st',
+## #'eval_input' : 'pyparser.eval_input',
+## #'file_input' : 'pyparser.file_input',
+## #'compileast' : 'pyparser.compileast',
+## #'st2tuple' : 'pyparser.st2tuple',
+## #'st2list' : 'pyparser.st2list',
+## #'issuite' : 'pyparser.issuite',
+## #'ast2tuple' : 'pyparser.ast2tuple',
+## #'tuple2st' : 'pyparser.tuple2st',
+## #'isexpr' : 'pyparser.isexpr',
+## #'ast2list' : 'pyparser.ast2list',
+## #'sequence2ast' : 'pyparser.sequence2ast',
+## #'tuple2ast' : 'pyparser.tuple2ast',
+## #'_pickler' : 'pyparser._pickler',
+## #'compilest' : 'pyparser.compilest',
+ }
Modified: pypy/branch/dist-2.4.1/pypy/module/recparser/compat.py
==============================================================================
--- pypy/branch/dist-2.4.1/pypy/module/recparser/compat.py (original)
+++ pypy/branch/dist-2.4.1/pypy/module/recparser/compat.py Sun Jul 3 20:36:05 2005
@@ -1,16 +1,28 @@
"""Compatibility layer for CPython's parser module"""
from pythonparse import parse_python_source
-from pypy.module.recparser import PYTHON_PARSER
+from pythonutil import PYTHON_PARSER
from compiler import transformer, compile as pycompile
-
+
def suite( source ):
- builder = parse_python_source( source, PYTHON_PARSER, "file_input" )
- return builder.stack[-1]
+ strings = [line+'\n' for line in source.split('\n')]
+ builder = parse_python_source( strings, PYTHON_PARSER, "file_input" )
+ nested_tuples = builder.stack[-1].as_tuple()
+ if builder.source_encoding is not None:
+ return (symbol.encoding_decl, nested_tuples, builder.source_encoding)
+ else:
+ return (None, nested_tuples, None)
+ return nested_tuples
def expr( source ):
- builder = parse_python_source( source, PYTHON_PARSER, "eval_input" )
- return builder.stack[-1]
+ strings = [line+'\n' for line in source.split('\n')]
+ builder = parse_python_source( strings, PYTHON_PARSER, "eval_input" )
+ nested_tuples = builder.stack[-1].as_tuple()
+ if builder.source_encoding is not None:
+ return (symbol.encoding_decl, nested_tuples, builder.source_encoding)
+ else:
+ return (None, nested_tuples, None)
+ return nested_tuples
def ast2tuple(node, line_info=False):
"""Quick dummy implementation of parser.ast2tuple(tree) function"""
Deleted: /pypy/branch/dist-2.4.1/pypy/module/recparser/ebnflexer.py
==============================================================================
--- /pypy/branch/dist-2.4.1/pypy/module/recparser/ebnflexer.py Sun Jul 3 20:36:05 2005
+++ (empty file)
@@ -1,71 +0,0 @@
-"""This is a lexer for a Python recursive descent parser
-it obeys the TokenSource interface defined for the grammar
-analyser in grammar.py
-"""
-
-import re
-from grammar import TokenSource
-
-DEBUG = False
-
-## Lexer for Python's grammar ########################################
-g_symdef = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*:",re.M)
-g_symbol = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*",re.M)
-g_string = re.compile(r"'[^']+'",re.M)
-g_tok = re.compile(r"\[|\]|\(|\)|\*|\+|\|",re.M)
-g_skip = re.compile(r"\s*(#.*$)?",re.M)
-
-class GrammarSource(TokenSource):
- """The grammar tokenizer"""
- def __init__(self, inpstring ):
- TokenSource.__init__(self)
- self.input = inpstring
- self.pos = 0
-
- def context(self):
- return self.pos
-
- def offset(self, ctx=None):
- if ctx is None:
- return self.pos
- else:
- assert type(ctx)==int
- return ctx
-
- def restore(self, ctx ):
- self.pos = ctx
-
- def next(self):
- pos = self.pos
- inp = self.input
- m = g_skip.match(inp, pos)
- while m and pos!=m.end():
- pos = m.end()
- if pos==len(inp):
- self.pos = pos
- return None, None
- m = g_skip.match(inp, pos)
- m = g_symdef.match(inp,pos)
- if m:
- tk = m.group(0)
- self.pos = m.end()
- return 'SYMDEF',tk[:-1]
- m = g_tok.match(inp,pos)
- if m:
- tk = m.group(0)
- self.pos = m.end()
- return tk,tk
- m = g_string.match(inp,pos)
- if m:
- tk = m.group(0)
- self.pos = m.end()
- return 'STRING',tk[1:-1]
- m = g_symbol.match(inp,pos)
- if m:
- tk = m.group(0)
- self.pos = m.end()
- return 'SYMBOL',tk
- raise ValueError("Unknown token at pos=%d context='%s'" % (pos,inp[pos:pos+20]) )
-
- def debug(self):
- return self.input[self.pos:self.pos+20]
Deleted: /pypy/branch/dist-2.4.1/pypy/module/recparser/ebnfparse.py
==============================================================================
--- /pypy/branch/dist-2.4.1/pypy/module/recparser/ebnfparse.py Sun Jul 3 20:36:05 2005
+++ (empty file)
@@ -1,253 +0,0 @@
-#!/usr/bin/env python
-from grammar import BaseGrammarBuilder, Alternative, Sequence, Token, \
- KleenStar, GrammarElement
-from ebnflexer import GrammarSource
-
-import re
-py_name = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*", re.M)
-
-punct=['>=', '<>', '!=', '<', '>', '<=', '==', '\\*=',
- '//=', '%=', '^=', '<<=', '\\*\\*=', '\\', '=',
- '\\+=', '>>=', '=', '&=', '/=', '-=', '\n,', '^', '>>', '&', '\\+', '\\*', '-', '/', '\\.', '\\*\\*', '%', '<<', '//', '\\', '', '\n\\)', '\\(', ';', ':', '@', '\\[', '\\]', '`', '\\{', '\\}']
-
-py_punct = re.compile(r"""
->=|<>|!=|<|>|<=|==|~|
-\*=|//=|%=|\^=|<<=|\*\*=|\|=|\+=|>>=|=|&=|/=|-=|
-,|\^|>>|&|\+|\*|-|/|\.|\*\*|%|<<|//|\||
-\)|\(|;|:|@|\[|\]|`|\{|\}
-""", re.M | re.X)
-
-
-TERMINALS = [
- 'NAME', 'NUMBER', 'STRING', 'NEWLINE', 'ENDMARKER',
- 'INDENT', 'DEDENT' ]
-
-
-## Grammar Visitors ##################################################
-# FIXME: parsertools.py ? parser/__init__.py ?
-
-class NameToken(Token):
- """A token that is not a keyword"""
- def __init__(self, keywords=None ):
- Token.__init__(self, "NAME")
- self.keywords = keywords
-
- def match(self, source, builder):
- """Matches a token.
- the default implementation is to match any token whose type
- corresponds to the object's name. You can extend Token
- to match anything returned from the lexer. for exemple
- type, value = source.next()
- if type=="integer" and int(value)>=0:
- # found
- else:
- # error unknown or negative integer
- """
- ctx = source.context()
- tk_type, tk_value = source.next()
- if tk_type==self.name:
- if tk_value not in self.keywords:
- ret = builder.token( tk_type, tk_value, source )
- return self.debug_return( ret, tk_type, tk_value )
- source.restore( ctx )
- return None
-
-
-class EBNFVisitor(object):
- def __init__(self):
- self.rules = {}
- self.terminals = {}
- self.current_rule = None
- self.current_subrule = 0
- self.tokens = {}
- self.items = []
- self.terminals['NAME'] = NameToken()
-
- def new_name( self ):
- rule_name = ":%s_%s" % (self.current_rule, self.current_subrule)
- self.current_subrule += 1
- return rule_name
-
- def new_item( self, itm ):
- self.items.append( itm )
- return itm
-
- def visit_grammar( self, node ):
- # print "Grammar:"
- for rule in node.nodes:
- rule.visit(self)
- # the rules are registered already
- # we do a pass through the variables to detect
- # terminal symbols from non terminals
- for r in self.items:
- for i,a in enumerate(r.args):
- if a.name in self.rules:
- assert isinstance(a,Token)
- r.args[i] = self.rules[a.name]
- if a.name in self.terminals:
- del self.terminals[a.name]
- # XXX .keywords also contains punctuations
- self.terminals['NAME'].keywords = self.tokens.keys()
-
- def visit_rule( self, node ):
- symdef = node.nodes[0].value
- self.current_rule = symdef
- self.current_subrule = 0
- alt = node.nodes[1]
- rule = alt.visit(self)
- if not isinstance( rule, Token ):
- rule.name = symdef
- self.rules[symdef] = rule
-
- def visit_alternative( self, node ):
- items = [ node.nodes[0].visit(self) ]
- items+= node.nodes[1].visit(self)
- if len(items)==1 and items[0].name.startswith(':'):
- return items[0]
- alt = Alternative( self.new_name(), *items )
- return self.new_item( alt )
-
- def visit_sequence( self, node ):
- """ """
- items = []
- for n in node.nodes:
- items.append( n.visit(self) )
- if len(items)==1:
- return items[0]
- elif len(items)>1:
- return self.new_item( Sequence( self.new_name(), *items) )
- raise SyntaxError("Found empty sequence")
-
- def visit_sequence_cont( self, node ):
- """Returns a list of sequences (possibly empty)"""
- return [n.visit(self) for n in node.nodes]
-## L = []
-## for n in node.nodes:
-## L.append( n.visit(self) )
-## return L
-
- def visit_seq_cont_list(self, node):
- return node.nodes[1].visit(self)
-
-
- def visit_symbol(self, node):
- star_opt = node.nodes[1]
- sym = node.nodes[0].value
- terminal = self.terminals.get( sym )
- if not terminal:
- terminal = Token( sym )
- self.terminals[sym] = terminal
-
- return self.repeat( star_opt, terminal )
-
- def visit_option( self, node ):
- rule = node.nodes[1].visit(self)
- return self.new_item( KleenStar( self.new_name(), 0, 1, rule ) )
-
- def visit_group( self, node ):
- rule = node.nodes[1].visit(self)
- return self.repeat( node.nodes[3], rule )
-
- def visit_STRING( self, node ):
- value = node.value
- tok = self.tokens.get(value)
- if not tok:
- if py_punct.match( value ):
- tok = Token( value )
- elif py_name.match( value ):
- tok = Token('NAME', value)
- else:
- raise SyntaxError("Unknown STRING value ('%s')" % value )
- self.tokens[value] = tok
- return tok
-
- def visit_sequence_alt( self, node ):
- res = node.nodes[0].visit(self)
- assert isinstance( res, GrammarElement )
- return res
-
- def repeat( self, star_opt, myrule ):
- if star_opt.nodes:
- rule_name = self.new_name()
- tok = star_opt.nodes[0].nodes[0]
- if tok.value == '+':
- return self.new_item( KleenStar( rule_name, _min=1, rule = myrule ) )
- elif tok.value == '*':
- return self.new_item( KleenStar( rule_name, _min=0, rule = myrule ) )
- else:
- raise SyntaxError("Got symbol star_opt with value='%s'" % tok.value )
- return myrule
-
-
-def grammar_grammar():
- """Builds the grammar for the grammar file
-
- Here's the description of the grammar's grammar ::
-
- grammar: rule+
- rule: SYMDEF alternative
-
- alternative: sequence ( '|' sequence )+
- star: '*' | '+'
- sequence: (SYMBOL star? | STRING | option | group star? )+
- option: '[' alternative ']'
- group: '(' alternative ')' star?
- """
- # star: '*' | '+'
- star = Alternative( "star", Token('*'), Token('+') )
- star_opt = KleenStar ( "star_opt", 0, 1, rule=star )
-
- # rule: SYMBOL ':' alternative
- symbol = Sequence( "symbol", Token('SYMBOL'), star_opt )
- symboldef = Token( "SYMDEF" )
- alternative = Sequence( "alternative" )
- rule = Sequence( "rule", symboldef, alternative )
-
- # grammar: rule+
- grammar = KleenStar( "grammar", _min=1, rule=rule )
-
- # alternative: sequence ( '|' sequence )*
- sequence = KleenStar( "sequence", 1 )
- seq_cont_list = Sequence( "seq_cont_list", Token('|'), sequence )
- sequence_cont = KleenStar( "sequence_cont",0, rule=seq_cont_list )
-
- alternative.args = [ sequence, sequence_cont ]
-
- # option: '[' alternative ']'
- option = Sequence( "option", Token('['), alternative, Token(']') )
-
- # group: '(' alternative ')'
- group = Sequence( "group", Token('('), alternative, Token(')'), star_opt )
-
- # sequence: (SYMBOL | STRING | option | group )+
- string = Token('STRING')
- alt = Alternative( "sequence_alt", symbol, string, option, group )
- sequence.args = [ alt ]
-
- return grammar
-
-
-def parse_grammar(stream):
- """parses the grammar file
-
- stream : file-like object representing the grammar to parse
- """
- source = GrammarSource(stream.read())
- rule = grammar_grammar()
- builder = BaseGrammarBuilder()
- result = rule.match(source, builder)
- node = builder.stack[-1]
- vis = EBNFVisitor()
- node.visit(vis)
- return vis
-
-
-from pprint import pprint
-if __name__ == "__main__":
- grambuild = parse_grammar(file('../python/Grammar'))
- for i,r in enumerate(grambuild.items):
- print "% 3d : %s" % (i, r)
- pprint(grambuild.terminals.keys())
- pprint(grambuild.tokens)
- print "|".join(grambuild.tokens.keys() )
-
Deleted: /pypy/branch/dist-2.4.1/pypy/module/recparser/grammar.py
==============================================================================
--- /pypy/branch/dist-2.4.1/pypy/module/recparser/grammar.py Sun Jul 3 20:36:05 2005
+++ (empty file)
@@ -1,322 +0,0 @@
-"""
-a generic recursive descent parser
-the grammar is defined as a composition of objects
-the objects of the grammar are :
-Alternative : as in S -> A | B | C
-Sequence : as in S -> A B C
-KleenStar : as in S -> A* or S -> A+
-Token : a lexer token
-"""
-
-DEBUG = 0
-
-#### Abstract interface for a lexer/tokenizer
-class TokenSource(object):
- """Abstract base class for a source tokenizer"""
- def context(self):
- """Returns a context to restore the state of the object later"""
-
- def restore(self, ctx):
- """Restore the context"""
-
- def next(self):
- """Returns the next token from the source
- a token is a tuple : (type,value) or (None,None) if the end of the
- source has been found
- """
-
- def offset(self, ctx=None):
- """Returns the position we're at so far in the source
- optionnally provide a context and you'll get the offset
- of the context"""
- return -1
-
- def current_line(self):
- """Returns the current line number"""
- return 0
-
-
-######################################################################
-
-from syntaxtree import SyntaxNode, TempSyntaxNode, TokenNode
-
-class BaseGrammarBuilder(object):
- """Base/default class for a builder"""
- def __init__( self, rules=None, debug=0):
- self.rules = rules or {} # a dictionary of grammar rules for debug/reference
- self.debug = debug
- self.stack = []
-
- def context(self):
- """Returns the state of the builder to be restored later"""
- #print "Save Stack:", self.stack
- return len(self.stack)
-
- def restore(self, ctx):
- del self.stack[ctx:]
- #print "Restore Stack:", self.stack
-
- def alternative(self, rule, source):
- # Do nothing, keep rule on top of the stack
- if rule.is_root():
- elems = self.stack[-1].expand()
- self.stack[-1] = SyntaxNode(rule.name, source, *elems)
- if self.debug:
- self.stack[-1].dumpstr()
- return True
-
- def sequence(self, rule, source, elts_number):
- """ """
- items = []
- for node in self.stack[-elts_number:]:
- items += node.expand()
- if rule.is_root():
- node_type = SyntaxNode
- else:
- node_type = TempSyntaxNode
- # replace N elements with 1 element regrouping them
- if elts_number >= 1:
- elem = node_type(rule.name, source, *items)
- del self.stack[-elts_number:]
- self.stack.append(elem)
- elif elts_number == 0:
- self.stack.append(node_type(rule.name, source))
- if self.debug:
- self.stack[-1].dumpstr()
- return True
-
- def token(self, name, value, source):
- self.stack.append(TokenNode(name, source, value))
- if self.debug:
- self.stack[-1].dumpstr()
- return True
-
-
-######################################################################
-# Grammar Elements Classes (Alternative, Sequence, KleenStar, Token) #
-######################################################################
-class GrammarElement(object):
- """Base parser class"""
- def __init__(self, name):
- # the rule name
- self.name = name
- self.args = []
- self._is_root = False
-
- def is_root(self):
- """This is a root node of the grammar, that is one that will
- be included in the syntax tree"""
- if self.name!=":" and self.name.startswith(":"):
- return False
- return True
-
- def match(self, source, builder):
- """Try to match a grammar rule
-
- If next set of tokens matches this grammar element, use <builder>
- to build an appropriate object, otherwise returns None.
-
- /!\ If the sets of element didn't match the current grammar
- element, then the <source> is restored as it was before the
- call to the match() method
-
- returns None if no match or an object build by builder
- """
- return None
-
- def parse(self, source):
- """Returns a simplified grammar if the rule matched at the source
- current context or None"""
- # **NOT USED** **NOT IMPLEMENTED**
- # To consider if we need to improve speed in parsing
- pass
-
- def first_set(self):
- """Returns a list of possible tokens that can start this rule
- None means the rule can be empty
- """
- # **NOT USED** **NOT IMPLEMENTED**
- # To consider if we need to improve speed in parsing
- pass
-
- def __str__(self):
- return self.display(0)
-
- def __repr__(self):
- return self.display(0)
-
- def display(self, level):
- """Helper function used to represent the grammar.
- mostly used for debugging the grammar itself"""
- return "GrammarElement"
-
-
- def debug_return(self, ret, *args ):
- # FIXME: use a wrapper of match() methods instead of debug_return()
- # to prevent additional indirection
- if ret and DEBUG>0:
- sargs = ",".join( [ str(i) for i in args ] )
- print "matched %s (%s): %s" % (self.__class__.__name__, sargs, self.display() )
- return ret
-
-class Alternative(GrammarElement):
- """Represents an alternative in a grammar rule (as in S -> A | B | C)"""
- def __init__(self, name, *args):
- GrammarElement.__init__(self, name )
- self.args = list(args)
- for i in self.args:
- assert isinstance( i, GrammarElement )
-
- def match(self, source, builder):
- """If any of the rules in self.args matches
- returns the object built from the first rules that matches
- """
- if DEBUG>1:
- print "try alt:", self.display()
- # Here we stop at the first match we should
- # try instead to get the longest alternative
- # to see if this solve our problems with infinite recursion
- for rule in self.args:
- m = rule.match( source, builder )
- if m:
- ret = builder.alternative( self, source )
- return self.debug_return( ret )
- return False
-
- def display(self, level=0):
- if level==0:
- name = self.name + " -> "
- elif not self.name.startswith(":"):
- return self.name
- else:
- name = ""
- items = [ a.display(1) for a in self.args ]
- return name+"(" + "|".join( items ) + ")"
-
-
-class Sequence(GrammarElement):
- """Reprensents a Sequence in a grammar rule (as in S -> A B C)"""
- def __init__(self, name, *args):
- GrammarElement.__init__(self, name )
- self.args = list(args)
- for i in self.args:
- assert isinstance( i, GrammarElement )
-
- def match(self, source, builder):
- """matches all of the symbols in order"""
- if DEBUG>1:
- print "try seq:", self.display()
- ctx = source.context()
- bctx = builder.context()
- for rule in self.args:
- m = rule.match(source, builder)
- if not m:
- # Restore needed because some rules may have been matched
- # before the one that failed
- source.restore(ctx)
- builder.restore(bctx)
- return None
- ret = builder.sequence(self, source, len(self.args))
- return self.debug_return( ret )
-
- def display(self, level=0):
- if level == 0:
- name = self.name + " -> "
- elif not self.name.startswith(":"):
- return self.name
- else:
- name = ""
- items = [a.display(1) for a in self.args]
- return name + "(" + " ".join( items ) + ")"
-
-class KleenStar(GrammarElement):
- """Represents a KleenStar in a grammar rule as in (S -> A+) or (S -> A*)"""
- def __init__(self, name, _min = 0, _max = -1, rule=None):
- GrammarElement.__init__( self, name )
- self.args = [rule]
- self.min = _min
- if _max == 0:
- raise ValueError("KleenStar needs max==-1 or max>1")
- self.max = _max
- self.star = "x"
-
- def match(self, source, builder):
- """matches a number of times self.args[0]. the number must be comprised
- between self._min and self._max inclusive. -1 is used to represent infinity"""
- if DEBUG>1:
- print "try kle:", self.display()
- ctx = source.context()
- bctx = builder.context()
- rules = 0
- rule = self.args[0]
- while True:
- m = rule.match(source, builder)
- if not m:
- # Rule should be matched at least 'min' times
- if rules<self.min:
- source.restore(ctx)
- builder.restore(bctx)
- return None
- ret = builder.sequence(self, source, rules)
- return self.debug_return( ret, rules )
- rules += 1
- if self.max>0 and rules == self.max:
- ret = builder.sequence(self, source, rules)
- return self.debug_return( ret, rules )
-
- def display(self, level=0):
- if level==0:
- name = self.name + " -> "
- elif not self.name.startswith(":"):
- return self.name
- else:
- name = ""
- star = "{%d,%d}" % (self.min,self.max)
- if self.min==0 and self.max==1:
- star = "?"
- elif self.min==0 and self.max==-1:
- star = "*"
- elif self.min==1 and self.max==-1:
- star = "+"
- s = self.args[0].display(1)
- return name + "%s%s" % (s, star)
-
-
-class Token(GrammarElement):
- """Represents a Token in a grammar rule (a lexer token)"""
- def __init__( self, name, value = None):
- GrammarElement.__init__( self, name )
- self.value = value
-
- def match(self, source, builder):
- """Matches a token.
- the default implementation is to match any token whose type
- corresponds to the object's name. You can extend Token
- to match anything returned from the lexer. for exemple
- type, value = source.next()
- if type=="integer" and int(value)>=0:
- # found
- else:
- # error unknown or negative integer
- """
- ctx = source.context()
- tk_type, tk_value = source.next()
- if tk_type==self.name:
- if self.value is None:
- ret = builder.token( tk_type, tk_value, source )
- return self.debug_return( ret, tk_type )
- elif self.value == tk_value:
- ret = builder.token( tk_type, tk_value, source )
- return self.debug_return( ret, tk_type, tk_value )
- if DEBUG>1:
- print "tried tok:", self.display()
- source.restore( ctx )
- return None
-
- def display(self, level=0):
- if self.value is None:
- return "<%s>" % self.name
- else:
- return "<%s>=='%s'" % (self.name, self.value)
-
-
Modified: pypy/branch/dist-2.4.1/pypy/module/recparser/pyparser.py
==============================================================================
--- pypy/branch/dist-2.4.1/pypy/module/recparser/pyparser.py (original)
+++ pypy/branch/dist-2.4.1/pypy/module/recparser/pyparser.py Sun Jul 3 20:36:05 2005
@@ -7,9 +7,9 @@
from pypy.interpreter.typedef import TypeDef
from pypy.interpreter.typedef import interp_attrproperty, GetSetProperty
from pypy.interpreter.pycode import PyCode
-from syntaxtree import SyntaxNode
-from pythonparse import parse_python_source
-from pypy.module.recparser import PYTHON_PARSER
+from pypy.interpreter.pyparser.syntaxtree import SyntaxNode
+from pypy.interpreter.pyparser.pythonparse import parse_python_source
+from pypy.interpreter.pyparser.pythonutil import PYTHON_PARSER
__all__ = [ "ASTType", "STType", "suite", "expr" ]
@@ -100,20 +100,24 @@
)
def suite( space, source ):
- builder = parse_python_source( source, PYTHON_PARSER, "file_input" )
+ # make the annotator's life easier (don't use str.splitlines())
+ strings = [line + '\n' for line in source.split('\n')]
+ builder = parse_python_source( strings, PYTHON_PARSER, "file_input" )
return space.wrap( STType(space, builder.stack[-1]) )
suite.unwrap_spec = [ObjSpace, str]
def expr( space, source ):
- builder = parse_python_source( source, PYTHON_PARSER, "eval_input" )
+ # make the annotator life easier (don't use str.splitlines())
+ strings = [line + '\n' for line in source.split('\n')]
+ builder = parse_python_source( strings, PYTHON_PARSER, "eval_input" )
return space.wrap( STType(space, builder.stack[-1]) )
expr.unwrap_spec = [ObjSpace, str]
-def ast2tuple(space, node, line_info=False):
+def ast2tuple(space, node, line_info=0):
"""Quick dummy implementation of parser.ast2tuple(tree) function"""
tuples = node.totuple(line_info)
return space.wrap(tuples)
-ast2tuple.unwrap_spec = [ObjSpace, STType, bool]
+ast2tuple.unwrap_spec = [ObjSpace, STType, int]
Deleted: /pypy/branch/dist-2.4.1/pypy/module/recparser/pythonlexer.py
==============================================================================
--- /pypy/branch/dist-2.4.1/pypy/module/recparser/pythonlexer.py Sun Jul 3 20:36:05 2005
+++ (empty file)
@@ -1,411 +0,0 @@
-"""This is a lexer for a Python recursive descent parser
-it obeys the TokenSource interface defined for the grammar
-analyser in grammar.py
-"""
-
-from grammar import TokenSource
-
-DEBUG = False
-import re
-
-KEYWORDS = [
- 'and', 'assert', 'break', 'class', 'continue', 'def', 'del',
- 'elif', 'if', 'import', 'in', 'is', 'finally', 'for', 'from',
- 'global', 'else', 'except', 'exec', 'lambda', 'not', 'or',
- 'pass', 'print', 'raise', 'return', 'try', 'while', 'yield'
- ]
-
-py_keywords = re.compile(r'(%s)$' % ('|'.join(KEYWORDS)), re.M | re.X)
-
-py_punct = re.compile(r"""
-<>|!=|==|~|
-<=|<<=|<<|<|
->=|>>=|>>|>|
-\*=|\*\*=|\*\*|\*|
-//=|/=|//|/|
-%=|\^=|\|=|\+=|=|&=|-=|
-,|\^|&|\+|-|\.|%|\||
-\)|\(|;|:|@|\[|\]|`|\{|\}
-""", re.M | re.X)
-
-g_symdef = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*:", re.M)
-g_string = re.compile(r"'[^']+'", re.M)
-py_name = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*", re.M)
-py_comment = re.compile(r"#.*$|[ \t\014]*$", re.M)
-py_ws = re.compile(r" *", re.M)
-py_skip = re.compile(r"[ \t\014]*(#.*$)?", re.M)
-py_encoding = re.compile(r"coding[:=]\s*([-\w.]+)")
-# py_number = re.compile(r"0x[0-9a-z]+|[0-9]+l|([0-9]+\.[0-9]*|\.[0-9]+|[0-9]+)(e[+-]?[0-9]+)?j?||[0-9]+", re.I)
-
-# 0x[\da-f]+l matches hexadecimal numbers, possibly defined as long
-# \d+l matches and only matches long integers
-# (\d+\.\d*|\.\d+|\d+)(e[+-]?\d+)?j? matches simple integers,
-# exponential notations and complex
-py_number = re.compile(r"""0x[\da-f]+l?|
-\d+l|
-(\d+\.\d*|\.\d+|\d+)(e[+-]?\d+)?j?
-""", re.I | re.X)
-
-def _normalize_encoding(encoding):
- """returns normalized name for <encoding>
-
- see dist/src/Parser/tokenizer.c 'get_normal_name()'
- for implementation details / reference
-
- NOTE: for now, parser.suite() raises a MemoryError when
- a bad encoding is used. (SF bug #979739)
- """
- # lower() + '_' / '-' conversion
- encoding = encoding.replace('_', '-').lower()
- if encoding.startswith('utf-8'):
- return 'utf-8'
- for variant in ('latin-1', 'iso-latin-1', 'iso-8859-1'):
- if encoding.startswith(variant):
- return 'iso-8859-1'
- return encoding
-
-class PythonSource(TokenSource):
- """The Python tokenizer"""
- def __init__(self, inpstring):
- TokenSource.__init__(self)
- self.input = inpstring
- self.pos = 0
- self.indent = 0
- self.indentstack = [ 0 ]
- self.atbol = True
- self.line = 1
- self._current_line = 1
- self.pendin = 0 # indentation change waiting to be reported
- self.level = 0
- self.linestart = 0
- self.stack = []
- self.stack_pos = 0
- self.comment = ''
- self.encoding = None
-
- def current_line(self):
- return self._current_line
-
- def context(self):
- return self.stack_pos
-
- def restore(self, ctx):
- self.stack_pos = ctx
-
- def offset(self, ctx=None):
- if ctx is None:
- return self.stack_pos
- else:
- assert type(ctx)==int
- return ctx
-
- def _next(self):
- """returns the next token from source"""
- inp = self.input
- pos = self.pos
- input_length = len(inp)
- if pos >= input_length:
- return self.end_of_file()
- # Beginning of line
- if self.atbol:
- self.linestart = pos
- col = 0
- m = py_ws.match(inp, pos)
- pos = m.end()
- col = pos - self.linestart
- self.atbol = False
- # skip blanklines
- m = py_comment.match(inp, pos)
- if m:
- if not self.comment:
- self.comment = m.group(0)
- # <HACK> XXX FIXME: encoding management
- if self.line <= 2:
- # self.comment can be the previous comment, so don't use it
- comment = m.group(0)[1:]
- m_enc = py_encoding.search(comment)
- if m_enc is not None:
- self.encoding = _normalize_encoding(m_enc.group(1))
- # </HACK>
- self.pos = m.end() + 1
- self.line += 1
- self.atbol = True
- return self._next()
- # the current block is more indented than the previous one
- if col > self.indentstack[-1]:
- self.indentstack.append(col)
- return "INDENT", None
- # the current block is less indented than the previous one
- while col < self.indentstack[-1]:
- self.pendin += 1
- self.indentstack.pop(-1)
- if col != self.indentstack[-1]:
- raise SyntaxError("Indentation Error")
- if self.pendin > 0:
- self.pendin -= 1
- return "DEDENT", None
- m = py_skip.match(inp, pos)
- if m.group(0)[-1:] == '\n':
- self.line += 1
- self.comment = m.group(1) or ''
- pos = m.end() # always match
- if pos >= input_length:
- return self.end_of_file()
- self.pos = pos
-
- # STRING
- c = inp[pos]
- if c in ('r','R'):
- if pos < input_length-1 and inp[pos+1] in ("'",'"'):
- return self.next_string(raw=1)
- elif c in ('u','U'):
- if pos < input_length-1:
- if inp[pos+1] in ("r",'R'):
- if pos<input_length-2 and inp[pos+2] in ("'",'"'):
- return self.next_string( raw = 1, uni = 1 )
- elif inp[pos+1] in ( "'", '"' ):
- return self.next_string( uni = 1 )
- elif c in ( '"', "'" ):
- return self.next_string()
-
- # NAME
- m = py_name.match(inp, pos)
- if m:
- self.pos = m.end()
- val = m.group(0)
-# if py_keywords.match(val):
-# return val, None
- return "NAME", val
-
- # NUMBER
- m = py_number.match(inp, pos)
- if m:
- self.pos = m.end()
- return "NUMBER", m.group(0)
-
- # NEWLINE
- if c == '\n':
- self.pos += 1
- self.line += 1
- if self.level > 0:
- return self._next()
- else:
- self.atbol = True
- comment = self.comment
- self.comment = ''
- return "NEWLINE", comment
-
- if c == '\\':
- if pos < input_length-1 and inp[pos+1] == '\n':
- self.pos += 2
- return self._next()
-
- m = py_punct.match(inp, pos)
- if m:
- punct = m.group(0)
- if punct in ( '(', '{', '[' ):
- self.level += 1
- if punct in ( ')', '}', ']' ):
- self.level -= 1
- self.pos = m.end()
- return punct, None
- raise SyntaxError("Unrecognized token '%s'" % inp[pos:pos+20] )
-
- def next(self):
- if self.stack_pos >= len(self.stack):
- tok, val = self._next()
- self.stack.append( (tok, val, self.line) )
- self._current_line = self.line
- else:
- tok,val,line = self.stack[self.stack_pos]
- self._current_line = line
- self.stack_pos += 1
- if DEBUG:
- print "%d/%d: %s, %s" % (self.stack_pos, len(self.stack), tok, val)
- return (tok, val)
-
- def end_of_file(self):
- """return DEDENT and ENDMARKER"""
- if len(self.indentstack) == 1:
- self.indentstack.pop(-1)
- return "NEWLINE", '' #self.comment
- elif len(self.indentstack) > 1:
- self.indentstack.pop(-1)
- return "DEDENT", None
- return "ENDMARKER", None
-
-
- def next_string(self, raw=0, uni=0):
- pos = self.pos + raw + uni
- inp = self.input
- quote = inp[pos]
- qsize = 1
- if inp[pos:pos+3] == 3*quote:
- pos += 3
- quote = 3*quote
- qsize = 3
- else:
- pos += 1
- while True:
- if inp[pos:pos+qsize] == quote:
- s = inp[self.pos:pos+qsize]
- self.pos = pos+qsize
- return "STRING", s
- # FIXME : shouldn't it be inp[pos] == os.linesep ?
- if inp[pos:pos+2] == "\n" and qsize == 1:
- return None, None
- if inp[pos] == "\\":
- pos += 1
- pos += 1
-
- def debug(self):
- """return context for debug information"""
- if not hasattr(self, '_lines'):
- # split lines only once
- self._lines = self.input.splitlines()
- if self.line > len(self._lines):
- lineno = len(self._lines)
- else:
- lineno = self.line
- return 'line %s : %s' % (lineno, self._lines[lineno-1])
-
- ## ONLY refactor ideas ###########################################
-## def _mynext(self):
-## """returns the next token from source"""
-## inp = self.input
-## pos = self.pos
-## input_length = len(inp)
-## if pos >= input_length:
-## return self.end_of_file()
-## # Beginning of line
-## if self.atbol:
-## self.linestart = pos
-## col = 0
-## m = py_ws.match(inp, pos)
-## pos = m.end()
-## col = pos - self.linestart
-## self.atbol = False
-## # skip blanklines
-## m = py_comment.match(inp, pos)
-## if m:
-## self.pos = m.end() + 1
-## self.line += 1
-## self.atbol = True
-## return self._next()
-## # the current block is more indented than the previous one
-## if col > self.indentstack[-1]:
-## self.indentstack.append(col)
-## return "INDENT", None
-## # the current block is less indented than the previous one
-## while col < self.indentstack[-1]:
-## self.pendin += 1
-## self.indentstack.pop(-1)
-## if col != self.indentstack[-1]:
-## raise SyntaxError("Indentation Error")
-## if self.pendin > 0:
-## self.pendin -= 1
-## return "DEDENT", None
-## m = py_skip.match(inp, pos)
-## if m.group(0)[-1:] == '\n':
-## self.line += 1
-## pos = m.end() # always match
-## if pos >= input_length:
-## return self.end_of_file()
-## self.pos = pos
-
-## c = inp[pos]
-## chain = (self._check_string, self._check_name, self._check_number,
-## self._check_newline, self._check_backslash, self._check_punct)
-## for check_meth in chain:
-## token_val_pair = check_meth(c, pos)
-## if token_val_pair is not None:
-## return token_val_pair
-
-
-## def _check_string(self, c, pos):
-## inp = self.input
-## input_length = len(inp)
-## # STRING
-## if c in ('r', 'R'):
-## if pos < input_length-1 and inp[pos+1] in ("'",'"'):
-## return self.next_string(raw=1)
-## elif c in ('u','U'):
-## if pos < input_length - 1:
-## if inp[pos+1] in ("r", 'R'):
-## if pos<input_length-2 and inp[pos+2] in ("'",'"'):
-## return self.next_string(raw = 1, uni = 1)
-## elif inp[pos+1] in ( "'", '"' ):
-## return self.next_string(uni = 1)
-## elif c in ( '"', "'" ):
-## return self.next_string()
-## return None
-
-## def _check_name(self, c, pos):
-## inp = self.input
-## # NAME
-## m = py_name.match(inp, pos)
-## if m:
-## self.pos = m.end()
-## val = m.group(0)
-## if py_keywords.match(val):
-## return val, None
-## return "NAME", val
-## return None
-
-## def _check_number(self, c, pos):
-## inp = self.input
-## # NUMBER
-## m = py_number.match(inp, pos)
-## if m:
-## self.pos = m.end()
-## return "NUMBER", m.group(0)
-## return None
-
-## def _check_newline(self, c, pos):
-## # NEWLINE
-## if c == '\n':
-## self.pos += 1
-## self.line += 1
-## if self.level > 0:
-## return self._next()
-## else:
-## self.atbol = True
-## return "NEWLINE", None
-## return None
-
-## def _check_backslash(self, c, pos):
-## inp = self.input
-## input_length = len(inp)
-## if c == '\\':
-## if pos < input_length-1 and inp[pos+1] == '\n':
-## self.pos += 2
-## return self._next()
-## return None
-
-## def _check_punct(self, c, pos):
-## inp = self.input
-## input_length = len(inp)
-## m = py_punct.match(inp, pos)
-## if m:
-## punct = m.group(0)
-## if punct in ( '(', '{' ):
-## self.level += 1
-## if punct in ( ')', '}' ):
-## self.level -= 1
-## self.pos = m.end()
-## return punct, None
-## raise SyntaxError("Unrecognized token '%s'" % inp[pos:pos+20] )
-
-
-
-def tokenize_file(filename):
- f = file(filename).read()
- src = PythonSource(f)
- token = src.next()
- while token!=("ENDMARKER",None) and token!=(None,None):
- print token
- token = src.next()
-
-if __name__ == '__main__':
- import sys
- tokenize_file(sys.argv[1])
Deleted: /pypy/branch/dist-2.4.1/pypy/module/recparser/pythonparse.py
==============================================================================
--- /pypy/branch/dist-2.4.1/pypy/module/recparser/pythonparse.py Sun Jul 3 20:36:05 2005
+++ (empty file)
@@ -1,65 +0,0 @@
-#!/usr/bin/env python
-from grammar import BaseGrammarBuilder
-from pythonlexer import PythonSource
-from ebnfparse import parse_grammar
-import sys
-import pythonutil
-import symbol
-
-def parse_python_source( textsrc, gram, goal ):
- """Parse a python source according to goal"""
- target = gram.rules[goal]
- src = PythonSource(textsrc)
- builder = BaseGrammarBuilder(debug=False, rules=gram.rules)
- result = target.match(src, builder)
- # <HACK> XXX find a clean way to process encoding declarations
- if src.encoding:
- builder._source_encoding = src.encoding
- # </HACK>
- if not result:
- raise SyntaxError("at %s" % src.debug() )
- return builder
-
-def parse_file_input(pyf, gram):
- """Parse a python file"""
- return parse_python_source( pyf.read(), gram, "file_input" )
-
-def parse_single_input(textsrc, gram):
- """Parse a python file"""
- return parse_python_source( textsrc, gram, "single_input" )
-
-def parse_eval_input(textsrc, gram):
- """Parse a python file"""
- return parse_python_source( textsrc, gram, "eval_input" )
-
-def pypy_parse(filename):
- """parse <filename> using PyPy's parser module and return nested tuples
- """
- pyf = file(filename)
- builder = parse_file_input(pyf, pythonutil.python_grammar())
- pyf.close()
- if builder.stack:
- # print builder.stack[-1]
- root_node = builder.stack[-1]
- nested_tuples = root_node.totuple()
- if hasattr(builder, '_source_encoding'):
- # XXX: maybe the parser could fix that instead ?
- return ( symbol.encoding_decl, nested_tuples, builder._source_encoding)
- else:
- return nested_tuples
- return None # XXX raise an exception instead
-
-if __name__ == "__main__":
- if len(sys.argv) < 2:
- print "python parse.py [-d N] test_file.py"
- sys.exit(1)
- if sys.argv[1] == "-d":
- debug_level = int(sys.argv[2])
- test_file = sys.argv[3]
- else:
- test_file = sys.argv[1]
- print "-"*20
- print
- print "pyparse \n", pypy_parse(test_file)
- print "parser \n", pythonutil.python_parse(test_file)
-
Deleted: /pypy/branch/dist-2.4.1/pypy/module/recparser/pythonutil.py
==============================================================================
--- /pypy/branch/dist-2.4.1/pypy/module/recparser/pythonutil.py Sun Jul 3 20:36:05 2005
+++ (empty file)
@@ -1,35 +0,0 @@
-__all__ = ["python_grammar", "PYTHON_GRAMMAR" ]
-
-import os
-import sys
-
-_ver = ".".join([str(i) for i in sys.version_info[:2]])
-PYTHON_GRAMMAR = os.path.join( os.path.dirname(__file__), "data", "Grammar" + _ver )
-
-def python_grammar():
- """returns a """
- from ebnfparse import parse_grammar
- level = get_debug()
- set_debug( 0 )
- gram = parse_grammar( file(PYTHON_GRAMMAR) )
- set_debug( level )
- return gram
-
-def get_debug():
- """Return debug level"""
- import grammar
- return grammar.DEBUG
-
-def set_debug( level ):
- """sets debug mode to <level>"""
- import grammar
- grammar.DEBUG = level
-
-
-def python_parse(filename):
- """parse <filename> using CPython's parser module and return nested tuples
- """
- pyf = file(filename)
- import parser
- tp2 = parser.suite(pyf.read())
- return tp2.totuple()
Deleted: /pypy/branch/dist-2.4.1/pypy/module/recparser/syntaxtree.py
==============================================================================
--- /pypy/branch/dist-2.4.1/pypy/module/recparser/syntaxtree.py Sun Jul 3 20:36:05 2005
+++ (empty file)
@@ -1,160 +0,0 @@
-import symbol
-import token
-
-TOKEN_MAP = {
- "STRING" : token.STRING,
- "NUMBER" : token.NUMBER,
- "NAME" : token.NAME,
- "NEWLINE" : token.NEWLINE,
- "DEDENT" : token.DEDENT,
- "ENDMARKER" : token.ENDMARKER,
- "INDENT" : token.INDENT,
- "NEWLINE" : token.NEWLINE,
- "NT_OFFSET" : token.NT_OFFSET,
- "N_TOKENS" : token.N_TOKENS,
- "OP" : token.OP,
- "?ERRORTOKEN" : token.ERRORTOKEN,
- "&" : token.AMPER,
- "&=" : token.AMPEREQUAL,
- "`" : token.BACKQUOTE,
- "^" : token.CIRCUMFLEX,
- "^=" : token.CIRCUMFLEXEQUAL,
- ":" : token.COLON,
- "," : token.COMMA,
- "." : token.DOT,
- "//" : token.DOUBLESLASH,
- "//=" : token.DOUBLESLASHEQUAL,
- "**" : token.DOUBLESTAR,
- "**=" : token.DOUBLESTAREQUAL,
- "==" : token.EQEQUAL,
- "=" : token.EQUAL,
- ">" : token.GREATER,
- ">=" : token.GREATEREQUAL,
- "{" : token.LBRACE,
- "}" : token.RBRACE,
- "<<" : token.LEFTSHIFT,
- "<<=" : token.LEFTSHIFTEQUAL,
- "<" : token.LESS,
- "<=" : token.LESSEQUAL,
- "(" : token.LPAR,
- "[" : token.LSQB,
- "-=" : token.MINEQUAL,
- "-" : token.MINUS,
- "!=" : token.NOTEQUAL,
- "<>" : token.NOTEQUAL,
- "%" : token.PERCENT,
- "%=" : token.PERCENTEQUAL,
- "+" : token.PLUS,
- "+=" : token.PLUSEQUAL,
- ")" : token.RBRACE,
- ">>" : token.RIGHTSHIFT,
- ">>=" : token.RIGHTSHIFTEQUAL,
- ")" : token.RPAR,
- "]" : token.RSQB,
- ";" : token.SEMI,
- "/" : token.SLASH,
- "/=" : token.SLASHEQUAL,
- "*" : token.STAR,
- "*=" : token.STAREQUAL,
- "~" : token.TILDE,
- "|" : token.VBAR,
- "|=" : token.VBAREQUAL,
- }
-
-SYMBOLS = {}
-# copies the numerical mapping between symbol name and symbol value
-# into SYMBOLS
-for k,v in symbol.__dict__.items():
- if type(v)==int:
- SYMBOLS[k] = v
-
-
-class SyntaxNode(object):
- """A syntax node"""
- def __init__(self, name, source, *args):
- self.name = name
- self.nodes = list(args)
- self.lineno = source.current_line()
-
- def dumptree(self, treenodes, indent):
- treenodes.append(self.name)
- if len(self.nodes) > 1:
- treenodes.append(" -> (\n")
- treenodes.append(indent+" ")
- for node in self.nodes:
- node.dumptree(treenodes, indent+" ")
- treenodes.append(")\n")
- treenodes.append(indent)
- elif len(self.nodes) == 1:
- treenodes.append(" ->\n")
- treenodes.append(indent+" ")
- self.nodes[0].dumptree(treenodes, indent+" ")
-
- def dumpstr(self):
- treenodes = []
- self.dumptree(treenodes, "")
- return "".join(treenodes)
-
- def __repr__(self):
- return "<node [%s] at 0x%x>" % (self.name, id(self))
-
- def __str__(self):
- return "(%s)" % self.name
-
- def visit(self, visitor):
- visit_meth = getattr(visitor, "visit_%s" % self.name, None)
- if visit_meth:
- return visit_meth(self)
- # helper function for nodes that have only one subnode:
- if len(self.nodes) == 1:
- return self.nodes[0].visit(visitor)
- raise RuntimeError("Unknonw Visitor for %r" % self.name)
-
- def expand(self):
- return [ self ]
-
- def totuple(self, lineno=False ):
- symvalue = SYMBOLS.get( self.name, (0,self.name) )
- l = [ symvalue ]
- l += [node.totuple(lineno) for node in self.nodes]
- return tuple(l)
-
-
-class TempSyntaxNode(SyntaxNode):
- """A temporary syntax node to represent intermediate rules"""
- def expand(self):
- return self.nodes
-
-class TokenNode(SyntaxNode):
- """A token node"""
- def __init__(self, name, source, value):
- SyntaxNode.__init__(self, name, source)
- self.value = value
-
- def dumptree(self, treenodes, indent):
- if self.value:
- treenodes.append("%s='%s' (%d) " % (self.name, self.value, self.lineno))
- else:
- treenodes.append("'%s' (%d) " % (self.name, self.lineno))
-
- def __repr__(self):
- if self.value is not None:
- return "<%s=%s>" % ( self.name, repr(self.value))
- else:
- return "<%s!>" % (self.name,)
-
- def totuple(self, lineno=False):
- num = TOKEN_MAP.get(self.name, -1)
- if num == -1:
- print "Unknown", self.name, self.value
- if self.value is not None:
- val = self.value
- else:
- if self.name not in ("NEWLINE", "INDENT", "DEDENT", "ENDMARKER"):
- val = self.name
- else:
- val = self.value or ''
- if lineno:
- return (num, val, self.lineno)
- else:
- return (num, val)
Deleted: /pypy/branch/dist-2.4.1/pypy/module/recparser/tuplebuilder.py
==============================================================================
--- /pypy/branch/dist-2.4.1/pypy/module/recparser/tuplebuilder.py Sun Jul 3 20:36:05 2005
+++ (empty file)
@@ -1,61 +0,0 @@
-
-from grammar import BaseGrammarBuilder
-from syntaxtree import TOKEN_MAP, SYMBOLS, NT_OFFSET
-
-
-def _expand_nodes( nodes ):
- expanded = []
- for n in nodes:
- if n[0]==-2:
- expanded.extend( expand_nodes(n[1:]) )
- else:
- expanded.append(n)
- return tuple(expanded)
-
-def expand_nodes( nodes ):
- r = _expand_nodes( nodes )
- for n in nodes:
- assert type(n[0])==int
- return r
-
-class TupleBuilder(BaseGrammarBuilder):
- """A builder that directly produce the AST"""
-
- def __init__( self, rules=None, debug=0, lineno=False ):
- BaseGrammarBuilder.__init__(self, rules, debug )
- self.lineno = True
-
- def alternative( self, rule, source ):
- # Do nothing, keep rule on top of the stack
- if rule.is_root():
- node = [ SYMBOLS.get( rule.name, (0,rule.name) ) ]
- node += expand_nodes( [self.stack[-1]] )
- self.stack[-1] = tuple(node)
- return True
-
- def sequence(self, rule, source, elts_number):
- """ """
- if rule.is_root():
- node = [ SYMBOLS.get( rule.name, (0,rule.name) ) ]
- else:
- node = [ -2 ]
- if elts_number>0:
- node += expand_nodes( self.stack[-elts_number:] )
- self.stack[-elts_number:] = [tuple(node)]
- else:
- self.stack.append( tuple(node) )
- return True
-
- def token(self, name, value, source):
- num = TOKEN_MAP.get( name, -1)
- lineno = source.current_line()
- if value is None:
- if name not in ("NEWLINE", "INDENT", "DEDENT", "ENDMARKER"):
- value = name
- else:
- value = ''
- if self.lineno:
- self.stack.append( (num, value, lineno) )
- else:
- self.stack.append( (num, value) )
- return True
Modified: pypy/branch/dist-2.4.1/pypy/objspace/std/fake.py
==============================================================================
--- pypy/branch/dist-2.4.1/pypy/objspace/std/fake.py (original)
+++ pypy/branch/dist-2.4.1/pypy/objspace/std/fake.py Sun Jul 3 20:36:05 2005
@@ -142,13 +142,17 @@
self.unwrappedargs = self.space.unwrap(w_args)
self.unwrappedkwds = self.space.unwrap(w_kwds)
except UnwrapError, e:
- raise UnwrapError('calling %s: %s' % (self.code.cpy_callable, e))
+ code = self.code
+ assert isinstance(code, CPythonFakeCode)
+ raise UnwrapError('calling %s: %s' % (code.cpy_callable, e))
def getfastscope(self):
raise OperationError(self.space.w_TypeError,
self.space.wrap("cannot get fastscope of a CPythonFakeFrame"))
def run(self):
- fn = self.code.cpy_callable
+ code = self.code
+ assert isinstance(code, CPythonFakeCode)
+ fn = code.cpy_callable
try:
result = apply(fn, self.unwrappedargs, self.unwrappedkwds)
except:
Modified: pypy/branch/dist-2.4.1/pypy/objspace/std/longobject.py
==============================================================================
--- pypy/branch/dist-2.4.1/pypy/objspace/std/longobject.py (original)
+++ pypy/branch/dist-2.4.1/pypy/objspace/std/longobject.py Sun Jul 3 20:36:05 2005
@@ -582,7 +582,7 @@
return z
-#Substract the absolute values of two longs
+# Subtract the absolute values of two longs
def _x_sub(a, b, space):
size_a = len(a.digits)
size_b = len(b.digits)
@@ -649,8 +649,244 @@
return z
def _inplace_divrem1(pout, pin, n):
- rem = r_uint(0, space)
+ """
+ Divide long pin by non-zero digit n, storing quotient
+ in pout, and returning the remainder. It's OK for pin == pout on entry.
+ """
+ rem = r_uint(0)
assert n > 0 and n <= SHORT_MASK
size = len(pin.digits) * 2 - 1
while size >= 0:
rem = (rem << SHORT_BIT) + pin._getshort(size)
+ hi = rem // n
+ pout._setshort(size, hi)
+ rem -= hi * n
+ size -= 1
+ return rem
+
+def _divrem1(space, a, n):
+ """
+ Divide a long integer by a digit, returning both the quotient
+ and the remainder as a tuple.
+ The sign of a is ignored; n should not be zero.
+ """
+ assert n > 0 and n <= SHORT_MASK
+ size = len(a.digits)
+ z = W_LongObject(space, [r_uint(0)] * size, 1)
+ rem = _inplace_divrem1(z, a, n)
+ z._normalize()
+ return z, rem
+
+def _muladd1(space, a, n, extra):
+ """Multiply by a single digit and add a single digit, ignoring the sign.
+ """
+ digitpairs = len(a.digits)
+ size_a = digitpairs * 2
+ if a._getshort(size_a-1) == 0:
+ size_a -= 1
+ z = W_LongObject(space, [r_uint(0)] * (digitpairs+1), 1)
+ carry = extra
+ for i in range(size_a):
+ carry += a._getshort(i) * n
+ z._setshort(i, carry & SHORT_MASK)
+ carry >>= SHORT_BIT
+ i += 1
+ z._setshort(i, carry)
+ z._normalize()
+ return z
+
+# for the carry in _x_divrem, we need something that can hold
+# two digits plus a sign.
+# for the time being, we here implement such a 33 bit number just
+# for the purpose of the division.
+# In the long term, it might be considered to implement the
+# notation of a "double anything" unsigned type, which could
+# be used recursively to implement longs of any size.
+
+class r_suint(object):
+ # we do not inherit from r_uint, because we only
+ # support a few operations for our purpose
+ def __init__(self, value=0):
+ if isinstance(value, r_suint):
+ self.value = value.value
+ self.sign = value.sign
+ else:
+ self.value = r_uint(value)
+ self.sign = -(value < 0)
+
+ def longval(self):
+ if self.sign:
+ return -long(-self.value)
+ else:
+ return long(self.value)
+
+ def __repr__(self):
+ return repr(self.longval())
+
+ def __str__(self):
+ return str(self.longval())
+
+ def __iadd__(self, other):
+ hold = self.value
+ self.value += other
+ self.sign ^= - ( (other < 0) != (self.value < hold) )
+ return self
+
+ def __add__(self, other):
+ res = r_suint(self)
+ res += other
+ return res
+
+ def __isub__(self, other):
+ hold = self.value
+ self.value -= other
+ self.sign ^= - ( (other < 0) != (self.value > hold) )
+ return self
+
+ def __sub__(self, other):
+ res = r_suint(self)
+ res -= other
+ return res
+
+ def __irshift__(self, n):
+ self.value >>= n
+ if self.sign:
+ self.value += LONG_MASK << (LONG_BIT - n)
+ return self
+
+ def __rshift__(self, n):
+ res = r_suint(self)
+ res >>= n
+ return res
+
+ def __and__(self, mask):
+ # only used to get bits from the value
+ return self.value & mask
+
+ def __eq__(self, other):
+ if not isinstance(other,r_suint):
+ other = r_suint(other)
+ return self.sign == other.sign and self.value == other.value
+
+def _x_divrem(space, v1, w1): # return as tuple, PyLongObject **prem)
+ size_w = len(w1.digits) * 2
+ # hack for the moment:
+ # find where w1 is really nonzero
+ if w1._getshort(size_w-1) == 0:
+ size_w -= 1
+ d = (SHORT_MASK+1) // (w1._getshort(size_w-1) + 1)
+ v = _muladd1(space, v1, d, r_uint(0))
+ w = _muladd1(space, w1, d, r_uint(0))
+ size_v = len(v.digits) * 2
+ if v._getshort(size_v-1) == 0:
+ size_v -= 1
+ size_w = len(w.digits) * 2
+ if w._getshort(size_w-1) == 0:
+ size_w -= 1
+ assert size_v >= size_w and size_w > 1 # Assert checks by div()
+
+ size_a = size_v - size_w + 1
+ digitpairs = (size_a + 1) // 2
+ a = W_LongObject(space, [r_uint(0)] * digitpairs, 1)
+ j = size_v
+ for k in range(size_a-1, -1, -1):
+ if j >= size_v:
+ vj = r_uint(0)
+ else:
+ vj = v._getshort(j)
+ carry = r_suint(0) # note: this must hold two digits and sign!
+
+ if vj == w._getshort(size_w-1):
+ q = r_uint(SHORT_MASK)
+ else:
+ q = ((vj << SHORT_BIT) + v._getshort(j-1)) // w._getshort(size_w-1)
+
+ while (w._getshort(size_w-2) * q >
+ ((
+ (vj << SHORT_BIT)
+ + v._getshort(j-1)
+ - q * w._getshort(size_w-1)
+ ) << SHORT_BIT)
+ + v._getshort(j-2)):
+ q -= 1
+
+ for i in range(size_w):
+ if i+k >= size_v:
+ break
+ z = w._getshort(i) * q
+ zz = z >> SHORT_BIT
+ carry += v._getshort(i+k) + (zz << SHORT_BIT)
+ carry -= z
+ v._setshort(i+k, r_uint(carry.value & SHORT_MASK))
+ carry >>= SHORT_BIT
+ carry -= zz
+
+ i += 1 # compare C code which re-uses i of loop
+ if i+k < size_v:
+ carry += v._getshort(i+k)
+ v._setshort(i+k, r_uint(0))
+
+ if carry == 0:
+ a._setshort(k, q)
+ else:
+ #assert carry == -1
+ # the above would hold if we didn't minimize size_w
+ a._setshort(k, q-1)
+ carry = r_suint(0)
+
+ for i in range(size_w):
+ if i+k >= size_v:
+ break
+ carry += v._getshort(i+k) + w._getshort(i)
+ v._setshort(i+k, r_uint(carry) & SHORT_MASK)
+ carry >>= SHORT_BIT
+ j -= 1
+
+ a._normalize()
+ rem, _ = _divrem1(space, v, d)
+ return a, rem
+
+
+##def _divrem(a, b)
+## size_a = len(a.digits) * 2
+## size_b = len(b.digits) * 2
+## PyLongObject *z;
+##
+## if (size_b == 0) {
+## PyErr_SetString(PyExc_ZeroDivisionError,
+## "long division or modulo by zero");
+## return -1;
+## }
+## if (size_a < size_b ||
+## (size_a == size_b &&
+## a->ob_digit[size_a-1] < b->ob_digit[size_b-1])) {
+## /* |a| < |b|. */
+## *pdiv = _PyLong_New(0);
+## Py_INCREF(a);
+## *prem = (PyLongObject *) a;
+## return 0;
+## }
+## if (size_b == 1) {
+## digit rem = 0;
+## z = divrem1(a, b->ob_digit[0], &rem);
+## if (z == NULL)
+## return -1;
+## *prem = (PyLongObject *) PyLong_FromLong((long)rem);
+## }
+## else {
+## z = x_divrem(a, b, prem);
+## if (z == NULL)
+## return -1;
+## }
+## /* Set the signs.
+## The quotient z has the sign of a*b;
+## the remainder r has the sign of a,
+## so a = b*z + r. */
+## if ((a->ob_size < 0) != (b->ob_size < 0))
+## z->ob_size = -(z->ob_size);
+## if (a->ob_size < 0 && (*prem)->ob_size != 0)
+## (*prem)->ob_size = -((*prem)->ob_size);
+## *pdiv = z;
+## return 0;
+
+## XXXX
Modified: pypy/branch/dist-2.4.1/pypy/objspace/std/stringobject.py
==============================================================================
--- pypy/branch/dist-2.4.1/pypy/objspace/std/stringobject.py (original)
+++ pypy/branch/dist-2.4.1/pypy/objspace/std/stringobject.py Sun Jul 3 20:36:05 2005
@@ -1084,7 +1084,8 @@
# CPython's logic for deciding if ""%values is
# an error (1 value, 0 %-formatters) or not
# (values is of a mapping type)
- if hasattr(values, '__getitem__') and not isinstance(values, str):
+ if (hasattr(values, '__getitem__')
+ and not isinstance(values, basestring)):
return _formatting.format(format, (values,), values)
else:
return _formatting.format(format, (values,), None)
Modified: pypy/branch/dist-2.4.1/pypy/objspace/std/test/test_longobject.py
==============================================================================
--- pypy/branch/dist-2.4.1/pypy/objspace/std/test/test_longobject.py (original)
+++ pypy/branch/dist-2.4.1/pypy/objspace/std/test/test_longobject.py Sun Jul 3 20:36:05 2005
@@ -50,6 +50,24 @@
result = lobj.mul__Long_Long(self.space, f1, f2)
assert result.longval() == x * y
+ def test__inplace_divrem1(self):
+ # signs are not handled in the helpers!
+ x = 1238585838347L
+ y = 3
+ f1 = lobj.W_LongObject(self.space, *lobj.args_from_long(x))
+ f2 = r_uint(y)
+ remainder = lobj._inplace_divrem1(f1, f1, f2)
+ assert (f1.longval(), remainder) == divmod(x, y)
+
+ def test__divrem1(self):
+ # signs are not handled in the helpers!
+ x = 1238585838347L
+ y = 3
+ f1 = lobj.W_LongObject(self.space, *lobj.args_from_long(x))
+ f2 = r_uint(y)
+ div, rem = lobj._divrem1(self.space, f1, f2)
+ assert (div.longval(), rem) == divmod(x, y)
+
def test_eq(self):
x = 5858393919192332223L
y = 585839391919233111223311112332L
Modified: pypy/branch/dist-2.4.1/pypy/objspace/std/test/test_stringobject.py
==============================================================================
--- pypy/branch/dist-2.4.1/pypy/objspace/std/test/test_stringobject.py (original)
+++ pypy/branch/dist-2.4.1/pypy/objspace/std/test/test_stringobject.py Sun Jul 3 20:36:05 2005
@@ -116,9 +116,24 @@
assert self.space.eq_w(space.getitem(w_str, w_slice), w('el'))
class AppTestStringObject:
+
def test_format_wrongchar(self):
raises(ValueError, 'a%Zb'.__mod__, ((23,),))
+ def test_format(self):
+ raises(TypeError, "foo".__mod__, "bar")
+ raises(TypeError, u"foo".__mod__, "bar")
+ raises(TypeError, "foo".__mod__, u"bar")
+
+ for format, arg, cls in [("a %s b", "foo", str),
+ (u"a %s b", "foo", unicode),
+ ("a %s b", u"foo", unicode),
+ (u"a %s b", u"foo", unicode)]:
+ raises(TypeError, format[:2].__mod__, arg)
+ result = format % arg
+ assert result == "a foo b"
+ assert isinstance(result, cls)
+
def test_split(self):
assert "".split() == []
assert " ".split() == []
Modified: pypy/branch/dist-2.4.1/pypy/rpython/lltype.py
==============================================================================
--- pypy/branch/dist-2.4.1/pypy/rpython/lltype.py (original)
+++ pypy/branch/dist-2.4.1/pypy/rpython/lltype.py Sun Jul 3 20:36:05 2005
@@ -4,6 +4,8 @@
from pypy.tool.uid import Hashable
from pypy.tool.tls import tlsobject
+log = py.log.Producer('lltype')
+
TLS = tlsobject()
def saferecursive(func, defl):
@@ -141,6 +143,30 @@
raise AttributeError, 'struct %s has no field %r' % (self._name,
name)
+
+
+
+ def _names_without_voids(self, at_root=True):
+ if at_root: #XXX debug stuff
+ log('_names_without_voids: ' + self._str_without_voids())
+ names_without_voids = [name for name in self._names if self._flds[name] is not Void]
+ if names_without_voids != list(self._names):
+ log('_names_without_voids: removed Void(s) _names=%s, return=%s' % (str(list(self._names)), str(names_without_voids)))
+ #return self._names
+ return names_without_voids
+
+ def _str_fields_without_voids(self):
+ return ', '.join(['%s: %s' % (name, self._flds[name])
+ for name in self._names_without_voids(False)])
+ _str_fields_without_voids = saferecursive(_str_fields_without_voids, '...')
+
+ def _str_without_voids(self):
+ return "%s %s { %s }" % (self.__class__.__name__,
+ self._name, self._str_fields_without_voids())
+
+
+
+
def _str_fields(self):
return ', '.join(['%s: %s' % (name, self._flds[name])
for name in self._names])
@@ -240,6 +266,10 @@
return self.RESULT._defl()
return _func(self, _callable=ex)
+ def _trueargs(self):
+ return [arg for arg in self.ARGS if arg is not Void]
+
+
class OpaqueType(ContainerType):
def __init__(self, tag):
Modified: pypy/branch/dist-2.4.1/pypy/rpython/rclass.py
==============================================================================
--- pypy/branch/dist-2.4.1/pypy/rpython/rclass.py (original)
+++ pypy/branch/dist-2.4.1/pypy/rpython/rclass.py Sun Jul 3 20:36:05 2005
@@ -156,6 +156,7 @@
# as MethodType has a custom __get__ too and we don't support
# it, it's a very bad idea anyway.
if isinstance(s_value, annmodel.SomePBC):
+ s_value = self.classdef.matching(s_value)
debound = {}
count = 0
for x, classdef in s_value.prebuiltinstances.items():
@@ -396,8 +397,11 @@
result.super)
# then add instance attributes from this level
for name, (mangled_name, r) in self.fields.items():
- attrvalue = getattr(value, name)
- llattrvalue = r.convert_const(attrvalue)
+ if r.lowleveltype == Void:
+ llattrvalue = None
+ else:
+ attrvalue = getattr(value, name)
+ llattrvalue = r.convert_const(attrvalue)
setattr(result, mangled_name, llattrvalue)
else:
# OBJECT part
Modified: pypy/branch/dist-2.4.1/pypy/rpython/rconstantdict.py
==============================================================================
--- pypy/branch/dist-2.4.1/pypy/rpython/rconstantdict.py (original)
+++ pypy/branch/dist-2.4.1/pypy/rpython/rconstantdict.py Sun Jul 3 20:36:05 2005
@@ -84,10 +84,10 @@
#def make_iterator_repr(self):
# return StrDictIteratorRepr(self)
- #def rtype_method_get(self, hop):
- # v_dict, v_key, v_default = hop.inputargs(self, string_repr,
- # self.value_repr)
- # return hop.gendirectcall(ll_get, v_dict, v_key, v_default)
+ def rtype_method_get(self, hop):
+ v_dict, v_key, v_default = hop.inputargs(self, self.key_repr,
+ self.value_repr)
+ return hop.gendirectcall(ll_constantdict_get, v_dict, v_key, v_default)
class __extend__(pairtype(ConstantDictRepr, rmodel.Repr)):
@@ -125,6 +125,13 @@
entry = ll_constantdict_lookup(d, key)#, hashcompute)
return entry.valid
+def ll_constantdict_get(d, key, default):#, hashcompute):
+ entry = ll_constantdict_lookup(d, key)#, hashcompute)
+ if entry.valid:
+ return entry.value
+ else:
+ return default
+
def ll_constantdict_setnewitem(d, key, value):#, hashcompute):
entry = ll_constantdict_lookup(d, key)#, hashcompute)
assert not entry.valid
Modified: pypy/branch/dist-2.4.1/pypy/rpython/rdict.py
==============================================================================
--- pypy/branch/dist-2.4.1/pypy/rpython/rdict.py (original)
+++ pypy/branch/dist-2.4.1/pypy/rpython/rdict.py Sun Jul 3 20:36:05 2005
@@ -3,7 +3,7 @@
from pypy.objspace.flow.model import Constant
from pypy.rpython import rmodel, lltype, rstr
from pypy.rpython.rarithmetic import r_uint
-from pypy.rpython import rlist, rconstantdict
+from pypy.rpython import rlist, rconstantdict, remptydict
# ____________________________________________________________
#
@@ -39,6 +39,8 @@
return rconstantdict.ConstantDictRepr(
rtyper.getrepr(dictkey.s_value),
rtyper.getrepr(dictvalue.s_value))
+ elif isinstance(s_key, annmodel.SomeImpossibleValue):
+ return remptydict.EmptyDictRepr()
else:
raise rmodel.TyperError("cannot make repr of %r" %(self.dictdef,))
@@ -331,37 +333,37 @@
# _____________________________________________________________
# methods
-def ll_get(v_dict, v_key, v_default):
- entry = ll_strdict_lookup(v_dict, v_key)
+def ll_get(dict, key, default):
+ entry = ll_strdict_lookup(dict, key)
if entry.key and entry.key != deleted_entry_marker:
return entry.value
else:
- return v_default
+ return default
-def ll_copy(v_dict):
- DICTPTR = lltype.typeOf(v_dict)
+def ll_copy(dict):
+ DICTPTR = lltype.typeOf(dict)
d = lltype.malloc(DICTPTR.TO)
- d.entries = lltype.malloc(DICTPTR.TO.entries.TO, len(v_dict.entries))
- d.num_items = v_dict.num_items
- d.num_pristine_entries = v_dict.num_pristine_entries
+ d.entries = lltype.malloc(DICTPTR.TO.entries.TO, len(dict.entries))
+ d.num_items = dict.num_items
+ d.num_pristine_entries = dict.num_pristine_entries
i = 0
dictlen = len(d.entries)
while i < dictlen:
d_entry = d.entries[i]
- v_entry = v_dict.entries[i]
- d_entry.key = v_entry.key
- d_entry.value = v_entry.value
+ entry = dict.entries[i]
+ d_entry.key = entry.key
+ d_entry.value = entry.value
i += 1
return d
-def ll_update(v_dic1, v_dic2):
- d2len =len(v_dic2.entries)
- entries = v_dic2.entries
+def ll_update(dic1, dic2):
+ d2len =len(dic2.entries)
+ entries = dic2.entries
i = 0
while i < d2len:
entry = entries[i]
if entry.key and entry.key != deleted_entry_marker:
- ll_strdict_setitem(v_dic1, entry.key, entry.value)
+ ll_strdict_setitem(dic1, entry.key, entry.value)
i += 1
def dum_keys(): pass
@@ -370,13 +372,13 @@
# this is an implementation of keys(), values() and items()
# in a single function.
-# note that by specialization on v_func, three different
+# note that by specialization on func, three different
# and very efficient functions are created.
-def ll_kvi(v_dic, LISTPTR, v_func):
- res = rlist.ll_newlist(LISTPTR, v_dic.num_items)
- dlen = len(v_dic.entries)
- entries = v_dic.entries
+def ll_kvi(dic, LISTPTR, func):
+ res = rlist.ll_newlist(LISTPTR, dic.num_items)
+ dlen = len(dic.entries)
+ entries = dic.entries
items = res.items
i = 0
p = 0
@@ -384,14 +386,14 @@
entry = entries[i]
key = entry.key
if key and key != deleted_entry_marker:
- if v_func is dum_items:
+ if func is dum_items:
r = lltype.malloc(LISTPTR.TO.items.TO.OF.TO)
r.item0 = key
r.item1 = entry.value
items[p] = r
- elif v_func is dum_keys:
+ elif func is dum_keys:
items[p] = key
- elif v_func is dum_values:
+ elif func is dum_values:
items[p] = entry.value
p += 1
i += 1
Modified: pypy/branch/dist-2.4.1/pypy/rpython/rpbc.py
==============================================================================
--- pypy/branch/dist-2.4.1/pypy/rpython/rpbc.py (original)
+++ pypy/branch/dist-2.4.1/pypy/rpython/rpbc.py Sun Jul 3 20:36:05 2005
@@ -16,6 +16,7 @@
# categories below, and doesn't for example mix functions, classes
# and methods.
call_families = rtyper.annotator.getpbccallfamilies()
+ userclasses = rtyper.annotator.getuserclasses()
choices = {}
for x, classdef in self.prebuiltinstances.items():
cdefflag = isclassdef(classdef)
@@ -26,28 +27,40 @@
if isinstance(x, types.MethodType) and x.im_self is None:
x = x.im_func
- # callable or frozen object?
- if (classdef, x) in call_families:
- # what type of callable?
- if isinstance(x, types.FunctionType):
- if cdefflag:
- choice = MethodsPBCRepr
- cdefflag = False
- else:
- choice = FunctionsPBCRepr
- elif isinstance(x, (type, types.ClassType)):
+ if cdefflag:
+ # methods of a run-time instance
+ if not isinstance(x, types.FunctionType):
+ raise TyperError("%r appears to be a method bound to %r, "
+ "but it is not a function" % (
+ x, classdef))
+ choice = MethodsPBCRepr
+
+ elif isinstance(x, (type, types.ClassType)):
+ # classes
+ if x in userclasses:
+ # user classes
choice = ClassesPBCRepr
+ elif type(x) is type and x.__module__ == '__builtin__':
+ # special case for built-in types, seen in faking
+ choice = getPyObjRepr
+ else:
+ raise TyperError("don't known about class %r" % (x,))
+
+ elif (classdef, x) in call_families:
+ # other kind of callable
+ if isinstance(x, types.FunctionType):
+ # function
+ choice = FunctionsPBCRepr
elif isinstance(x, types.MethodType):
+ # prebuilt bound method
choice = MethodOfFrozenPBCRepr
else:
raise TyperError("don't know about callable %r" % (x,))
+
else:
- # frozen object
+ # otherwise, just assume it's a plain frozen object
choice = getFrozenPBCRepr
- if cdefflag:
- raise TyperError("unexpected classdef in PBC set %r" % (
- self.prebuiltinstances,))
choices[choice] = True
if len(choices) > 1:
@@ -63,6 +76,9 @@
# ____________________________________________________________
+def getPyObjRepr(rtyper, s_pbc):
+ return robject.pyobj_repr
+
def getFrozenPBCRepr(rtyper, s_pbc):
if len(s_pbc.prebuiltinstances) <= 1:
Modified: pypy/branch/dist-2.4.1/pypy/rpython/rstr.py
==============================================================================
--- pypy/branch/dist-2.4.1/pypy/rpython/rstr.py (original)
+++ pypy/branch/dist-2.4.1/pypy/rpython/rstr.py Sun Jul 3 20:36:05 2005
@@ -26,6 +26,8 @@
STR = GcStruct('str', ('hash', Signed),
('chars', Array(Char)))
+SIGNED_ARRAY = GcArray(Signed)
+
class __extend__(annmodel.SomeString):
def rtyper_makerepr(self, rtyper):
@@ -104,6 +106,18 @@
v_str, v_value = hop.inputargs(string_repr, string_repr)
return hop.gendirectcall(ll_endswith, v_str, v_value)
+ def rtype_method_find(_, hop):
+ v_str, v_value = hop.inputargs(string_repr, string_repr)
+ return hop.gendirectcall(ll_find, v_str, v_value)
+
+ def rtype_method_upper(_, hop):
+ v_str, = hop.inputargs(string_repr)
+ return hop.gendirectcall(ll_upper, v_str)
+
+ def rtype_method_lower(_, hop):
+ v_str, = hop.inputargs(string_repr)
+ return hop.gendirectcall(ll_lower, v_str)
+
def rtype_method_join(_, hop):
r_lst = hop.args_r[1]
s_item = r_lst.listitem.s_value
@@ -556,8 +570,83 @@
return True
+def ll_find(s1, s2):
+    """Knuth-Morris-Pratt algorithm for substring match"""
+ len1 = len(s1.chars)
+ len2 = len(s2.chars)
+ # Construct the array of possible restarting positions
+ # T = Array_of_ints [-1..len2]
+ # T[-1] = -1 s2.chars[-1] is supposed to be unequal to everything else
+ T = malloc( SIGNED_ARRAY, len2 )
+ i = 0
+ j = -1
+ while i<len2:
+ if j>=0 and s2.chars[i] == s2.chars[j]:
+ j += 1
+ T[i] = j
+ i += 1
+ elif j>0:
+ j = T[j-1]
+ else:
+ T[i] = 0
+ i += 1
+ j = 0
+
+ # Now the find algorithm
+ i = 0
+ m = 0
+ while m+i<len1:
+ if s1.chars[m+i]==s2.chars[i]:
+ i += 1
+ if i==len2:
+ return m
+ else:
+ # mismatch, go back to the last possible starting pos
+ if i==0:
+ e = -1
+ else:
+ e = T[i-1]
+ m = m + i - e
+ if i>0:
+ i = e
+ return -1
+
emptystr = string_repr.convert_const("")
+def ll_upper(s):
+ s_chars = s.chars
+ s_len = len(s_chars)
+ if s_len == 0:
+ return emptystr
+ i = 0
+ result = malloc(STR, s_len)
+ while i < s_len:
+ ochar = ord(s_chars[i])
+ if ochar >= 97 and ochar <= 122:
+ upperchar = ochar - 32
+ else:
+ upperchar = ochar
+ result.chars[i] = chr(upperchar)
+ i += 1
+ return result
+
+def ll_lower(s):
+ s_chars = s.chars
+ s_len = len(s_chars)
+ if s_len == 0:
+ return emptystr
+ i = 0
+ result = malloc(STR, s_len)
+ while i < s_len:
+ ochar = ord(s_chars[i])
+ if ochar >= 65 and ochar <= 96:
+ lowerchar = ochar + 32
+ else:
+ lowerchar = ochar
+ result.chars[i] = chr(lowerchar)
+ i += 1
+ return result
+
def ll_join(s, items):
s_chars = s.chars
s_len = len(s_chars)
Modified: pypy/branch/dist-2.4.1/pypy/rpython/rtyper.py
==============================================================================
--- pypy/branch/dist-2.4.1/pypy/rpython/rtyper.py (original)
+++ pypy/branch/dist-2.4.1/pypy/rpython/rtyper.py Sun Jul 3 20:36:05 2005
@@ -267,7 +267,8 @@
# in all generated operations.
if hop.s_result.is_constant():
if isinstance(resultvar, Constant) and \
- isinstance(hop.r_result.lowleveltype, Primitive):
+ isinstance(hop.r_result.lowleveltype, Primitive) and \
+ hop.r_result.lowleveltype != Void:
assert resultvar.value == hop.s_result.const
resulttype = resultvar.concretetype
op.result.concretetype = hop.r_result.lowleveltype
Modified: pypy/branch/dist-2.4.1/pypy/rpython/test/test_lltype.py
==============================================================================
--- pypy/branch/dist-2.4.1/pypy/rpython/test/test_lltype.py (original)
+++ pypy/branch/dist-2.4.1/pypy/rpython/test/test_lltype.py Sun Jul 3 20:36:05 2005
@@ -261,6 +261,10 @@
py.test.raises(TypeError, pf, 0, 0)
py.test.raises(TypeError, pf, 'a')
+def test_truargs():
+ F = FuncType((Void, Signed, Void, Unsigned), Float)
+ assert Void not in F._trueargs()
+
def test_inconsistent_gc_containers():
A = GcArray(('y', Signed))
S = GcStruct('b', ('y', Signed))
Modified: pypy/branch/dist-2.4.1/pypy/rpython/test/test_rclass.py
==============================================================================
--- pypy/branch/dist-2.4.1/pypy/rpython/test/test_rclass.py (original)
+++ pypy/branch/dist-2.4.1/pypy/rpython/test/test_rclass.py Sun Jul 3 20:36:05 2005
@@ -63,6 +63,16 @@
res = interpret(dummyfn, [])
assert res == 6
+def test_prebuilt_instances_with_void():
+ def marker():
+ return 42
+ a = EmptyBase()
+ a.nothing_special = marker
+ def dummyfn():
+ return a.nothing_special()
+ res = interpret(dummyfn, [])
+ assert res == 42
+
# method calls
class A:
def f(self):
Modified: pypy/branch/dist-2.4.1/pypy/rpython/test/test_rconstantdict.py
==============================================================================
--- pypy/branch/dist-2.4.1/pypy/rpython/test/test_rconstantdict.py (original)
+++ pypy/branch/dist-2.4.1/pypy/rpython/test/test_rconstantdict.py Sun Jul 3 20:36:05 2005
@@ -16,3 +16,12 @@
assert res is False
res = interpret(func, [4])
assert res is True
+
+def test_constantdict_get():
+ d = {1: -11, 4: -44, 16: -66}
+ def func(i, j):
+ return d.get(i, j)
+ res = interpret(func, [15, 62])
+ assert res == 62
+ res = interpret(func, [4, 25])
+ assert res == -44
Modified: pypy/branch/dist-2.4.1/pypy/rpython/test/test_rpbc.py
==============================================================================
--- pypy/branch/dist-2.4.1/pypy/rpython/test/test_rpbc.py (original)
+++ pypy/branch/dist-2.4.1/pypy/rpython/test/test_rpbc.py Sun Jul 3 20:36:05 2005
@@ -37,6 +37,10 @@
def m(self, x):
return self.z - x
+class MyStrangerSubclass(MyBase):
+ def m(self, x, y):
+ return x*y
+
def test_method_call():
def f(a, b):
obj = MyBase()
@@ -58,17 +62,51 @@
res = interpret(f, [-1, 2.3])
assert res == -3.3
+def test_stranger_subclass_1():
+ def f1():
+ obj = MyStrangerSubclass()
+ obj.z = 100
+ return obj.m(6, 7)
+ res = interpret(f1, [])
+ assert res == 42
+
+def test_stranger_subclass_2():
+ def f2():
+ obj = MyStrangerSubclass()
+ obj.z = 100
+ return obj.m(6, 7) + MyBase.m(obj, 58)
+ res = interpret(f2, [])
+ assert res == 200
+
class MyBaseWithInit:
def __init__(self, a):
self.a1 = a
+class MySubclassWithInit(MyBaseWithInit):
+ def __init__(self, a, b):
+ MyBaseWithInit.__init__(self, a)
+ self.b1 = b
+
def test_class_init():
def f(a):
instance = MyBaseWithInit(a)
return instance.a1
assert interpret(f, [5]) == 5
+def test_class_init_2():
+ def f(a, b):
+ instance = MySubclassWithInit(a, b)
+ return instance.a1 * instance.b1
+ assert interpret(f, [6, 7]) == 42
+
+def test_class_calling_init():
+ def f():
+ instance = MySubclassWithInit(1, 2)
+ instance.__init__(3, 4)
+ return instance.a1 * instance.b1
+ assert interpret(f, []) == 12
+
class Freezing:
def _freeze_(self):
Modified: pypy/branch/dist-2.4.1/pypy/rpython/test/test_rstr.py
==============================================================================
--- pypy/branch/dist-2.4.1/pypy/rpython/test/test_rstr.py (original)
+++ pypy/branch/dist-2.4.1/pypy/rpython/test/test_rstr.py Sun Jul 3 20:36:05 2005
@@ -180,6 +180,32 @@
res = interpret(fn, [i,j])
assert res is fn(i, j)
+def test_find():
+ def fn(i, j):
+ s1 = ['one two three', 'abc abcdab abcdabcdabde']
+ s2 = ['one', 'two', 'abcdab', 'one tou', 'abcdefgh', 'fortytwo']
+ return s1[i].find(s2[j])
+ for i in range(2):
+ for j in range(6):
+ res = interpret(fn, [i,j])
+ assert res == fn(i, j)
+
+def test_upper():
+ def fn(i):
+ strings = ['', ' ', 'upper', 'UpPeR', ',uppEr,']
+ return strings[i].upper()
+ for i in range(5):
+ res = interpret(fn, [i])
+ assert ''.join(res.chars) == fn(i)
+
+def test_lower():
+ def fn(i):
+ strings = ['', ' ', 'lower', 'LoWeR', ',lowEr,']
+ return strings[i].lower()
+ for i in range(5):
+ res = interpret(fn, [i])
+ assert ''.join(res.chars) == fn(i)
+
def test_join():
res = interpret(lambda: ''.join([]), [])
assert ''.join(res.chars) == ""
Modified: pypy/branch/dist-2.4.1/pypy/tool/option.py
==============================================================================
--- pypy/branch/dist-2.4.1/pypy/tool/option.py (original)
+++ pypy/branch/dist-2.4.1/pypy/tool/option.py Sun Jul 3 20:36:05 2005
@@ -10,6 +10,11 @@
spaces = []
oldstyle = 0
uselibfile = 0
+ useparsermodule = "cpython" # "cpython" / "recparser" / "parser"
+ parser = "cpython" # "cpython" / "pyparse"
+ compiler = "cpython" # "cpython"
+ # "pyparse" pypy parser, cpython compiler
+ # "pycomp" pypy parser and compiler (TBD)
def run_tb_server(option, opt, value, parser):
from pypy.tool import tb_server
@@ -28,7 +33,7 @@
options.append(make_option(
'--oldstyle', action="store_true",dest="oldstyle",
- help="enable oldstyle classes as default metaclass (std objspace only)"))
+ help="enable oldstyle classes as default metaclass (std objspace only)"))
options.append(make_option(
'--file', action="store_true",dest="uselibfile",
help="enable our custom file implementation"))
@@ -39,6 +44,13 @@
'-H', action="callback",
callback=run_tb_server,
help="use web browser for traceback info"))
+ options.append(make_option(
+ '--pyparse', action="store_const", dest="compiler", const="pyparse",
+ help="enable the internal pypy parser with CPython compiler"))
+ options.append(make_option(
+ '--parsermodule', action="store",type="string", dest="useparsermodule",
+ help="select the parser module to use",
+ metavar="[cpython|recparser|parser]"))
return options
@@ -67,7 +79,7 @@
except KeyError:
module = __import__("pypy.objspace.%s" % name, None, None, ["Space"])
Space = module.Space
- space = Space()
+ space = Space( Options() )
if name == 'std' and Options.oldstyle:
space.enable_old_style_classes_as_default_metaclass()
if Options.uselibfile:
Modified: pypy/branch/dist-2.4.1/pypy/translator/backendoptimization.py
==============================================================================
--- pypy/branch/dist-2.4.1/pypy/translator/backendoptimization.py (original)
+++ pypy/branch/dist-2.4.1/pypy/translator/backendoptimization.py Sun Jul 3 20:36:05 2005
@@ -1,9 +1,10 @@
import autopath
from pypy.translator.translator import Translator
from pypy.objspace.flow.model import Variable, Constant, Block, Link
+from pypy.objspace.flow.model import SpaceOperation
from pypy.objspace.flow.model import traverse, mkentrymap, checkgraph
from pypy.tool.unionfind import UnionFind
-
+from pypy.rpython.lltype import Void
def remove_same_as(graph):
"""Remove all 'same_as' operations.
@@ -41,6 +42,21 @@
traverse(visit, graph)
+def remove_void(translator):
+ for func, graph in translator.flowgraphs.iteritems():
+ args = [arg for arg in graph.startblock.inputargs
+ if arg.concretetype is not Void]
+ graph.startblock.inputargs = args
+ def visit(block):
+ if isinstance(block, Block):
+ for op in block.operations:
+ if op.opname == 'direct_call':
+ args = [arg for arg in op.args
+ if arg.concretetype is not Void]
+ op.args = args
+ for func, graph in translator.flowgraphs.iteritems():
+ traverse(visit, graph)
+
def SSI_to_SSA(graph):
"""Rename the variables in a flow graph as much as possible without
violating the SSA rule. 'SSI' means that each Variable in a flow graph is
Modified: pypy/branch/dist-2.4.1/pypy/translator/goal/translate_pypy.py
==============================================================================
--- pypy/branch/dist-2.4.1/pypy/translator/goal/translate_pypy.py (original)
+++ pypy/branch/dist-2.4.1/pypy/translator/goal/translate_pypy.py Sun Jul 3 20:36:05 2005
@@ -25,6 +25,8 @@
-no-d Disable recording of debugging information
-huge=% Threshold in the number of functions after which only a local call
graph and not a full one is displayed
+ -no-snapshot
+ Don't redirect imports to the translation snapshot
-save filename
saves the translator to a file. The file type can either
be .py or .zip (recommended).
@@ -34,32 +36,33 @@
"""
import autopath, sys, os
-# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-basedir = autopath.this_dir
+if '-no-snapshot' not in sys.argv:
+ # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+ basedir = autopath.this_dir
-pypy_translation_snapshot_dir = os.path.join(basedir, 'pypy-translation-snapshot')
+ pypy_translation_snapshot_dir = os.path.join(basedir, 'pypy-translation-snapshot')
-if not os.path.isdir(pypy_translation_snapshot_dir):
- print """
-Translation is performed on a specific revision of PyPy which lives on
-a branch. This needs to be checked out into translator/goal with:
+ if not os.path.isdir(pypy_translation_snapshot_dir):
+ print """
+ Translation is performed on a specific revision of PyPy which lives on
+ a branch. This needs to be checked out into translator/goal with:
-svn co http://codespeak.net/svn/pypy/branch/pypy-translation-snapshot
-"""[1:]
- sys.exit(2)
+ svn co http://codespeak.net/svn/pypy/branch/pypy-translation-snapshot
+ """[1:]
+ sys.exit(2)
-# override imports from pypy head with imports from pypy-translation-snapshot
-import pypy
-pypy.__path__.insert(0, pypy_translation_snapshot_dir)
+ # override imports from pypy head with imports from pypy-translation-snapshot
+ import pypy
+ pypy.__path__.insert(0, pypy_translation_snapshot_dir)
-# complement imports from pypy.objspace (from pypy-translation-snapshot)
-# with pypy head objspace/
-import pypy.objspace
-pypy.objspace.__path__.append(os.path.join(autopath.pypydir, 'objspace'))
+ # complement imports from pypy.objspace (from pypy-translation-snapshot)
+ # with pypy head objspace/
+ import pypy.objspace
+ pypy.objspace.__path__.append(os.path.join(autopath.pypydir, 'objspace'))
-print "imports redirected to pypy-translation-snapshot."
+ print "imports redirected to pypy-translation-snapshot."
-# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+ # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
import threading, pdb
@@ -77,6 +80,7 @@
from pypy.translator.tool import buildpyxmodule
buildpyxmodule.enable_fast_compilation()
+annmodel.DEBUG = False
@@ -258,6 +262,7 @@
'-no-o': False,
'-tcc': False,
'-no-d': False,
+ '-no-snapshot' : False,
'-load': False,
'-save': False,
'-fork': False,
Modified: pypy/branch/dist-2.4.1/pypy/translator/goal/unixcheckpoint.py
==============================================================================
--- pypy/branch/dist-2.4.1/pypy/translator/goal/unixcheckpoint.py (original)
+++ pypy/branch/dist-2.4.1/pypy/translator/goal/unixcheckpoint.py Sun Jul 3 20:36:05 2005
@@ -6,8 +6,8 @@
print '---> Checkpoint: run / quit / pdb ?'
try:
line = raw_input().strip().lower()
- except KeyboardInterrupt:
- print '(KeyboardInterrupt ignored)'
+ except (KeyboardInterrupt, EOFError), e:
+ print '(%s ignored)' % e.__class__.__name__
continue
if line == 'run':
break
Modified: pypy/branch/dist-2.4.1/pypy/translator/llvm2/build_llvm_module.py
==============================================================================
--- pypy/branch/dist-2.4.1/pypy/translator/llvm2/build_llvm_module.py (original)
+++ pypy/branch/dist-2.4.1/pypy/translator/llvm2/build_llvm_module.py Sun Jul 3 20:36:05 2005
@@ -12,6 +12,7 @@
from pypy.translator.pyrex.genpyrex import GenPyrex
from pypy.translator.tool.buildpyxmodule import make_c_from_pyxfile
from pypy.translator.tool import stdoutcapture
+from pypy.translator.llvm2.genllvm import use_boehm_gc
debug = True
@@ -20,6 +21,22 @@
OPTIMIZATION_SWITCHES = "-simplifycfg -mem2reg -instcombine -dce -inline"
+def compile_module(module, source_files, object_files, library_files):
+ open("%s_setup.py" % module, "w").write(str(py.code.Source(
+ '''
+ from distutils.core import setup
+ from distutils.extension import Extension
+ setup(name="%(module)s",
+ ext_modules = [Extension(
+ name = "%(module)s",
+ sources = %(source_files)s,
+ libraries = %(library_files)s,
+ extra_objects = %(object_files)s)])
+ ''' % locals())))
+ cmd = "python %s_setup.py build_ext --inplace" % module
+ if debug: print cmd
+ cmdexec(cmd)
+
def make_module_from_llvm(llvmfile, pyxfile, optimize=False):
include_dir = py.magic.autopath().dirpath()
dirpath = llvmfile.dirpath()
@@ -27,29 +44,32 @@
os.chdir(str(dirpath))
modname = pyxfile.purebasename
b = llvmfile.purebasename
+ source_files = [ "%s.c" % modname ]
+ object_files = []
+ library_files = []
+ if use_boehm_gc:
+ library_files.append('gc')
if sys.maxint == 2147483647: #32 bit platform
if optimize:
- ops1 = ["llvm-as %s.ll -f -o %s.bc" % (b, b),
+ cmds = ["llvm-as %s.ll -f -o %s.bc" % (b, b),
"opt %s -f %s.bc -o %s_optimized.bc" % (OPTIMIZATION_SWITCHES, b, b),
- "llc -enable-correct-eh-support %s_optimized.bc -f -o %s.s" % (b, b),
- "as %s.s -o %s.o" % (b, b)]
+ "llc -enable-correct-eh-support %s_optimized.bc -f -o %s.s" % (b, b)]
else:
- ops1 = ["llvm-as %s.ll -f -o %s.bc" % (b, b),
- "llc -enable-correct-eh-support %s.bc -f -o %s.s" % (b, b),
- "as %s.s -o %s.o" % (b, b)]
- ops2 = ["gcc -c -shared -I/usr/include/python2.3 %s.c" % pyxfile.purebasename,
- "gcc -shared %s.o %s.o -o %s.so" % (b, modname, modname)]
+ cmds = ["llvm-as %s.ll -f -o %s.bc" % (b, b),
+ "llc -enable-correct-eh-support %s.bc -f -o %s.s" % (b, b)]
+ cmds.append("as %s.s -o %s.o" % (b, b))
+ object_files.append("%s.o" % b)
else: #assume 64 bit platform (x86-64?)
#this special case for x86-64 (called ia64 in llvm) can go as soon as llc supports ia64 assembly output!
if optimize:
- ops1 = ["llvm-as %s.ll -f -o %s.bc" % (b, b),
+ cmds = ["llvm-as %s.ll -f -o %s.bc" % (b, b),
"opt %s -f %s.bc -o %s_optimized.bc" % (OPTIMIZATION_SWITCHES, b, b),
"llc -enable-correct-eh-support %s_optimized.bc -march=c -f -o %s.c" % (b, b)]
else:
- ops1 = ["llvm-as %s.ll -f -o %s.bc" % (b, b),
+ cmds = ["llvm-as %s.ll -f -o %s.bc" % (b, b),
"llc -enable-correct-eh-support %s.bc -march=c -f -o %s.c" % (b, b)]
- ops2 = ["gcc -shared -fPIC -I/usr/include/python2.3 %s.c %s.c -o %s.so" % (b, modname, modname)]
+ source_files.append("%s.c" % b)
try:
if debug: print "modname", modname
@@ -57,13 +77,11 @@
if debug: print "working in", path.local()
try:
try:
- for op in ops1:
- if debug: print op
- cmdexec(op)
+ for cmd in cmds:
+ if debug: print cmd
+ cmdexec(cmd)
make_c_from_pyxfile(pyxfile)
- for op in ops2:
- if debug: print op
- cmdexec(op)
+ compile_module(modname, source_files, object_files, library_files)
finally:
foutput, foutput = c.done()
except:
Deleted: /pypy/branch/dist-2.4.1/pypy/translator/llvm2/cfgtransform.py
==============================================================================
--- /pypy/branch/dist-2.4.1/pypy/translator/llvm2/cfgtransform.py Sun Jul 3 20:36:05 2005
+++ (empty file)
@@ -1,44 +0,0 @@
-from pypy.objspace.flow.model import traverse, Block, checkgraph
-from pypy.translator.unsimplify import remove_double_links
-
-
-def remove_same_as(graph):
- same_as_positions = []
- def visit(node):
- if isinstance(node, Block):
- for i, op in enumerate(node.operations):
- if op.opname == 'same_as':
- same_as_positions.append((node, i))
- traverse(visit, graph)
- while same_as_positions:
- block, index = same_as_positions.pop()
- same_as_result = block.operations[index].result
- same_as_arg = block.operations[index].args[0]
- # replace the new variable (same_as_result) with the old variable
- # (from all subsequent positions)
- for op in block.operations[index:]:
- if op is not None:
- for i in range(len(op.args)):
- if op.args[i] == same_as_result:
- op.args[i] = same_as_arg
- for link in block.exits:
- for i in range(len(link.args)):
- if link.args[i] == same_as_result:
- link.args[i] = same_as_arg
- if block.exitswitch == same_as_result:
- block.exitswitch = same_as_arg
- block.operations[index] = None
-
- # remove all same_as operations
- def visit(node):
- if isinstance(node, Block) and node.operations:
- node.operations[:] = filter(None, node.operations)
- traverse(visit, graph)
- checkgraph(graph)
-
-
-def prepare_graph(graph, translator):
- remove_same_as(graph)
- remove_double_links(translator, graph)
- return graph
-
Modified: pypy/branch/dist-2.4.1/pypy/translator/llvm2/codewriter.py
==============================================================================
--- pypy/branch/dist-2.4.1/pypy/translator/llvm2/codewriter.py (original)
+++ pypy/branch/dist-2.4.1/pypy/translator/llvm2/codewriter.py Sun Jul 3 20:36:05 2005
@@ -1,25 +1,48 @@
import py
+from itertools import count
from pypy.translator.llvm2.log import log
+from pypy.translator.llvm2.genllvm import use_boehm_gc
log = log.codewriter
+show_line_numbers = True
+count = count().next
class CodeWriter(object):
def __init__(self):
self._lines = []
+ self.append('declare sbyte* %GC_malloc(uint)')
def append(self, line):
+ if show_line_numbers:
+ line = "%-75s; %d" % (line, len(self._lines) + 1)
self._lines.append(line)
log(line)
+ def comment(self, line):
+ self.append(";; " + line)
+
+ def newline(self):
+ self.append("")
+
def indent(self, line):
self.append(" " + line)
def label(self, name):
self.append(" %s:" % name)
+ def globalinstance(self, name, type, data):
+ self.append("%s = internal constant %s {%s}" % (name, type, data))
+
def structdef(self, name, typereprs):
self.append("%s = type { %s }" %(name, ", ".join(typereprs)))
+ def arraydef(self, name, typerepr):
+ self.append("%s = type { int, [0 x %s] }" % (name, typerepr))
+
+ def funcdef(self, name, rettyperepr, argtypereprs):
+ self.append("%s = type %s (%s)" % (name, rettyperepr,
+ ", ".join(argtypereprs)))
+
def declare(self, decl):
self.append("declare %s" %(decl,))
@@ -42,7 +65,10 @@
self.append("}")
def ret(self, type_, ref):
- self.indent("ret %s %s" % (type_, ref))
+ self.indent("ret %s %s" % (type_, ref))
+
+ def ret_void(self):
+ self.indent("ret void")
def phi(self, targetvar, type_, refs, blocknames):
assert targetvar.startswith('%')
@@ -60,16 +86,28 @@
self.indent("%s = call %s %s(%s)" % (targetvar, returntype, functionref,
", ".join(arglist)))
+ def call_void(self, functionref, argrefs, argtypes):
+ arglist = ["%s %s" % item for item in zip(argtypes, argrefs)]
+ self.indent("call void %s(%s)" % (functionref, ", ".join(arglist)))
+
def cast(self, targetvar, fromtype, fromvar, targettype):
self.indent("%(targetvar)s = cast %(fromtype)s "
"%(fromvar)s to %(targettype)s" % locals())
- def malloc(self, targetvar, type):
- self.indent("%(targetvar)s = malloc %(type)s" % locals())
-
- def getelementptr(self, targetvar, type, typevar, index):
- self.indent("%(targetvar)s = getelementptr "
- "%(type)s %(typevar)s, int 0, uint %(index)s" % locals())
+ def malloc(self, targetvar, type_, size=1):
+ if use_boehm_gc:
+ cnt = count()
+ self.indent("%%malloc.Size.%(cnt)d = getelementptr %(type_)s* null, int %(size)d" % locals())
+ self.indent("%%malloc.SizeU.%(cnt)d = cast %(type_)s* %%malloc.Size.%(cnt)d to uint" % locals())
+ self.indent("%%malloc.Ptr.%(cnt)d = call sbyte* %%GC_malloc(uint %%malloc.SizeU.%(cnt)d)" % locals())
+ self.indent("%(targetvar)s = cast sbyte* %%malloc.Ptr.%(cnt)d to %(type_)s*" % locals())
+ else:
+ self.indent("%(targetvar)s = malloc %(type_)s, uint %(size)s" % locals())
+
+ def getelementptr(self, targetvar, type, typevar, *indices):
+ res = "%(targetvar)s = getelementptr %(type)s %(typevar)s, int 0, " % locals()
+ res += ", ".join(["%s %s" % (t, i) for t, i in indices])
+ self.indent(res)
def load(self, targetvar, targettype, ptr):
self.indent("%(targetvar)s = load %(targettype)s* %(ptr)s" % locals())
Modified: pypy/branch/dist-2.4.1/pypy/translator/llvm2/database.py
==============================================================================
--- pypy/branch/dist-2.4.1/pypy/translator/llvm2/database.py (original)
+++ pypy/branch/dist-2.4.1/pypy/translator/llvm2/database.py Sun Jul 3 20:36:05 2005
@@ -1,21 +1,60 @@
from pypy.translator.llvm2.log import log
-from pypy.translator.llvm2.funcnode import FuncNode
-from pypy.translator.llvm2.structnode import StructNode
+from pypy.translator.llvm2.funcnode import FuncNode, FuncTypeNode
+from pypy.translator.llvm2.structnode import StructNode, StructTypeNode, StructVarsizeTypeNode
+from pypy.translator.llvm2.arraynode import ArrayNode, ArrayTypeNode
from pypy.rpython import lltype
from pypy.objspace.flow.model import Block, Constant, Variable
log = log.database
PRIMITIVES_TO_LLVM = {lltype.Signed: "int",
- lltype.Bool: "bool"}
+ lltype.Char: "sbyte",
+ lltype.Unsigned: "uint",
+ lltype.Bool: "bool",
+ lltype.Float: "double",
+ lltype.Void: "void"}
+
+class NormalizingDict(object):
+ """ this is a helper dict for obj2node in order
+ to allow saner key-unification for Ptrs to functions
+ (and possibly other stuff in the future)
+ """
+ def __init__(self):
+ self._dict = {}
+ def __repr__(self):
+ return repr(self._dict)
+ def dump(self):
+ for x,y in self._dict.items():
+ print x, y
+ def _get(self, key):
+ if isinstance(key, Constant):
+ if isinstance(key.value, lltype._ptr):
+ key = key.value._obj
+ return key
+ def __getitem__(self, key):
+ key = self._get(key)
+ return self._dict[key]
+ def __contains__(self, key):
+ key = self._get(key)
+ return key in self._dict
+ def __setitem__(self, key, value):
+ key = self._get(key)
+ self._dict[key] = value
+ def __delitem__(self, key):
+ key = self._get(key)
+ del self._dict[key]
+ def values(self):
+ return self._dict.values()
+ def items(self):
+ return self._dict.items()
class Database(object):
def __init__(self, translator):
self._translator = translator
- self.obj2node = {}
+ self.obj2node = NormalizingDict()
self._pendingsetup = []
self._tmpcount = 1
-
+
def addpending(self, key, node):
assert key not in self.obj2node, (
"node with key %r already known!" %(key,))
@@ -24,16 +63,40 @@
self._pendingsetup.append(node)
def prepare_repr_arg(self, const_or_var):
+ """if const_or_var is not already in a dictionary self.obj2node,
+ the appropriate node gets constructed and gets added to
+ self._pendingsetup and to self.obj2node"""
if const_or_var in self.obj2node:
return
if isinstance(const_or_var, Constant):
- if isinstance(const_or_var.concretetype, lltype.Primitive):
- pass
- #log.prepare(const_or_var, "(is primitive)")
+
+ ct = const_or_var.concretetype
+ while isinstance(ct, lltype.Ptr):
+ ct = ct.TO
+
+ if isinstance(ct, lltype.FuncType):
+ self.addpending(const_or_var, FuncNode(self, const_or_var))
else:
- self.addpending(const_or_var, FuncNode(self, const_or_var))
+ value = const_or_var.value
+ while hasattr(value, "_obj"):
+ value = value._obj
+
+ if isinstance(ct, lltype.Struct):
+ self.addpending(const_or_var, StructNode(self, value))
+
+ elif isinstance(ct, lltype.Array):
+ self.addpending(const_or_var, ArrayNode(self, value))
+
+ elif isinstance(ct, lltype.Primitive):
+ log.prepare(const_or_var, "(is primitive)")
+ else:
+ log.XXX("not sure what to do about %s(%s)" % (ct, const_or_var))
else:
- log.prepare.ignore(const_or_var)
+ log.prepare(const_or_var, type(const_or_var)) #XXX dont checkin
+
+ def prepare_repr_arg_multi(self, args):
+ for const_or_var in args:
+ self.prepare_repr_arg(const_or_var)
def prepare_repr_arg_type(self, type_):
if type_ in self.obj2node:
@@ -42,24 +105,47 @@
pass
elif isinstance(type_, lltype.Ptr):
self.prepare_repr_arg_type(type_.TO)
- elif isinstance(type_, lltype.Struct):
- self.addpending(type_, StructNode(self, type_))
+
+ elif isinstance(type_, lltype.Struct):
+ if type_._arrayfld:
+ self.addpending(type_, StructVarsizeTypeNode(self, type_))
+ else:
+ self.addpending(type_, StructTypeNode(self, type_))
+ elif isinstance(type_, lltype.FuncType):
+ self.addpending(type_, FuncTypeNode(self, type_))
+
+ elif isinstance(type_, lltype.Array):
+ self.addpending(type_, ArrayTypeNode(self, type_))
+
else:
log.XXX("need to prepare typerepr", type_)
+ def prepare_repr_arg_type_multi(self, types):
+ for type_ in types:
+ self.prepare_repr_arg_type(type_)
+
def prepare_arg(self, const_or_var):
log.prepare(const_or_var)
- self.prepare_repr_arg(const_or_var)
self.prepare_repr_arg_type(const_or_var.concretetype)
+ self.prepare_repr_arg(const_or_var)
- def process(self):
- if self._pendingsetup:
- self._pendingsetup.pop().setup()
- return bool(self._pendingsetup)
-
- def getobjects(self):
- return self.obj2node.values()
+ def setup_all(self):
+ while self._pendingsetup:
+ x = self._pendingsetup.pop()
+ log.setup_all(x)
+ x.setup()
+
+ def getobjects(self, subset_types=None):
+ res = []
+ for v in self.obj2node.values():
+ if subset_types is None or isinstance(v, subset_types):
+ res.append(v)
+ return res
+
+ # __________________________________________________________
+ # Representing variables and constants in LLVM source code
+
def repr_arg(self, arg):
if (isinstance(arg, Constant) and
isinstance(arg.concretetype, lltype.Primitive)):
@@ -91,5 +177,3 @@
count = self._tmpcount
self._tmpcount += 1
return "%tmp." + str(count)
-
-
\ No newline at end of file
Modified: pypy/branch/dist-2.4.1/pypy/translator/llvm2/funcnode.py
==============================================================================
--- pypy/branch/dist-2.4.1/pypy/translator/llvm2/funcnode.py (original)
+++ pypy/branch/dist-2.4.1/pypy/translator/llvm2/funcnode.py Sun Jul 3 20:36:05 2005
@@ -2,19 +2,49 @@
from pypy.objspace.flow.model import Block, Constant, Variable, Link
from pypy.objspace.flow.model import flatten, mkentrymap, traverse
from pypy.rpython import lltype
-from pypy.translator.llvm2.cfgtransform import prepare_graph
+from pypy.translator.backendoptimization import remove_same_as
+from pypy.translator.unsimplify import remove_double_links
+from pypy.translator.llvm2.node import LLVMNode
from pypy.translator.llvm2.log import log
log = log.funcnode
-class FuncNode(object):
+
+class FuncTypeNode(LLVMNode):
+ func_type_node_counter = 0
+
+ def __init__(self, db, type_):
+ self.db = db
+ assert isinstance(type_, lltype.FuncType)
+ self.type_ = type_
+ ref = '"ft.%s.%s"' % (type_, FuncTypeNode.func_type_node_counter)
+ self.ref = ref.replace(" ", "")
+ FuncTypeNode.func_type_node_counter += 1
+
+ def __str__(self):
+ return "<FuncTypeNode %r>" % self.ref
+
+ def setup(self):
+ self.db.prepare_repr_arg_type(self.type_.RESULT)
+ self.db.prepare_repr_arg_type_multi(self.type_._trueargs())
+
+ def writedatatypedecl(self, codewriter):
+ returntype = self.db.repr_arg_type(self.type_.RESULT)
+ inputargtypes = self.db.repr_arg_type_multi(self.type_._trueargs())
+ decl = "%s type %s (%s)*" % (self.ref, returntype,
+ ", ".join(inputargtypes))
+ codewriter.funcdef(self.ref, returntype, inputargtypes)
+
+
+class FuncNode(LLVMNode):
_issetup = False
def __init__(self, db, const_ptr_func):
self.db = db
self.ref = "%" + const_ptr_func.value._obj._name
- self.graph = prepare_graph(const_ptr_func.value._obj.graph,
- db._translator)
-
+ self.graph = const_ptr_func.value._obj.graph
+ remove_same_as(self.graph)
+ remove_double_links(self.db._translator, self.graph)
+
def __str__(self):
return "<FuncNode %r>" %(self.ref,)
@@ -28,21 +58,9 @@
for op in node.operations:
map(self.db.prepare_arg, op.args)
self.db.prepare_arg(op.result)
+ assert self.graph, "cannot traverse"
traverse(visit, self.graph)
self._issetup = True
-
- def getdecl(self):
- assert self._issetup
- startblock = self.graph.startblock
- returnblock = self.graph.returnblock
- inputargs = self.db.repr_arg_multi(startblock.inputargs)
- inputargtypes = self.db.repr_arg_type_multi(startblock.inputargs)
- returntype = self.db.repr_arg_type(self.graph.returnblock.inputargs[0])
- result = "%s %s" % (returntype, self.ref)
- args = ["%s %s" % item for item in zip(inputargtypes, inputargs)]
- result += "(%s)" % ", ".join(args)
- return result
-
# ______________________________________________________________________
# main entry points from genllvm
def writedecl(self, codewriter):
@@ -72,6 +90,18 @@
# ______________________________________________________________________
# writing helpers for entry points
+ def getdecl(self):
+ assert self._issetup
+ startblock = self.graph.startblock
+ returnblock = self.graph.returnblock
+ inputargs = self.db.repr_arg_multi(startblock.inputargs)
+ inputargtypes = self.db.repr_arg_type_multi(startblock.inputargs)
+ returntype = self.db.repr_arg_type(self.graph.returnblock.inputargs[0])
+ result = "%s %s" % (returntype, self.ref)
+ args = ["%s %s" % item for item in zip(inputargtypes, inputargs)]
+ result += "(%s)" % ", ".join(args)
+ return result
+
def write_block(self, codewriter, block):
self.write_block_phi_nodes(codewriter, block)
self.write_block_operations(codewriter, block)
@@ -87,7 +117,8 @@
names = self.db.repr_arg_multi([link.args[i] for link in entrylinks])
blocknames = [self.block_to_name[link.prevblock]
for link in entrylinks]
- codewriter.phi(arg, type_, names, blocknames)
+ if type_ != "void":
+ codewriter.phi(arg, type_, names, blocknames)
def write_block_branches(self, codewriter, block):
if len(block.exits) == 1:
@@ -100,11 +131,7 @@
def write_block_operations(self, codewriter, block):
opwriter = OpWriter(self.db, codewriter)
for op in block.operations:
- meth = getattr(opwriter, op.opname, None)
- assert meth is not None, "operation %r not found" %(op.opname,)
- meth(op)
-
-
+ opwriter.write_operation(op)
def write_startblock(self, codewriter, block):
self.write_block_operations(codewriter, block)
self.write_block_branches(codewriter, block)
@@ -114,54 +141,71 @@
self.write_block_phi_nodes(codewriter, block)
inputargtype = self.db.repr_arg_type(block.inputargs[0])
inputarg = self.db.repr_arg(block.inputargs[0])
- codewriter.ret(inputargtype, inputarg)
+ if inputargtype != "void":
+ codewriter.ret(inputargtype, inputarg)
+ else:
+ codewriter.ret_void()
class OpWriter(object):
+ binary_operations = {'int_mul': 'mul',
+ 'int_add': 'add',
+ 'int_sub': 'sub',
+ 'int_floordiv': 'div',
+ 'int_mod': 'rem',
+ 'int_lt': 'setlt',
+ 'int_le': 'setle',
+ 'int_eq': 'seteq',
+ 'int_ne': 'setne',
+ 'int_ge': 'setge',
+ 'int_gt': 'setgt',
+
+ 'uint_mul': 'mul',
+ 'uint_add': 'add',
+ 'uint_sub': 'sub',
+ 'uint_floordiv': 'div',
+ 'uint_mod': 'rem',
+ 'uint_lt': 'setlt',
+ 'uint_le': 'setle',
+ 'uint_eq': 'seteq',
+ 'uint_ne': 'setne',
+ 'uint_ge': 'setge',
+ 'uint_gt': 'setgt',
+
+ 'float_mul': 'mul',
+ 'float_add': 'add',
+ 'float_sub': 'sub',
+ 'float_truediv': 'div',
+ 'float_mod': 'rem',
+ 'float_lt': 'setlt',
+ 'float_le': 'setle',
+ 'float_eq': 'seteq',
+ 'float_ne': 'setne',
+ 'float_ge': 'setge',
+ 'float_gt': 'setgt',
+ }
+
def __init__(self, db, codewriter):
self.db = db
self.codewriter = codewriter
- def binaryop(self, name, op):
+ def write_operation(self, op):
+ if op.opname in self.binary_operations:
+ self.binaryop(op)
+ else:
+ meth = getattr(self, op.opname, None)
+ assert meth is not None, "operation %r not found" %(op.opname,)
+ meth(op)
+
+ def binaryop(self, op):
+ name = self.binary_operations[op.opname]
assert len(op.args) == 2
self.codewriter.binaryop(name,
self.db.repr_arg(op.result),
self.db.repr_arg_type(op.args[0]),
self.db.repr_arg(op.args[0]),
self.db.repr_arg(op.args[1]))
- def int_mul(self, op):
- self.binaryop('mul', op)
-
- def int_floordiv(self, op):
- self.binaryop('div', op)
-
- def int_add(self, op):
- self.binaryop('add', op)
-
- def int_sub(self, op):
- self.binaryop('sub', op)
- def int_mod(self, op):
- self.binaryop('rem', op)
-
- def int_eq(self, op):
- self.binaryop('seteq', op)
-
- def int_ne(self, op):
- self.binaryop('setne', op)
-
- def int_lt(self, op):
- self.binaryop('setlt', op)
-
- def int_le(self, op):
- self.binaryop('setle', op)
-
- def int_gt(self, op):
- self.binaryop('setgt', op)
-
- def int_ge(self, op):
- self.binaryop('setge', op)
-
- def cast_bool_to_int(self, op):
+ def cast_primitive(self, op): #works for all primitives
assert len(op.args) == 1
targetvar = self.db.repr_arg(op.result)
targettype = self.db.repr_arg_type(op.result)
@@ -169,7 +213,24 @@
fromtype = self.db.repr_arg_type(op.args[0])
self.codewriter.cast(targetvar, fromtype, fromvar, targettype)
- int_is_true = cast_bool_to_int
+ cast_bool_to_int = cast_primitive
+ cast_bool_to_uint = cast_primitive
+
+ def int_is_true(self, op):
+ self.codewriter.binaryop("setne",
+ self.db.repr_arg(op.result),
+ self.db.repr_arg_type(op.args[0]),
+ self.db.repr_arg(op.args[0]),
+ "0")
+
+ uint_is_true = int_is_true
+
+ def float_is_true(self, op):
+ self.codewriter.binaryop("setne",
+ self.db.repr_arg(op.result),
+ self.db.repr_arg_type(op.args[0]),
+ self.db.repr_arg(op.args[0]),
+ "0.0")
def direct_call(self, op):
assert len(op.args) >= 1
@@ -178,8 +239,11 @@
functionref = self.db.repr_arg(op.args[0])
argrefs = self.db.repr_arg_multi(op.args[1:])
argtypes = self.db.repr_arg_type_multi(op.args[1:])
- self.codewriter.call(targetvar, returntype, functionref, argrefs,
- argtypes)
+ if returntype != "void":
+ self.codewriter.call(targetvar, returntype, functionref, argrefs,
+ argtypes)
+ else:
+ self.codewriter.call_void(functionref, argrefs, argtypes)
def malloc(self, op):
targetvar = self.db.repr_arg(op.result)
@@ -190,17 +254,38 @@
type = self.db.obj2node[arg.value].ref
self.codewriter.malloc(targetvar, type)
+ def malloc_varsize(self, op):
+ targetvar = self.db.repr_arg(op.result)
+ arg_type = op.args[0]
+ assert (isinstance(arg_type, Constant) and
+ isinstance(arg_type.value, lltype.Array))
+ #XXX unclean
+ struct_type = self.db.obj2node[arg_type.value].ref
+ struct_cons = self.db.obj2node[arg_type.value].constructor_ref
+ argrefs = self.db.repr_arg_multi(op.args[1:])
+ argtypes = self.db.repr_arg_type_multi(op.args[1:])
+ self.codewriter.call(targetvar, struct_type + "*", struct_cons,
+ argrefs, argtypes)
+
def getfield(self, op):
tmpvar = self.db.repr_tmpvar()
- type = self.db.repr_arg_type(op.args[0])
- typevar = self.db.repr_arg(op.args[0])
+ typ = self.db.repr_arg_type(op.args[0])
+ typevar = self.db.repr_arg(op.args[0])
fieldnames = list(op.args[0].concretetype.TO._names)
index = fieldnames.index(op.args[1].value)
- self.codewriter.getelementptr(tmpvar, type, typevar, index)
-
+ self.codewriter.getelementptr(tmpvar, typ, typevar, ("uint", index))
+
targetvar = self.db.repr_arg(op.result)
targettype = self.db.repr_arg_type(op.result)
- self.codewriter.load(targetvar, targettype, tmpvar)
+ assert targettype != "void"
+ #XXX This doesnt work - yet
+ #if isinstance(op.result.concretetype, lltype.Ptr):
+ # self.codewriter.cast(targetvar, targettype, tmpvar, targettype)
+ #else:
+ # Moving to correct result variable
+ #self.codewriter.load(targetvar, targettype, tmpvar)
+ self.codewriter.load(targetvar, targettype, tmpvar)
+ getsubstruct = getfield
def setfield(self, op):
tmpvar = self.db.repr_tmpvar()
@@ -208,8 +293,51 @@
typevar = self.db.repr_arg(op.args[0])
fieldnames = list(op.args[0].concretetype.TO._names)
index = fieldnames.index(op.args[1].value)
- self.codewriter.getelementptr(tmpvar, type, typevar, index)
+ self.codewriter.getelementptr(tmpvar, type, typevar, ("uint", index))
+ valuevar = self.db.repr_arg(op.args[2])
+ valuetype = self.db.repr_arg_type(op.args[2])
+ assert valuetype != "void"
+ self.codewriter.store(valuetype, valuevar, tmpvar)
+
+ def getarrayitem(self, op):
+ var = self.db.repr_arg(op.args[0])
+ vartype = self.db.repr_arg_type(op.args[0])
+ index = self.db.repr_arg(op.args[1])
+ indextype = self.db.repr_arg_type(op.args[1])
+
+ tmpvar = self.db.repr_tmpvar()
+ self.codewriter.getelementptr(tmpvar, vartype, var,
+ ("uint", 1), (indextype, index))
+
+ targetvar = self.db.repr_arg(op.result)
+ targettype = self.db.repr_arg_type(op.result)
+
+ # Ditto see getfield
+ if not isinstance(op.result.concretetype, lltype.Ptr):
+ self.codewriter.load(targetvar, targettype, tmpvar)
+ else:
+ # XXX noop
+ self.codewriter.cast(targetvar, targettype, tmpvar, targettype)
+
+ def setarrayitem(self, op):
+ array = self.db.repr_arg(op.args[0])
+ arraytype = self.db.repr_arg_type(op.args[0])
+ index = self.db.repr_arg(op.args[1])
+ indextype = self.db.repr_arg_type(op.args[1])
+
+ tmpvar = self.db.repr_tmpvar()
+ self.codewriter.getelementptr(tmpvar, arraytype, array,
+ ("uint", 1), (indextype, index))
valuevar = self.db.repr_arg(op.args[2])
valuetype = self.db.repr_arg_type(op.args[2])
self.codewriter.store(valuetype, valuevar, tmpvar)
+
+ def getarraysize(self, op):
+ var = self.db.repr_arg(op.args[0])
+ vartype = self.db.repr_arg_type(op.args[0])
+ tmpvar = self.db.repr_tmpvar()
+ self.codewriter.getelementptr(tmpvar, vartype, var, ("uint", 0))
+ targetvar = self.db.repr_arg(op.result)
+ targettype = self.db.repr_arg_type(op.result)
+ self.codewriter.load(targetvar, targettype, tmpvar)
Modified: pypy/branch/dist-2.4.1/pypy/translator/llvm2/genllvm.py
==============================================================================
--- pypy/branch/dist-2.4.1/pypy/translator/llvm2/genllvm.py (original)
+++ pypy/branch/dist-2.4.1/pypy/translator/llvm2/genllvm.py Sun Jul 3 20:36:05 2005
@@ -1,3 +1,6 @@
+from os.path import exists
+use_boehm_gc = exists('/usr/lib/libgc.so')
+
import py
from pypy.translator.llvm2 import build_llvm_module
from pypy.translator.llvm2.database import Database
@@ -8,8 +11,10 @@
from pypy.rpython import lltype
from pypy.tool.udir import udir
from pypy.translator.llvm2.codewriter import CodeWriter
+from pypy.translator.backendoptimization import remove_void
def genllvm(translator):
+ remove_void(translator)
func = translator.entrypoint
db = Database(translator)
@@ -17,18 +22,31 @@
c = inputconst(lltype.typeOf(ptr), ptr)
db.prepare_repr_arg(c)
assert c in db.obj2node
- while db.process():
- pass
+ db.setup_all()
entrynode = db.obj2node[c]
codewriter = CodeWriter()
- dbobjects = db.getobjects()
- log.debug(dbobjects)
- log.debug(db.obj2node)
- for node in dbobjects:
- node.writedecl(codewriter)
- codewriter.startimpl()
- for node in dbobjects:
- node.writeimpl(codewriter)
+ comment = codewriter.comment
+ nl = codewriter.newline
+
+ nl(); comment("Type Declarations"); nl()
+ for typ_decl in db.getobjects():
+ typ_decl.writedatatypedecl(codewriter)
+
+ nl(); comment("Global Data") ; nl()
+ for typ_decl in db.getobjects():
+ typ_decl.writeglobalconstants(codewriter)
+
+ nl(); comment("Function Prototypes") ; nl()
+ for typ_decl in db.getobjects():
+ typ_decl.writedecl(codewriter)
+
+ #import pdb ; pdb.set_trace()
+ nl(); comment("Function Implementation")
+ codewriter.startimpl()
+ for typ_decl in db.getobjects():
+ typ_decl.writeimpl(codewriter)
+
+ comment("End of file") ; nl()
targetdir = udir
llvmsource = targetdir.join(func.func_name).new(ext='.ll')
Modified: pypy/branch/dist-2.4.1/pypy/translator/llvm2/pyxwrapper.py
==============================================================================
--- pypy/branch/dist-2.4.1/pypy/translator/llvm2/pyxwrapper.py (original)
+++ pypy/branch/dist-2.4.1/pypy/translator/llvm2/pyxwrapper.py Sun Jul 3 20:36:05 2005
@@ -3,7 +3,11 @@
log = log.pyrex
PRIMITIVES_TO_C = {lltype.Signed: "int",
- lltype.Bool: "char"}
+ lltype.Unsigned: "unsigned int",
+ lltype.Bool: "char",
+ lltype.Float: "double",
+ lltype.Char: "char",
+ }
def write_pyx_wrapper(funcgen, targetpath):
def c_declaration():
Modified: pypy/branch/dist-2.4.1/pypy/translator/llvm2/structnode.py
==============================================================================
--- pypy/branch/dist-2.4.1/pypy/translator/llvm2/structnode.py (original)
+++ pypy/branch/dist-2.4.1/pypy/translator/llvm2/structnode.py Sun Jul 3 20:36:05 2005
@@ -1,36 +1,125 @@
import py
from pypy.objspace.flow.model import Block, Constant, Variable, Link
-from pypy.translator.llvm2.log import log
+from pypy.translator.llvm2.log import log
+from pypy.translator.llvm2.node import LLVMNode
+from pypy.rpython import lltype
+
log = log.structnode
-class StructNode(object):
+class StructTypeNode(LLVMNode):
_issetup = False
struct_counter = 0
def __init__(self, db, struct):
+ assert isinstance(struct, lltype.Struct)
self.db = db
- self.struct = struct
- self.ref = "%%st.%s.%s" % (struct._name, StructNode.struct_counter)
- StructNode.struct_counter += 1
+ self.struct = struct
+ self.name = "%s.%s" % (self.struct._name, StructTypeNode.struct_counter)
+ self.ref = "%%st.%s" % self.name
+ StructTypeNode.struct_counter += 1
def __str__(self):
- return "<StructNode %r>" %(self.ref,)
+ return "<StructTypeNode %r>" %(self.ref,)
def setup(self):
- log.XXX("setup", self)
+ # Recurse
+ for field in self.struct._flds:
+ self.db.prepare_repr_arg_type(field)
self._issetup = True
# ______________________________________________________________________
- # entry points from genllvm
- #
- def writedecl(self, codewriter):
+ # main entry points from genllvm
+
+ def writedatatypedecl(self, codewriter):
assert self._issetup
- struct = self.struct
- l = []
- for fieldname in struct._names:
- type_ = getattr(struct, fieldname)
- l.append(self.db.repr_arg_type(type_))
- codewriter.structdef(self.ref, l)
+ fields = [getattr(self.struct, name) for name in self.struct._names_without_voids()]
+ l = [self.db.repr_arg_type(field) for field in fields]
+ codewriter.structdef(self.ref, l)
+
+class StructVarsizeTypeNode(StructTypeNode):
+
+ def __init__(self, db, struct):
+ super(StructVarsizeTypeNode, self).__init__(db, struct)
+ new_var_name = "%%new.st.var.%s" % self.name
+ self.constructor_name = "%s * %s(int %%len)" % (self.ref, new_var_name)
+
+ def writedecl(self, codewriter):
+ # declaration for constructor
+ codewriter.declare(self.constructor_name)
def writeimpl(self, codewriter):
- assert self._issetup
+ log.writeimpl(self.ref)
+ codewriter.openfunc(self.constructor_name)
+ codewriter.label("block0")
+ indices_to_array = [("int", 0)]
+ s = self.struct
+ while isinstance(s, lltype.Struct):
+ last_pos = len(self.struct._names_without_voids()) - 1
+ indices_to_array.append(("uint", last_pos))
+ s = s._flds.values()[-1]
+
+ # Into array and length
+ indices = indices_to_array + [("uint", 1), ("int", "%len")]
+ codewriter.getelementptr("%size", self.ref + "*",
+ "null", *indices)
+
+ #XXX is this ok for 64bit?
+ codewriter.cast("%sizeu", self.ref + "*", "%size", "uint") # NOTE(review): was undefined name 'arraytype'; verify source pointer type
+ codewriter.malloc("%resulttmp", "sbyte", "%sizeu") # malloc(targetvar, type_, size); stray "uint" arg was a TypeError
+ codewriter.cast("%result", "sbyte*", "%resulttmp", self.ref + "*")
+
+ # remember the allocated length for later use.
+ indices = indices_to_array + [("uint", 0)]
+ codewriter.getelementptr("%size_ptr", self.ref + "*",
+ "%result", *indices)
+
+ codewriter.cast("%signedsize", "uint", "%sizeu", "int")
+ codewriter.store("int", "%signedsize", "%size_ptr")
+
+ codewriter.ret(self.ref + "*", "%result")
+ codewriter.closefunc()
+
+class StructNode(LLVMNode):
+ _issetup = False
+ struct_counter = 0
+
+ def __init__(self, db, value):
+ self.db = db
+ self.name = "%s.%s" % (value._TYPE._name, StructNode.struct_counter)
+ self.ref = "%%stinstance.%s" % self.name
+ self.value = value
+ StructNode.struct_counter += 1
+
+ def __str__(self):
+ return "<StructNode %r>" %(self.ref,)
+
+ def setup(self):
+ for name in self.value._TYPE._names_without_voids():
+ T = self.value._TYPE._flds[name]
+ assert T is not lltype.Void
+ if not isinstance(T, lltype.Primitive):
+ value = getattr(self.value, name)
+ # Create a dummy constant hack XXX
+ c = Constant(value, T)
+ self.db.prepare_arg(c)
+
+ self._issetup = True
+
+ def get_values(self):
+ res = []
+ for name in self.value._TYPE._names_without_voids():
+ T = self.value._TYPE._flds[name]
+ value = getattr(self.value, name)
+ if not isinstance(T, lltype.Primitive):
+ # Create a dummy constant hack XXX
+ value = self.db.repr_arg(Constant(value, T))
+ else:
+ value = str(value)
+ res.append((self.db.repr_arg_type(T), value))
+ return ", ".join(["%s %s" % (t, v) for t, v in res])
+
+ def writeglobalconstants(self, codewriter):
+ codewriter.globalinstance(self.ref,
+ self.db.repr_arg_type(self.value._TYPE),
+ self.get_values())
+
Modified: pypy/branch/dist-2.4.1/pypy/translator/llvm2/test/test_genllvm.py
==============================================================================
--- pypy/branch/dist-2.4.1/pypy/translator/llvm2/test/test_genllvm.py (original)
+++ pypy/branch/dist-2.4.1/pypy/translator/llvm2/test/test_genllvm.py Sun Jul 3 20:36:05 2005
@@ -5,10 +5,12 @@
from pypy.translator.translator import Translator
from pypy.translator.llvm2.genllvm import genllvm
+from pypy.translator.llvm2.genllvm import use_boehm_gc
from pypy.translator.llvm2.test import llvmsnippet
from pypy.objspace.flow.model import Constant, Variable
from pypy.rpython.rtyper import RPythonTyper
+from pypy.rpython.rarithmetic import r_uint
py.log.setconsumer("genllvm", py.log.STDOUT)
py.log.setconsumer("genllvm database prepare", None)
@@ -17,13 +19,32 @@
## def setup_module(mod):
## mod.llvm_found = is_on_path("llvm-as")
-def compile_function(function, annotate):
+def compile_function(function, annotate, view=False):
t = Translator(function)
a = t.annotate(annotate)
t.specialize()
a.simplify()
+ if view:
+ t.view()
return genllvm(t)
+def test_GC_malloc():
+ if not use_boehm_gc:
+ py.test.skip("test_GC_malloc skipped because Boehm collector library was not found")
+ return
+ py.test.skip("test_GC_malloc skipped because test not yet correct (Boehm collector IS used anyway)")
+ return
+ def tuple_getitem(n):
+ x = 0
+ i = 0
+ while i < n:
+ l = (1,2,i,234,23,23,23,234,234,234,234)
+ x += l[2]
+ i += 1
+ return x
+ f = compile_function(tuple_getitem, [int])
+ t = 1024*1024*100
+ f(t) #assert f(t) == t
def test_return1():
def simple1():
@@ -31,6 +52,20 @@
f = compile_function(simple1, [])
assert f() == 1
+def Xtest_simple_function_pointer():
+ def f1(x):
+ return x + 1
+ def f2(x):
+ return x + 2
+
+ l = [f1, f2]
+
+ def pointersimple(i):
+ return l[i]
+
+ f = compile_function(pointersimple, [int])
+ assert f
+
def test_simple_branching():
def simple5(b):
if b:
@@ -59,6 +94,91 @@
assert f(1) == 1
assert f(2) == 2
+def test_while_loop():
+ def factorial(i):
+ r = 1
+ while i>1:
+ r *= i
+ i -= 1
+ return r
+ f = compile_function(factorial, [int])
+ assert factorial(4) == 24
+ assert factorial(5) == 120
+ f = compile_function(factorial, [float])
+ assert factorial(4.) == 24.
+ assert factorial(5.) == 120.
+
+def test_return_void():
+ def return_void(i):
+ return None
+ def call_return_void(i):
+ return_void(i)
+ return 1
+ f = compile_function(call_return_void, [int])
+ assert f(10) == 1
+
+def test_break_while_loop():
+ def factorial(i):
+ r = 1
+ while 1:
+ if i<=1:
+ break
+ r *= i
+ i -= 1
+ return r
+ f = compile_function(factorial, [int])
+ assert factorial(4) == 24
+ assert factorial(5) == 120
+
+
+def test_primitive_is_true():
+ def var_is_true(v):
+ return bool(v)
+ f = compile_function(var_is_true, [int])
+ assert f(256)
+ assert not f(0)
+ f = compile_function(var_is_true, [r_uint])
+ assert f(r_uint(256))
+ assert not f(r_uint(0))
+ f = compile_function(var_is_true, [float])
+ assert f(256.0)
+ assert not f(0.0)
+
+
+def test_uint_ops():
+ def ops(i):
+ x = r_uint(0)
+ x += i < i
+ x += i <= i
+ x += i == i
+ x += i != i
+ x += i >= i
+ x += i > i
+ x += x % i
+ #x += i is not None
+ #x += i is None
+ return i + 1 * i // i - 1
+ f = compile_function(ops, [r_uint])
+ assert f(1) == 1
+ assert f(2) == 2
+
+def test_float_ops():
+ def ops(flt):
+ x = 0
+ x += flt < flt
+ x += flt <= flt
+ x += flt == flt
+ x += flt != flt
+ x += flt >= flt
+ x += flt > flt
+ #x += flt is not None
+ #x += flt is None
+ return flt + 1 * flt / flt - 1
+ f = compile_function(ops, [float])
+ assert f(1) == 1
+ assert f(2) == 2
+
+
def test_function_call():
def callee():
return 1
@@ -76,7 +196,7 @@
if m == 0:
return ackermann(n - 1, 1)
return ackermann(n - 1, ackermann(n, m - 1))
- f = compile_function(call_ackermann, [int, int])
+ f = compile_function(call_ackermann, [int, int], view=False)
assert f(0, 2) == 3
def test_tuple_getitem():
@@ -86,9 +206,89 @@
f = compile_function(tuple_getitem, [int])
assert f(1) == 2
-def test_nested_tuple():
+def test_nested_tuple():
def nested_tuple(i):
l = (1,(1,2,i),i)
return l[1][2]
f = compile_function(nested_tuple, [int])
- assert f(4) == 4
+ assert f(4) == 4
+
+def test_pbc_fns():
+ def f2(x):
+ return x+1
+ def f3(x):
+ return x+2
+ def g(y):
+ if y < 0:
+ f = f2
+ else:
+ f = f3
+ return f(y+3)
+ f = compile_function(g, [int])
+ assert f(-1) == 3
+ assert f(0) == 5
+
+def DONOT_test_simple_chars():
+ def char_constant2(s):
+ s = s + s + s
+ return len(s + '.')
+ def char_constant():
+ return char_constant2("kk")
+ f = compile_function(char_constant, [])
+ assert f() == 7
+
+def test_list_getitem():
+ def list_getitem(i):
+ l = [1,2,i+1]
+ return l[i]
+ f = compile_function(list_getitem, [int])
+ assert f(0) == 1
+ assert f(1) == 2
+ assert f(2) == 3
+
+def test_list_basic_ops():
+ def list_basic_ops(i, j):
+ l = [1,2,3]
+ l.insert(0, 42)
+ del l[1]
+ l.append(i)
+ listlen = len(l)
+ l.extend(l)
+ del l[listlen:]
+ l += [5,6]
+ l[1] = i
+ return l[j]
+ f = compile_function(list_basic_ops, [int, int])
+ for i in range(6):
+ for j in range(6):
+ assert f(i,j) == list_basic_ops(i,j)
+
+def Xtest_string_getitem1():
+ l = "Hello, World"
+ def string_test(i):
+ return l[i]
+ f = compile_function(string_test, [int], view=True)
+ assert f(0) == ord("H")
+
+def DONOT_test_string_getitem2():
+ def string_test(i):
+ l = "Hello, World"
+ return l[i]
+ f = compile_function(string_test, [int])
+ assert f(0) == ord("H")
+
+class TestException(Exception):
+ pass
+
+def DONOTtest_exception():
+ def raise_(i):
+ if i:
+ raise TestException()
+ else:
+ return 1
+ def catch(i):
+ try:
+ return raise_(i)
+ except TestException:
+ return 0
+ f = compile_function(catch, [int])
Modified: pypy/branch/dist-2.4.1/pypy/translator/tool/pygame/drawgraph.py
==============================================================================
--- pypy/branch/dist-2.4.1/pypy/translator/tool/pygame/drawgraph.py (original)
+++ pypy/branch/dist-2.4.1/pypy/translator/tool/pygame/drawgraph.py Sun Jul 3 20:36:05 2005
@@ -116,56 +116,77 @@
             rest = rest[3:]
         self.style, self.color = rest
         self.highlight = False
+        self.cachedbezierpoints = None
+        self.cachedarrowhead = None
+        self.cachedlimits = None
 
     def sethighlight(self, which):
         self.highlight = bool(which)
 
-    def bezierpoints(self, resolution=8):
-        result = []
-        pts = self.points
-        for i in range(0, len(pts)-3, 3):
-            result += beziercurve(pts[i], pts[i+1],
-                                  pts[i+2], pts[i+3], resolution)
+    def limits(self):
+        result = self.cachedlimits
+        if result is None:
+            points = self.bezierpoints()
+            xs = [point[0] for point in points]
+            ys = [point[1] for point in points]
+            self.cachedlimits = result = (min(xs), max(ys), max(xs), min(ys))
+        return result
+
+    def bezierpoints(self):
+        result = self.cachedbezierpoints
+        if result is None:
+            result = []
+            pts = self.points
+            for i in range(0, len(pts)-3, 3):
+                result += beziercurve(pts[i], pts[i+1], pts[i+2], pts[i+3])
+            self.cachedbezierpoints = result
         return result
     def arrowhead(self):
-        bottom_up = self.points[0][1] > self.points[-1][1]
-        if (self.tail.y > self.head.y) != bottom_up:   # reversed edge
-            head = 0
-            dir = 1
-        else:
-            head = -1
-            dir = -1
-        n = 1
-        while True:
-            try:
-                x0, y0 = self.points[head]
-                x1, y1 = self.points[head+n*dir]
-            except IndexError:
-                return []
-            vx = x0-x1
-            vy = y0-y1
-            try:
-                f = 0.12 / math.sqrt(vx*vx + vy*vy)
-                vx *= f
-                vy *= f
-                return [(x0 + 0.9*vx, y0 + 0.9*vy),
-                        (x0 + 0.4*vy, y0 - 0.4*vx),
-                        (x0 - 0.4*vy, y0 + 0.4*vx)]
-            except (ZeroDivisionError, ValueError):
-                n += 1
+        result = self.cachedarrowhead
+        if result is None:
+            bottom_up = self.points[0][1] > self.points[-1][1]
+            if (self.tail.y > self.head.y) != bottom_up:   # reversed edge
+                head = 0
+                dir = 1
+            else:
+                head = -1
+                dir = -1
+            n = 1
+            while True:
+                try:
+                    x0, y0 = self.points[head]
+                    x1, y1 = self.points[head+n*dir]
+                except IndexError:
+                    result = []
+                    break
+                vx = x0-x1
+                vy = y0-y1
+                try:
+                    f = 0.12 / math.sqrt(vx*vx + vy*vy)
+                    vx *= f
+                    vy *= f
+                    result = [(x0 + 0.9*vx, y0 + 0.9*vy),
+                              (x0 + 0.4*vy, y0 - 0.4*vx),
+                              (x0 - 0.4*vy, y0 + 0.4*vx)]
+                    break
+                except (ZeroDivisionError, ValueError):
+                    n += 1
+            self.cachedarrowhead = result
+        return result
 def beziercurve((x0,y0), (x1,y1), (x2,y2), (x3,y3), resolution=8):
     result = []
     f = 1.0/(resolution-1)
+    append = result.append
     for i in range(resolution):
         t = f*i
         t0 = (1-t)*(1-t)*(1-t)
         t1 = t *(1-t)*(1-t) * 3.0
         t2 = t * t *(1-t)     * 3.0
         t3 = t * t * t
-        result.append((x0*t0 + x1*t1 + x2*t2 + x3*t3,
-                       y0*t0 + y1*t1 + y2*t2 + y3*t3))
+        append((x0*t0 + x1*t1 + x2*t2 + x3*t3,
+                y0*t0 + y1*t1 + y2*t2 + y3*t3))
     return result
 def segmentdistance((x0,y0), (x1,y1), (x,y)):
@@ -173,16 +194,16 @@
     vx = x1-x0
     vy = y1-y0
     try:
-        l = math.sqrt(vx*vx+vy*vy)
+        l = math.hypot(vx, vy)
         vx /= l
         vy /= l
         dlong = vx*(x-x0) + vy*(y-y0)
     except (ZeroDivisionError, ValueError):
         dlong = -1
     if dlong < 0.0:
-        return math.sqrt((x-x0)*(x-x0) + (y-y0)*(y-y0))
+        return math.hypot(x-x0, y-y0)
     elif dlong > l:
-        return math.sqrt((x-x1)*(x-x1) + (y-y1)*(y-y1))
+        return math.hypot(x-x1, y-y1)
     else:
         return abs(vy*(x-x0) - vx*(y-y0))
@@ -209,6 +230,8 @@
         self.textzones = []
         self.highlightwords = graphlayout.links
         self.highlight_word = None
+        self.visiblenodes = []
+        self.visibleedges = []
 
     def wordcolor(self, word):
         if word == self.highlight_word:
@@ -298,8 +321,26 @@
         coordinates may sometimes become longs and cause OverflowErrors
         within pygame.
         """
-        return (x1 < self.width-self.ofsx and x2 > -self.ofsx and
-                y1 < self.height-self.ofsy and y2 > -self.ofsy)
+        w, h = self.screen.get_size()
+        return x1 < w and x2 > 0 and y1 < h and y2 > 0
+
+    def computevisible(self):
+        del self.visiblenodes[:]
+        del self.visibleedges[:]
+        w, h = self.screen.get_size()
+        for node in self.graphlayout.nodes.values():
+            x, y = self.map(node.x, node.y)
+            nw2 = int(node.w * self.scale)//2
+            nh2 = int(node.h * self.scale)//2
+            if x-nw2 < w and x+nw2 > 0 and y-nh2 < h and y+nh2 > 0:
+                self.visiblenodes.append(node)
+        for edge in self.graphlayout.edges:
+            x1, y1, x2, y2 = edge.limits()
+            x1, y1 = self.map(x1, y1)
+            if x1 < w and y1 < h:
+                x2, y2 = self.map(x2, y2)
+                if x2 > 0 and y2 > 0:
+                    self.visibleedges.append(edge)
 
     def map(self, x, y):
         return (int(x*self.scale) - (self.ofsx - self.margin),
@@ -382,6 +423,14 @@
             def cmd():
                 pygame.draw.rect(self.screen, fgcolor, rect, 1)
             commands.append(cmd)
+        elif node.shape == 'ellipse':
+            rect = (x-1, y-1, boxwidth+2, boxheight+2)
+            def cmd():
+                pygame.draw.ellipse(self.screen, bgcolor, rect, 0)
+            bkgndcommands.append(cmd)
+            def cmd():
+                pygame.draw.ellipse(self.screen, fgcolor, rect, 1)
+            commands.append(cmd)
         elif node.shape == 'octagon':
             step = 1-math.sqrt(2)/2
             points = [(int(x+boxwidth*fx), int(y+boxheight*fy))
@@ -400,14 +449,15 @@
     def draw_commands(self):
         nodebkgndcmd = []
         nodecmd = []
-        for node in self.graphlayout.nodes.values():
+        for node in self.visiblenodes:
             cmd1, cmd2 = self.draw_node_commands(node)
             nodebkgndcmd += cmd1
             nodecmd += cmd2
         edgebodycmd = []
         edgeheadcmd = []
-        for edge in self.graphlayout.edges:
+        for edge in self.visibleedges:
+
             fgcolor = getcolor(edge.color, (0,0,0))
             if edge.highlight:
                 fgcolor = highlight_color(fgcolor)
@@ -435,6 +485,8 @@
         return edgebodycmd + nodebkgndcmd + edgeheadcmd + nodecmd
 
     def render(self):
+        self.computevisible()
+
         bbox = self.getboundingbox()
         self.screen.fill((224, 255, 224), bbox)
@@ -478,7 +530,7 @@
     def node_at_position(self, (x, y)):
         """Return the Node under the cursor."""
         x, y = self.revmap(x, y)
-        for node in self.graphlayout.nodes.itervalues():
+        for node in self.visiblenodes:
             if 2.0*abs(x-node.x) <= node.w and 2.0*abs(y-node.y) <= node.h:
                 return node
         return None
@@ -489,7 +541,7 @@
         distmax /= self.scale
         xy = self.revmap(x, y)
         closest_edge = None
-        for edge in self.graphlayout.edges:
+        for edge in self.visibleedges:
            pts = edge.bezierpoints()
            for i in range(1, len(pts)):
                d = segmentdistance(pts[i-1], pts[i], xy)
More information about the Pypy-commit
mailing list