[pypy-commit] pypy cppyy-packaging: merge default into branch
wlav
pypy.commits at gmail.com
Fri Aug 4 18:51:08 EDT 2017
Author: Wim Lavrijsen <WLavrijsen at lbl.gov>
Branch: cppyy-packaging
Changeset: r92080:3a13ca0c3f09
Date: 2017-08-04 15:37 -0700
http://bitbucket.org/pypy/pypy/changeset/3a13ca0c3f09/
Log: merge default into branch
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -10,7 +10,7 @@
RUNINTERP = $(PYPY_EXECUTABLE)
endif
-.PHONY: cffi_imports
+.PHONY: pypy-c cffi_imports
pypy-c:
@echo
@@ -32,7 +32,7 @@
@echo "===================================================================="
@echo
@sleep 5
- $(RUNINTERP) rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py
+ cd pypy/goal && $(RUNINTERP) ../../rpython/bin/rpython -Ojit targetpypystandalone.py
# Note: the -jN option, or MAKEFLAGS=-jN, are not usable. They are
# replaced with an opaque --jobserver option by the time this Makefile
@@ -40,4 +40,4 @@
# http://lists.gnu.org/archive/html/help-make/2010-08/msg00106.html
cffi_imports: pypy-c
- PYTHONPATH=. ./pypy-c pypy/tool/build_cffi_imports.py || /bin/true
+ PYTHONPATH=. pypy/goal/pypy-c pypy/tool/build_cffi_imports.py || /bin/true
diff --git a/lib-python/2.7/distutils/unixccompiler.py b/lib-python/2.7/distutils/unixccompiler.py
--- a/lib-python/2.7/distutils/unixccompiler.py
+++ b/lib-python/2.7/distutils/unixccompiler.py
@@ -226,7 +226,19 @@
return "-L" + dir
def _is_gcc(self, compiler_name):
- return "gcc" in compiler_name or "g++" in compiler_name
+ # XXX PyPy workaround, look at the big comment below for more
+ # context. On CPython, the hack below works fine because
+ # `compiler_name` contains the name of the actual compiler which was
+ # used at compile time (e.g. 'x86_64-linux-gnu-gcc' on my machine).
+ # PyPy hardcodes it to 'cc', so the hack doesn't work, and the end
+ # result is that we pass the wrong option to the compiler.
+ #
+ # The workaround is to *always* pretend to be GCC if we are on Linux:
+ # this should cover the vast majority of real systems, including the
+ # ones which use clang (which understands the '-Wl,-rpath' syntax as
+ # well)
+ return (sys.platform == "linux2" or
+ "gcc" in compiler_name or "g++" in compiler_name)
def runtime_library_dir_option(self, dir):
# XXX Hackish, at the very least. See Python bug #445902:
diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py
--- a/lib_pypy/cffi/api.py
+++ b/lib_pypy/cffi/api.py
@@ -394,12 +394,17 @@
replace_with = ' ' + replace_with
return self._backend.getcname(cdecl, replace_with)
- def gc(self, cdata, destructor):
+ def gc(self, cdata, destructor, size=0):
"""Return a new cdata object that points to the same
data. Later, when this new cdata object is garbage-collected,
'destructor(old_cdata_object)' will be called.
+
+ The optional 'size' gives an estimate of the size, used to
+ trigger the garbage collection more eagerly. So far only used
+ on PyPy. It tells the GC that the returned object keeps alive
+ roughly 'size' bytes of external memory.
"""
- return self._backend.gcp(cdata, destructor)
+ return self._backend.gcp(cdata, destructor, size)
def _get_cached_btype(self, type):
assert self._lock.acquire(False) is False
diff --git a/lib_pypy/cffi/backend_ctypes.py b/lib_pypy/cffi/backend_ctypes.py
--- a/lib_pypy/cffi/backend_ctypes.py
+++ b/lib_pypy/cffi/backend_ctypes.py
@@ -1002,7 +1002,7 @@
_weakref_cache_ref = None
- def gcp(self, cdata, destructor):
+ def gcp(self, cdata, destructor, size=0):
if self._weakref_cache_ref is None:
import weakref
class MyRef(weakref.ref):
diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py
--- a/pypy/config/pypyoption.py
+++ b/pypy/config/pypyoption.py
@@ -224,11 +224,6 @@
"use specialised tuples",
default=False),
- BoolOption("withcelldict",
- "use dictionaries that are optimized for being used as module dicts",
- default=False,
- requires=[("objspace.honor__builtins__", False)]),
-
BoolOption("withliststrategies",
"enable optimized ways to store lists of primitives ",
default=True),
@@ -288,7 +283,7 @@
# extra optimizations with the JIT
if level == 'jit':
- config.objspace.std.suggest(withcelldict=True)
+ pass # none at the moment
def enable_allworkingmodules(config):
diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst
--- a/pypy/doc/build.rst
+++ b/pypy/doc/build.rst
@@ -152,22 +152,61 @@
Run the translation
-------------------
+We usually translate in the ``pypy/goal`` directory, so all the following
+commands assume your ``$pwd`` is there.
+
Translate with JIT::
- cd pypy/goal
pypy ../../rpython/bin/rpython --opt=jit
Translate without JIT::
- cd pypy/goal
pypy ../../rpython/bin/rpython --opt=2
+Note this translates pypy via the ``targetpypystandalone.py`` file, so these
+are shorthand for::
+
+ pypy ../../rpython/bin/rpython <rpython options> targetpypystandalone.py <pypy options>
+
+More help is available via ``--help`` at either option position, and more info
+can be found in the :doc:`config/index` section.
+
(You can use ``python`` instead of ``pypy`` here, which will take longer
but works too.)
-If everything works correctly this will create an executable ``pypy-c`` in the
-current directory. The executable behaves mostly like a normal Python
-interpreter (see :doc:`cpython_differences`).
+If everything works correctly this will:
+
+1. Run the rpython `translation chain`_, producing a database of the
+   entire pypy interpreter. This step is currently single threaded, and RAM
+ hungry. As part of this step, the chain creates a large number of C code
+ files and a Makefile to compile them in a
+ directory controlled by the ``PYPY_USESSION_DIR`` environment variable.
+2. Create an executable ``pypy-c`` by running the Makefile. This step can
+ utilize all possible cores on the machine.
+3. Copy the needed binaries to the current directory.
+4. Generate c-extension modules for any cffi-based stdlib modules.
+
+
+The resulting executable behaves mostly like a normal Python
+interpreter (see :doc:`cpython_differences`), and is ready for testing, for
+use as a base interpreter for a new virtualenv, or for packaging into a binary
+suitable for installation on another machine running the same OS as the build
+machine.
+
+Note that step 4 is merely done as a convenience, any of the steps may be rerun
+without rerunning the previous steps.
+
+.. _`translation chain`: https://rpython.readthedocs.io/en/latest/translation.html
+
+
+Making a debug build of PyPy
+----------------------------
+
+If the Makefile is rerun with the lldebug or lldebug0 target, appropriate
+compilation flags are added to add debug info and reduce compiler optimizations
+to ``-O0`` respectively. If you stop in a debugger, you will see the
+very wordy machine-generated C code from the rpython translation step, which
+takes a little bit of reading to relate back to the rpython code.
Build cffi import libraries for the stdlib
------------------------------------------
@@ -181,14 +220,6 @@
.. _`out-of-line API mode`: http://cffi.readthedocs.org/en/latest/overview.html#real-example-api-level-out-of-line
-Translating with non-standard options
--------------------------------------
-
-It is possible to have non-standard features enabled for translation,
-but they are not really tested any more. Look, for example, at the
-:doc:`objspace proxies <objspace-proxies>` document.
-
-
Packaging (preparing for installation)
--------------------------------------
@@ -217,14 +248,16 @@
* PyPy 2.5.1 or earlier: normal users would see permission errors.
Installers need to run ``pypy -c "import gdbm"`` and other similar
- commands at install time; the exact list is in `package.py`_. Users
+ commands at install time; the exact list is in
+ :source:`pypy/tool/release/package.py <package.py>`. Users
seeing a broken installation of PyPy can fix it after-the-fact if they
have sudo rights, by running once e.g. ``sudo pypy -c "import gdbm``.
* PyPy 2.6 and later: anyone would get ``ImportError: no module named
_gdbm_cffi``. Installers need to run ``pypy _gdbm_build.py`` in the
``lib_pypy`` directory during the installation process (plus others;
- see the exact list in `package.py`_). Users seeing a broken
+ see the exact list in :source:`pypy/tool/release/package.py <package.py>`).
+ Users seeing a broken
installation of PyPy can fix it after-the-fact, by running ``pypy
/path/to/lib_pypy/_gdbm_build.py``. This command produces a file
called ``_gdbm_cffi.pypy-41.so`` locally, which is a C extension
diff --git a/pypy/doc/config/objspace.std.withcelldict.txt b/pypy/doc/config/objspace.std.withcelldict.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withcelldict.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-Enable cell-dicts. This optimization is not helpful without the JIT. In the
-presence of the JIT, it greatly helps looking up globals.
diff --git a/pypy/doc/configuration.rst b/pypy/doc/configuration.rst
--- a/pypy/doc/configuration.rst
+++ b/pypy/doc/configuration.rst
@@ -188,4 +188,6 @@
can be found on the ``config`` attribute of all ``TranslationContext``
instances and are described in :source:`rpython/config/translationoption.py`. The interpreter options
are attached to the object space, also under the name ``config`` and are
-described in :source:`pypy/config/pypyoption.py`.
+described in :source:`pypy/config/pypyoption.py`. Both set of options are
+documented in the :doc:`config/index` section.
+
diff --git a/pypy/doc/cppyy_example.rst b/pypy/doc/cppyy_example.rst
deleted file mode 100644
--- a/pypy/doc/cppyy_example.rst
+++ /dev/null
@@ -1,59 +0,0 @@
-File example.h
-==============
-
-::
-
- #include <iostream>
- #include <vector>
-
- class AbstractClass {
- public:
- virtual ~AbstractClass() {}
- virtual void abstract_method() = 0;
- };
-
- class ConcreteClass : AbstractClass {
- public:
- ConcreteClass(int n=42) : m_int(n) {}
- ~ConcreteClass() {}
-
- virtual void abstract_method() {
- std::cout << "called concrete method" << std::endl;
- }
-
- void array_method(int* ad, int size) {
- for (int i=0; i < size; ++i)
- std::cout << ad[i] << ' ';
- std::cout << std::endl;
- }
-
- void array_method(double* ad, int size) {
- for (int i=0; i < size; ++i)
- std::cout << ad[i] << ' ';
- std::cout << std::endl;
- }
-
- AbstractClass* show_autocast() {
- return this;
- }
-
- operator const char*() {
- return "Hello operator const char*!";
- }
-
- public:
- int m_int;
- };
-
- namespace Namespace {
-
- class ConcreteClass {
- public:
- class NestedClass {
- public:
- std::vector<int> m_v;
- };
-
- };
-
- } // namespace Namespace
diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst
--- a/pypy/doc/extending.rst
+++ b/pypy/doc/extending.rst
@@ -12,7 +12,7 @@
* Write them in pure Python and use ctypes_.
-* Write them in C++ and bind them through :doc:`cppyy <cppyy>` using Cling.
+* Write them in C++ and bind them through cppyy_ using Cling.
* Write them as `RPython mixed modules`_.
@@ -64,9 +64,9 @@
cppyy
-----
-For C++, `cppyy`_ is an automated bindings generator available for both
+For C++, _cppyy_ is an automated bindings generator available for both
PyPy and CPython.
-``cppyy`` relies on declarations from C++ header files to dynamically
+_cppyy_ relies on declarations from C++ header files to dynamically
construct Python equivalent classes, functions, variables, etc.
It is designed for use by large scale programs and supports modern C++.
With PyPy, it leverages the built-in ``_cppyy`` module, allowing the JIT to
@@ -75,8 +75,7 @@
To install, run ``pip install cppyy``.
Further details are available in the `full documentation`_.
-.. _cppyy: http://cppyy.readthedocs.org/
-.. _`full documentation`: http://cppyy.readthedocs.org/
+.. _`full documentation`: https://cppyy.readthedocs.org/
RPython Mixed Modules
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -55,3 +55,8 @@
Fix the bounds in the GC when allocating a lot of objects with finalizers,
fixes issue #2590
+
+.. branch: arrays-force-less
+
+Small improvement to optimize list accesses with constant indexes better by
+throwing away information about them less eagerly.
diff --git a/pypy/interpreter/argument.py b/pypy/interpreter/argument.py
--- a/pypy/interpreter/argument.py
+++ b/pypy/interpreter/argument.py
@@ -2,6 +2,7 @@
Arguments objects.
"""
from rpython.rlib.debug import make_sure_not_resized
+from rpython.rlib.objectmodel import not_rpython
from rpython.rlib import jit
from pypy.interpreter.error import OperationError, oefmt
@@ -46,8 +47,8 @@
# behaviour but produces better error messages
self.methodcall = methodcall
+ @not_rpython
def __repr__(self):
- """ NOT_RPYTHON """
name = self.__class__.__name__
if not self.keywords:
return '%s(%s)' % (name, self.arguments_w,)
diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py
--- a/pypy/interpreter/error.py
+++ b/pypy/interpreter/error.py
@@ -7,6 +7,7 @@
from rpython.rlib import jit
from rpython.rlib.objectmodel import we_are_translated, specialize
+from rpython.rlib.objectmodel import not_rpython
from rpython.rlib import rstack, rstackovf
from pypy.interpreter import debug
@@ -57,8 +58,9 @@
self.match(space, space.w_KeyboardInterrupt))
# note: an extra case is added in OpErrFmtNoArgs
+ @not_rpython
def __str__(self):
- "NOT_RPYTHON: Convenience for tracebacks."
+ "Convenience for tracebacks."
s = self._w_value
space = getattr(self.w_type, 'space', None)
if space is not None:
@@ -107,15 +109,16 @@
if RECORD_INTERPLEVEL_TRACEBACK:
self.debug_excs.append(sys.exc_info())
+ @not_rpython
def print_application_traceback(self, space, file=None):
- "NOT_RPYTHON: Dump a standard application-level traceback."
+ "Dump a standard application-level traceback."
if file is None:
file = sys.stderr
self.print_app_tb_only(file)
print >> file, self.errorstr(space)
+ @not_rpython
def print_app_tb_only(self, file):
- "NOT_RPYTHON"
tb = self._application_traceback
if tb:
import linecache
@@ -142,8 +145,9 @@
print >> file, l
tb = tb.next
+ @not_rpython
def print_detailed_traceback(self, space=None, file=None):
- """NOT_RPYTHON: Dump a nice detailed interpreter- and
+ """Dump a nice detailed interpreter- and
application-level traceback, useful to debug the interpreter."""
if file is None:
file = sys.stderr
diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py
--- a/pypy/interpreter/executioncontext.py
+++ b/pypy/interpreter/executioncontext.py
@@ -1,7 +1,7 @@
import sys
from pypy.interpreter.error import OperationError, get_cleared_operation_error
from rpython.rlib.unroll import unrolling_iterable
-from rpython.rlib.objectmodel import specialize
+from rpython.rlib.objectmodel import specialize, not_rpython
from rpython.rlib import jit, rgc, objectmodel
TICK_COUNTER_STEP = 100
@@ -423,8 +423,9 @@
# to run at the next possible bytecode
self.reset_ticker(-1)
+ @not_rpython
def register_periodic_action(self, action, use_bytecode_counter):
- """NOT_RPYTHON:
+ """
Register the PeriodicAsyncAction action to be called whenever the
tick counter becomes smaller than 0. If 'use_bytecode_counter' is
True, make sure that we decrease the tick counter at every bytecode.
diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py
--- a/pypy/interpreter/gateway.py
+++ b/pypy/interpreter/gateway.py
@@ -23,7 +23,7 @@
DescrMismatch)
from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.function import ClassMethod, FunctionWithFixedCode
-from rpython.rlib.objectmodel import we_are_translated
+from rpython.rlib.objectmodel import we_are_translated, not_rpython
from rpython.rlib.rarithmetic import r_longlong, r_int, r_ulonglong, r_uint
from rpython.tool.sourcetools import func_with_new_name, compile2
@@ -64,8 +64,8 @@
def _freeze_(self):
return True
+ @not_rpython
def unwrap(self, space, w_value):
- """NOT_RPYTHON"""
raise NotImplementedError
@@ -380,8 +380,8 @@
class BuiltinActivation(object):
_immutable_ = True
+ @not_rpython
def __init__(self, behavior):
- """NOT_RPYTHON"""
self.behavior = behavior
def _run(self, space, scope_w):
@@ -621,9 +621,9 @@
# When a BuiltinCode is stored in a Function object,
# you get the functionality of CPython's built-in function type.
+ @not_rpython
def __init__(self, func, unwrap_spec=None, self_type=None,
descrmismatch=None, doc=None):
- "NOT_RPYTHON"
# 'implfunc' is the interpreter-level function.
# Note that this uses a lot of (construction-time) introspection.
Code.__init__(self, func.__name__)
@@ -969,10 +969,10 @@
instancecache = {}
+ @not_rpython
def __new__(cls, f, app_name=None, unwrap_spec=None, descrmismatch=None,
as_classmethod=False, doc=None):
- "NOT_RPYTHON"
# f must be a function whose name does NOT start with 'app_'
self_type = None
if hasattr(f, 'im_func'):
@@ -1013,8 +1013,8 @@
self._staticdefs = zip(argnames[-len(defaults):], defaults)
return self
+ @not_rpython
def _getdefaults(self, space):
- "NOT_RPYTHON"
defs_w = []
for name, defaultval in self._staticdefs:
if name.startswith('w_'):
@@ -1070,8 +1070,8 @@
class GatewayCache(SpaceCache):
+ @not_rpython
def build(cache, gateway):
- "NOT_RPYTHON"
space = cache.space
defs = gateway._getdefaults(space) # needs to be implemented by subclass
code = gateway._code
@@ -1141,8 +1141,8 @@
w_globals = self.getwdict(space)
return space.getitem(w_globals, space.newtext(name))
+ @not_rpython
def interphook(self, name):
- "NOT_RPYTHON"
def appcaller(space, *args_w):
if not isinstance(space, ObjSpace):
raise TypeError("first argument must be a space instance.")
@@ -1179,15 +1179,16 @@
"""NOT_RPYTHON
The cache mapping each applevel instance to its lazily built w_dict"""
+ @not_rpython
def build(self, app):
- "NOT_RPYTHON. Called indirectly by Applevel.getwdict()."
+ "Called indirectly by Applevel.getwdict()."
return build_applevel_dict(app, self.space)
# __________ pure applevel version __________
+ at not_rpython
def build_applevel_dict(self, space):
- "NOT_RPYTHON"
w_glob = space.newdict(module=True)
space.setitem(w_glob, space.newtext('__name__'), space.newtext(self.modname))
space.exec_(self.source, w_glob, w_glob,
@@ -1198,8 +1199,9 @@
# ____________________________________________________________
+ at not_rpython
def appdef(source, applevel=ApplevelClass, filename=None):
- """ NOT_RPYTHON: build an app-level helper function, like for example:
+ """ build an app-level helper function, like for example:
myfunc = appdef('''myfunc(x, y):
return x+y
''')
@@ -1245,6 +1247,6 @@
# app2interp_temp is used for testing mainly
+ at not_rpython
def app2interp_temp(func, applevel_temp=applevel_temp, filename=None):
- """ NOT_RPYTHON """
return appdef(func, applevel_temp, filename=filename)
diff --git a/pypy/interpreter/miscutils.py b/pypy/interpreter/miscutils.py
--- a/pypy/interpreter/miscutils.py
+++ b/pypy/interpreter/miscutils.py
@@ -3,6 +3,7 @@
"""
from rpython.rlib.listsort import make_timsort_class
+from rpython.rlib.objectmodel import not_rpython
class ThreadLocals:
@@ -41,9 +42,8 @@
# but in some corner cases it is not... unsure why
self._value = None
-
+ at not_rpython
def make_weak_value_dictionary(space, keytype, valuetype):
- "NOT_RPYTHON"
if space.config.translation.rweakref:
from rpython.rlib.rweakref import RWeakValueDictionary
return RWeakValueDictionary(keytype, valuetype)
diff --git a/pypy/interpreter/mixedmodule.py b/pypy/interpreter/mixedmodule.py
--- a/pypy/interpreter/mixedmodule.py
+++ b/pypy/interpreter/mixedmodule.py
@@ -3,6 +3,9 @@
from pypy.interpreter import gateway
from pypy.interpreter.error import OperationError
from pypy.interpreter.baseobjspace import W_Root
+
+from rpython.rlib.objectmodel import not_rpython
+
import sys
class MixedModule(Module):
@@ -15,16 +18,17 @@
lazy = False
submodule_name = None
+ @not_rpython
def __init__(self, space, w_name):
- """ NOT_RPYTHON """
Module.__init__(self, space, w_name)
self.lazy = True
self.__class__.buildloaders()
self.loaders = self.loaders.copy() # copy from the class to the inst
self.submodules_w = []
+ @not_rpython
def install(self):
- """NOT_RPYTHON: install this module, and it's submodules into
+ """install this module, and it's submodules into
space.builtin_modules"""
Module.install(self)
if hasattr(self, "submodules"):
@@ -61,8 +65,8 @@
self.w_initialdict = self.space.call_method(self.w_dict, 'items')
@classmethod
+ @not_rpython
def get_applevel_name(cls):
- """ NOT_RPYTHON """
if cls.applevel_name is not None:
return cls.applevel_name
else:
@@ -130,8 +134,8 @@
self._frozen = True
@classmethod
+ @not_rpython
def buildloaders(cls):
- """ NOT_RPYTHON """
if not hasattr(cls, 'loaders'):
# build a constant dictionary out of
# applevel/interplevel definitions
@@ -161,8 +165,8 @@
return space.newtext_or_none(cls.__doc__)
+ at not_rpython
def getinterpevalloader(pkgroot, spec):
- """ NOT_RPYTHON """
def ifileloader(space):
d = {'space':space}
# EVIL HACK (but it works, and this is not RPython :-)
@@ -202,8 +206,8 @@
return ifileloader
applevelcache = {}
+ at not_rpython
def getappfileloader(pkgroot, appname, spec):
- """ NOT_RPYTHON """
# hum, it's a bit more involved, because we usually
# want the import at applevel
modname, attrname = spec.split('.')
diff --git a/pypy/interpreter/module.py b/pypy/interpreter/module.py
--- a/pypy/interpreter/module.py
+++ b/pypy/interpreter/module.py
@@ -4,7 +4,7 @@
from pypy.interpreter.baseobjspace import W_Root
from pypy.interpreter.error import OperationError
-from rpython.rlib.objectmodel import we_are_translated
+from rpython.rlib.objectmodel import we_are_translated, not_rpython
class Module(W_Root):
@@ -40,13 +40,15 @@
except OperationError:
pass
+ @not_rpython
def install(self):
- """NOT_RPYTHON: installs this module into space.builtin_modules"""
+ """installs this module into space.builtin_modules"""
modulename = self.space.text0_w(self.w_name)
self.space.builtin_modules[modulename] = self
+ @not_rpython
def setup_after_space_initialization(self):
- """NOT_RPYTHON: to allow built-in modules to do some more setup
+ """to allow built-in modules to do some more setup
after the space is fully initialized."""
def init(self, space):
diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py
--- a/pypy/interpreter/pyframe.py
+++ b/pypy/interpreter/pyframe.py
@@ -7,6 +7,7 @@
from rpython.rlib.debug import ll_assert_not_none
from rpython.rlib.jit import hint
from rpython.rlib.objectmodel import instantiate, specialize, we_are_translated
+from rpython.rlib.objectmodel import not_rpython
from rpython.rlib.rarithmetic import intmask, r_uint
from rpython.tool.pairtype import extendabletype
@@ -144,8 +145,9 @@
return None
return d.w_locals
+ @not_rpython
def __repr__(self):
- # NOT_RPYTHON: useful in tracebacks
+ # useful in tracebacks
return "<%s.%s executing %s at line %s" % (
self.__class__.__module__, self.__class__.__name__,
self.pycode, self.get_last_lineno())
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
--- a/pypy/interpreter/pyopcode.py
+++ b/pypy/interpreter/pyopcode.py
@@ -7,7 +7,7 @@
from rpython.rlib import jit, rstackovf
from rpython.rlib.debug import check_nonneg
from rpython.rlib.objectmodel import (we_are_translated, always_inline,
- dont_inline)
+ dont_inline, not_rpython)
from rpython.rlib.rarithmetic import r_uint, intmask
from rpython.tool.sourcetools import func_with_new_name
@@ -20,8 +20,8 @@
from pypy.interpreter.pycode import PyCode, BytecodeCorruption
from pypy.tool.stdlib_opcode import bytecode_spec
+ at not_rpython
def unaryoperation(operationname):
- """NOT_RPYTHON"""
def opimpl(self, *ignored):
operation = getattr(self.space, operationname)
w_1 = self.popvalue()
@@ -31,8 +31,8 @@
return func_with_new_name(opimpl, "opcode_impl_for_%s" % operationname)
+ at not_rpython
def binaryoperation(operationname):
- """NOT_RPYTHON"""
def opimpl(self, *ignored):
operation = getattr(self.space, operationname)
w_2 = self.popvalue()
diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py
--- a/pypy/interpreter/typedef.py
+++ b/pypy/interpreter/typedef.py
@@ -8,14 +8,15 @@
from rpython.rlib.jit import promote
from rpython.rlib.objectmodel import compute_identity_hash, specialize
-from rpython.rlib.objectmodel import instantiate
+from rpython.rlib.objectmodel import instantiate, not_rpython
from rpython.tool.sourcetools import compile2, func_with_new_name
class TypeDef(object):
+ @not_rpython
def __init__(self, __name, __base=None, __total_ordering__=None,
__buffer=None, **rawdict):
- "NOT_RPYTHON: initialization-time only"
+ "initialization-time only"
self.name = __name
if __base is None:
bases = []
@@ -113,8 +114,9 @@
# register_finalizer() or not.
@specialize.memo()
+ at not_rpython
def get_unique_interplevel_subclass(space, cls):
- "NOT_RPYTHON: initialization-time only"
+ "initialization-time only"
assert cls.typedef.acceptable_as_base_class
try:
return _unique_subclass_cache[cls]
@@ -349,15 +351,17 @@
return self
+ at not_rpython
def interp_attrproperty(name, cls, doc=None, wrapfn=None):
- "NOT_RPYTHON: initialization-time only"
+ "initialization-time only"
assert wrapfn is not None
def fget(space, obj):
return getattr(space, wrapfn)(getattr(obj, name))
return GetSetProperty(fget, cls=cls, doc=doc)
+ at not_rpython
def interp_attrproperty_w(name, cls, doc=None):
- "NOT_RPYTHON: initialization-time only"
+ "initialization-time only"
def fget(space, obj):
w_value = getattr(obj, name)
if w_value is None:
diff --git a/pypy/module/__builtin__/test/test_classobj.py b/pypy/module/__builtin__/test/test_classobj.py
--- a/pypy/module/__builtin__/test/test_classobj.py
+++ b/pypy/module/__builtin__/test/test_classobj.py
@@ -1090,18 +1090,18 @@
def setup_class(cls):
if cls.runappdirect:
py.test.skip("can only be run on py.py")
- def is_strdict(space, w_class):
- from pypy.objspace.std.dictmultiobject import BytesDictStrategy
+ def is_moduledict(space, w_class):
+ from pypy.objspace.std.celldict import ModuleDictStrategy
w_d = w_class.getdict(space)
- return space.wrap(isinstance(w_d.get_strategy(), BytesDictStrategy))
+ return space.wrap(isinstance(w_d.get_strategy(), ModuleDictStrategy))
- cls.w_is_strdict = cls.space.wrap(gateway.interp2app(is_strdict))
+ cls.w_is_moduledict = cls.space.wrap(gateway.interp2app(is_moduledict))
- def test_strdict(self):
+ def test_moduledict(self):
class A:
a = 1
b = 2
- assert self.is_strdict(A)
+ assert self.is_moduledict(A)
def test_attr_slots(self):
class C:
diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py
--- a/pypy/module/_cffi_backend/cdataobj.py
+++ b/pypy/module/_cffi_backend/cdataobj.py
@@ -433,17 +433,22 @@
def _sizeof(self):
return self.ctype.size
- def with_gc(self, w_destructor):
+ def with_gc(self, w_destructor, size=0):
space = self.space
if space.is_none(w_destructor):
if isinstance(self, W_CDataGCP):
self.detach_destructor()
- return space.w_None
- raise oefmt(space.w_TypeError,
- "Can remove destructor only on a object "
- "previously returned by ffi.gc()")
- with self as ptr:
- return W_CDataGCP(space, ptr, self.ctype, self, w_destructor)
+ w_res = space.w_None
+ else:
+ raise oefmt(space.w_TypeError,
+ "Can remove destructor only on a object "
+ "previously returned by ffi.gc()")
+ else:
+ with self as ptr:
+ w_res = W_CDataGCP(space, ptr, self.ctype, self, w_destructor)
+ if size != 0:
+ rgc.add_memory_pressure(size)
+ return w_res
def unpack(self, length):
from pypy.module._cffi_backend.ctypeptr import W_CTypePtrOrArray
diff --git a/pypy/module/_cffi_backend/ffi_obj.py b/pypy/module/_cffi_backend/ffi_obj.py
--- a/pypy/module/_cffi_backend/ffi_obj.py
+++ b/pypy/module/_cffi_backend/ffi_obj.py
@@ -351,14 +351,14 @@
return handle.from_handle(self.space, w_arg)
- @unwrap_spec(w_cdata=W_CData)
- def descr_gc(self, w_cdata, w_destructor):
+ @unwrap_spec(w_cdata=W_CData, size=int)
+ def descr_gc(self, w_cdata, w_destructor, size=0):
"""\
Return a new cdata object that points to the same data.
Later, when this new cdata object is garbage-collected,
'destructor(old_cdata_object)' will be called."""
#
- return w_cdata.with_gc(w_destructor)
+ return w_cdata.with_gc(w_destructor, size)
@unwrap_spec(replace_with='text')
diff --git a/pypy/module/_cffi_backend/func.py b/pypy/module/_cffi_backend/func.py
--- a/pypy/module/_cffi_backend/func.py
+++ b/pypy/module/_cffi_backend/func.py
@@ -257,6 +257,6 @@
# ____________________________________________________________
- at unwrap_spec(w_cdata=cdataobj.W_CData)
-def gcp(space, w_cdata, w_destructor):
- return w_cdata.with_gc(w_destructor)
+ at unwrap_spec(w_cdata=cdataobj.W_CData, size=int)
+def gcp(space, w_cdata, w_destructor, size=0):
+ return w_cdata.with_gc(w_destructor, size)
diff --git a/pypy/module/_cffi_backend/test/test_ffi_obj.py b/pypy/module/_cffi_backend/test/test_ffi_obj.py
--- a/pypy/module/_cffi_backend/test/test_ffi_obj.py
+++ b/pypy/module/_cffi_backend/test/test_ffi_obj.py
@@ -377,7 +377,7 @@
raises(TypeError, ffi.gc, p, None)
seen = []
q1 = ffi.gc(p, lambda p: seen.append(1))
- q2 = ffi.gc(q1, lambda p: seen.append(2))
+ q2 = ffi.gc(q1, lambda p: seen.append(2), size=123)
import gc; gc.collect()
assert seen == []
assert ffi.gc(q1, None) is None
diff --git a/pypy/module/_codecs/__init__.py b/pypy/module/_codecs/__init__.py
--- a/pypy/module/_codecs/__init__.py
+++ b/pypy/module/_codecs/__init__.py
@@ -1,5 +1,6 @@
from pypy.interpreter.mixedmodule import MixedModule
from rpython.rlib import runicode
+from rpython.rlib.objectmodel import not_rpython
from pypy.module._codecs import interp_codecs
class Module(MixedModule):
@@ -86,9 +87,8 @@
'unicode_internal_encode' : 'interp_codecs.unicode_internal_encode',
}
+ @not_rpython
def __init__(self, space, *args):
- "NOT_RPYTHON"
-
# mbcs codec is Windows specific, and based on rffi.
if (hasattr(runicode, 'str_decode_mbcs')):
self.interpleveldefs['mbcs_encode'] = 'interp_codecs.mbcs_encode'
diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py
--- a/pypy/module/_codecs/interp_codecs.py
+++ b/pypy/module/_codecs/interp_codecs.py
@@ -1,5 +1,5 @@
from rpython.rlib import jit
-from rpython.rlib.objectmodel import we_are_translated
+from rpython.rlib.objectmodel import we_are_translated, not_rpython
from rpython.rlib.rstring import UnicodeBuilder
from rpython.rlib.runicode import code_to_unichr, MAXUNICODE
@@ -268,8 +268,8 @@
raise oefmt(space.w_TypeError,
"don't know how to handle %T in error callback", w_exc)
+ at not_rpython
def register_builtin_error_handlers(space):
- "NOT_RPYTHON"
state = space.fromcache(CodecState)
for error in ("strict", "ignore", "replace", "xmlcharrefreplace",
"backslashreplace"):
diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py
--- a/pypy/module/array/interp_array.py
+++ b/pypy/module/array/interp_array.py
@@ -118,6 +118,29 @@
return space.w_True
return space.w_False
+index_count_jd = jit.JitDriver(
+ greens = ['count', 'arrclass', 'tp_item'],
+ reds = 'auto', name = 'array.index_or_count')
+
+def index_count_array(arr, w_val, count=False):
+ space = arr.space
+ tp_item = space.type(w_val)
+ arrclass = arr.__class__
+ cnt = 0
+ for i in range(arr.len):
+ index_count_jd.jit_merge_point(
+ tp_item=tp_item, count=count,
+ arrclass=arrclass)
+ w_item = arr.w_getitem(space, i)
+ if space.eq_w(w_item, w_val):
+ if count:
+ cnt += 1
+ else:
+ return i
+ if count:
+ return cnt
+ return -1
+
UNICODE_ARRAY = lltype.Ptr(lltype.Array(lltype.UniChar,
hints={'nolength': True}))
@@ -257,17 +280,12 @@
"""
self.extend(w_x)
- def descr_count(self, space, w_val):
+ def descr_count(self, space, w_x):
""" count(x)
Return number of occurrences of x in the array.
"""
- cnt = 0
- for i in range(self.len):
- # XXX jitdriver
- w_item = self.w_getitem(space, i)
- if space.eq_w(w_item, w_val):
- cnt += 1
+ cnt = index_count_array(self, w_x, count=True)
return space.newint(cnt)
def descr_index(self, space, w_x):
@@ -275,10 +293,9 @@
Return index of first occurrence of x in the array.
"""
- for i in range(self.len):
- w_item = self.w_getitem(space, i)
- if space.eq_w(w_item, w_x):
- return space.newint(i)
+ res = index_count_array(self, w_x, count=False)
+ if res >= 0:
+ return space.newint(res)
raise oefmt(space.w_ValueError, "array.index(x): x not in list")
def descr_reverse(self, space):
@@ -752,7 +769,9 @@
class TypeCode(object):
def __init__(self, itemtype, unwrap, canoverflow=False, signed=False,
- method='__int__'):
+ method='__int__', errorname=None):
+ if errorname is None:
+ errorname = unwrap[:-2]
self.itemtype = itemtype
self.bytes = rffi.sizeof(itemtype)
self.arraytype = lltype.Array(itemtype, hints={'nolength': True})
@@ -762,6 +781,7 @@
self.canoverflow = canoverflow
self.w_class = None
self.method = method
+ self.errorname = errorname
def _freeze_(self):
# hint for the annotator: track individual constant instances
@@ -785,8 +805,8 @@
'i': TypeCode(rffi.INT, 'int_w', True, True),
'I': _UINTTypeCode,
'l': TypeCode(rffi.LONG, 'int_w', True, True),
- 'L': TypeCode(rffi.ULONG, 'bigint_w'), # Overflow handled by
- # rbigint.touint() which
+ 'L': TypeCode(rffi.ULONG, 'bigint_w', # Overflow handled by
+ errorname="integer"), # rbigint.touint() which
# corresponds to the
# C-type unsigned long
'f': TypeCode(lltype.SingleFloat, 'float_w', method='__float__'),
@@ -864,7 +884,7 @@
item = unwrap(space.call_method(w_item, mytype.method))
except OperationError:
raise oefmt(space.w_TypeError,
- "array item must be " + mytype.unwrap[:-2])
+ "array item must be " + mytype.errorname)
else:
raise
if mytype.unwrap == 'bigint_w':
diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py
--- a/pypy/module/array/test/test_array.py
+++ b/pypy/module/array/test/test_array.py
@@ -162,6 +162,11 @@
raises(OverflowError, a.append, -1)
raises(OverflowError, a.append, 2 ** (8 * b))
+ def test_errormessage(self):
+ a = self.array("L", [1, 2, 3])
+ excinfo = raises(TypeError, "a[0] = 'abc'")
+ assert str(excinfo.value) == "array item must be integer"
+
def test_fromstring(self):
import sys
diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py
--- a/pypy/module/posix/interp_posix.py
+++ b/pypy/module/posix/interp_posix.py
@@ -3,7 +3,7 @@
from rpython.rlib import rposix, rposix_stat
from rpython.rlib import objectmodel, rurandom
-from rpython.rlib.objectmodel import specialize
+from rpython.rlib.objectmodel import specialize, not_rpython
from rpython.rlib.rarithmetic import r_longlong, intmask, r_uint
from rpython.rlib.unroll import unrolling_iterable
@@ -731,8 +731,8 @@
else:
assert False, "Unknown fork hook"
+@not_rpython
def add_fork_hook(where, hook):
- "NOT_RPYTHON"
get_fork_hooks(where).append(hook)
add_fork_hook('child', ExecutionContext._mark_thread_disappeared)
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py
--- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py
@@ -2455,3 +2455,61 @@
assert (pt.x, pt.y) == (-9*500*999, 9*500*999)
pt = lib.call2(lib.cb2)
assert (pt.x, pt.y) == (99*500*999, -99*500*999)
+
+def test_ffi_gc_size_arg():
+ # with PyPy's GC, these calls to ffi.gc() would rapidly consume
+ # 40 GB of RAM without the third argument
+ ffi = FFI()
+ ffi.cdef("void *malloc(size_t); void free(void *);")
+ lib = ffi.verify(r"""
+ #include <stdlib.h>
+ """)
+ for i in range(2000):
+ p = lib.malloc(20*1024*1024) # 20 MB
+ p1 = ffi.cast("char *", p)
+ for j in xrange(0, 20*1024*1024, 4096):
+ p1[j] = '!'
+ p = ffi.gc(p, lib.free, 20*1024*1024)
+ del p
+
+def test_ffi_gc_size_arg_2():
+ # a variant of the above: this "attack" works on cpython's cyclic gc too
+ # and I found no obvious way to prevent that. So for now, this test
+ # is skipped on CPython, where it eats all the memory.
+ if '__pypy__' not in sys.builtin_module_names:
+ py.test.skip("find a way to tweak the cyclic GC of CPython")
+ ffi = FFI()
+ ffi.cdef("void *malloc(size_t); void free(void *);")
+ lib = ffi.verify(r"""
+ #include <stdlib.h>
+ """)
+ class X(object):
+ pass
+ for i in range(2000):
+ p = lib.malloc(50*1024*1024) # 50 MB
+ p1 = ffi.cast("char *", p)
+ for j in xrange(0, 50*1024*1024, 4096):
+ p1[j] = '!'
+ p = ffi.gc(p, lib.free, 50*1024*1024)
+ x = X()
+ x.p = p
+ x.cyclic = x
+ del p, x
+
+def test_ffi_new_with_cycles():
+ # still another variant, with ffi.new()
+ if '__pypy__' not in sys.builtin_module_names:
+ py.test.skip("find a way to tweak the cyclic GC of CPython")
+ ffi = FFI()
+ ffi.cdef("")
+ lib = ffi.verify("")
+ class X(object):
+ pass
+ for i in range(2000):
+ p = ffi.new("char[]", 50*1024*1024) # 50 MB
+ for j in xrange(0, 50*1024*1024, 4096):
+ p[j] = '!'
+ x = X()
+ x.p = p
+ x.cyclic = x
+ del p, x
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py
--- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py
@@ -2291,3 +2291,61 @@
expected = "unsigned int"
assert ffi.typeof("UINT_PTR") is ffi.typeof(expected)
assert ffi.typeof("PTSTR") is ffi.typeof("wchar_t *")
+
+def test_gc_pypy_size_arg():
+ ffi = FFI()
+ ffi.cdef("void *malloc(size_t); void free(void *);")
+ lib = ffi.verify(r"""
+ #include <stdlib.h>
+ """)
+ for i in range(2000):
+ p = lib.malloc(20*1024*1024) # 20 MB
+ p1 = ffi.cast("char *", p)
+ for j in xrange(0, 20*1024*1024, 4096):
+ p1[j] = '!'
+ p = ffi.gc(p, lib.free, 20*1024*1024)
+ del p
+ # with PyPy's GC, the above would rapidly consume 40 GB of RAM
+ # without the third argument to ffi.gc()
+
+def test_ffi_gc_size_arg_2():
+ # a variant of the above: this "attack" works on cpython's cyclic gc too
+ # and I found no obvious way to prevent that. So for now, this test
+ # is skipped on CPython, where it eats all the memory.
+ if '__pypy__' not in sys.builtin_module_names:
+ py.test.skip("find a way to tweak the cyclic GC of CPython")
+ ffi = FFI()
+ ffi.cdef("void *malloc(size_t); void free(void *);")
+ lib = ffi.verify(r"""
+ #include <stdlib.h>
+ """)
+ class X(object):
+ pass
+ for i in range(2000):
+ p = lib.malloc(50*1024*1024) # 50 MB
+ p1 = ffi.cast("char *", p)
+ for j in xrange(0, 50*1024*1024, 4096):
+ p1[j] = '!'
+ p = ffi.gc(p, lib.free, 50*1024*1024)
+ x = X()
+ x.p = p
+ x.cyclic = x
+ del p, x
+
+def test_ffi_new_with_cycles():
+ # still another variant, with ffi.new()
+ if '__pypy__' not in sys.builtin_module_names:
+ py.test.skip("find a way to tweak the cyclic GC of CPython")
+ ffi = FFI()
+ ffi.cdef("")
+ lib = ffi.verify("")
+ class X(object):
+ pass
+ for i in range(2000):
+ p = ffi.new("char[]", 50*1024*1024) # 50 MB
+ for j in xrange(0, 50*1024*1024, 4096):
+ p[j] = '!'
+ x = X()
+ x.p = p
+ x.cyclic = x
+ del p, x
diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py
--- a/pypy/objspace/std/dictmultiobject.py
+++ b/pypy/objspace/std/dictmultiobject.py
@@ -56,7 +56,7 @@
def allocate_and_init_instance(space, w_type=None, module=False,
instance=False, strdict=False,
kwargs=False):
- if space.config.objspace.std.withcelldict and module:
+ if module:
from pypy.objspace.std.celldict import ModuleDictStrategy
assert w_type is None
# every module needs its own strategy, because the strategy stores
diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py
--- a/pypy/objspace/std/objspace.py
+++ b/pypy/objspace/std/objspace.py
@@ -181,8 +181,8 @@
return self._wrap_not_rpython(x)
+ @not_rpython
def _wrap_not_rpython(self, x):
- "NOT_RPYTHON"
# _____ this code is here to support testing only _____
# wrap() of a container works on CPython, but the code is
diff --git a/pypy/objspace/std/test/test_celldict.py b/pypy/objspace/std/test/test_celldict.py
--- a/pypy/objspace/std/test/test_celldict.py
+++ b/pypy/objspace/std/test/test_celldict.py
@@ -58,7 +58,6 @@
assert v2 is v3
class AppTestModuleDict(object):
- spaceconfig = {"objspace.std.withcelldict": True}
def setup_class(cls):
cls.w_runappdirect = cls.space.wrap(cls.runappdirect)
@@ -116,7 +115,6 @@
class AppTestCellDict(object):
- spaceconfig = {"objspace.std.withcelldict": True}
def setup_class(cls):
if cls.runappdirect:
diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py
--- a/pypy/objspace/std/test/test_dictmultiobject.py
+++ b/pypy/objspace/std/test/test_dictmultiobject.py
@@ -1261,7 +1261,6 @@
class Config:
class objspace:
class std:
- withcelldict = False
methodcachesizeexp = 11
withmethodcachecounter = False
@@ -1467,6 +1466,7 @@
def test_module_uses_strdict():
+ from pypy.objspace.std.celldict import ModuleDictStrategy
fakespace = FakeSpace()
d = fakespace.newdict(module=True)
- assert type(d.get_strategy()) is BytesDictStrategy
+ assert type(d.get_strategy()) is ModuleDictStrategy
diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py
--- a/pypy/objspace/std/test/test_mapdict.py
+++ b/pypy/objspace/std/test/test_mapdict.py
@@ -4,7 +4,6 @@
class Config:
class objspace:
class std:
- withcelldict = False
methodcachesizeexp = 11
withmethodcachecounter = False
diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py
--- a/pypy/objspace/std/typeobject.py
+++ b/pypy/objspace/std/typeobject.py
@@ -11,7 +11,7 @@
from rpython.rlib.jit import (promote, elidable_promote, we_are_jitted,
elidable, dont_look_inside, unroll_safe)
from rpython.rlib.objectmodel import current_object_addr_as_int, compute_hash
-from rpython.rlib.objectmodel import we_are_translated
+from rpython.rlib.objectmodel import we_are_translated, not_rpython
from rpython.rlib.rarithmetic import intmask, r_uint
class MutableCell(W_Root):
@@ -212,8 +212,8 @@
else:
self.terminator = NoDictTerminator(space, self)
+ @not_rpython
def __repr__(self):
- "NOT_RPYTHON"
return '<W_TypeObject %r at 0x%x>' % (self.name, id(self))
def mutated(self, key):
@@ -492,8 +492,9 @@
self, w_subtype, w_subtype)
return w_subtype
+ @not_rpython
def _cleanup_(self):
- "NOT_RPYTHON. Forces the lazy attributes to be computed."
+ "Forces the lazy attributes to be computed."
if 'lazyloaders' in self.__dict__:
for attr in self.lazyloaders.keys():
self.getdictvalue(self.space, attr)
@@ -1317,8 +1318,9 @@
class TypeCache(SpaceCache):
+ @not_rpython
def build(self, typedef):
- "NOT_RPYTHON: initialization-time only."
+ "initialization-time only."
from pypy.objspace.std.objectobject import W_ObjectObject
from pypy.interpreter.typedef import GetSetProperty
from rpython.rlib.objectmodel import instantiate
diff --git a/rpython/jit/metainterp/optimizeopt/bridgeopt.py b/rpython/jit/metainterp/optimizeopt/bridgeopt.py
--- a/rpython/jit/metainterp/optimizeopt/bridgeopt.py
+++ b/rpython/jit/metainterp/optimizeopt/bridgeopt.py
@@ -18,6 +18,10 @@
# (<box1> <descr> <box2>) length times, if getfield(box1, descr) == box2
# both boxes should be in the liveboxes
#
+# <length>
+# (<box1> <index> <descr> <box2>) length times, if getarrayitem_gc(box1, index, descr) == box2
+# both boxes should be in the liveboxes
+#
# ----
@@ -82,18 +86,26 @@
# structs
# XXX could be extended to arrays
if optimizer.optheap:
- triples = optimizer.optheap.serialize_optheap(available_boxes)
+ triples_struct, triples_array = optimizer.optheap.serialize_optheap(available_boxes)
# can only encode descrs that have a known index into
# metainterp_sd.all_descrs
- triples = [triple for triple in triples if triple[1].descr_index != -1]
- numb_state.append_int(len(triples))
- for box1, descr, box2 in triples:
- index = descr.descr_index
+ triples_struct = [triple for triple in triples_struct if triple[1].descr_index != -1]
+ numb_state.append_int(len(triples_struct))
+ for box1, descr, box2 in triples_struct:
+ descr_index = descr.descr_index
+ numb_state.append_short(tag_box(box1, liveboxes_from_env, memo))
+ numb_state.append_int(descr_index)
+ numb_state.append_short(tag_box(box2, liveboxes_from_env, memo))
+ numb_state.append_int(len(triples_array))
+ for box1, index, descr, box2 in triples_array:
+ descr_index = descr.descr_index
numb_state.append_short(tag_box(box1, liveboxes_from_env, memo))
numb_state.append_int(index)
+ numb_state.append_int(descr_index)
numb_state.append_short(tag_box(box2, liveboxes_from_env, memo))
else:
numb_state.append_int(0)
+ numb_state.append_int(0)
def deserialize_optimizer_knowledge(optimizer, resumestorage, frontend_boxes, liveboxes):
reader = resumecode.Reader(resumestorage.rd_numb)
@@ -123,13 +135,24 @@
if not optimizer.optheap:
return
length = reader.next_item()
- result = []
+ result_struct = []
+ for i in range(length):
+ tagged = reader.next_item()
+ box1 = decode_box(resumestorage, tagged, liveboxes, metainterp_sd.cpu)
+ descr_index = reader.next_item()
+ descr = metainterp_sd.all_descrs[descr_index]
+ tagged = reader.next_item()
+ box2 = decode_box(resumestorage, tagged, liveboxes, metainterp_sd.cpu)
+ result_struct.append((box1, descr, box2))
+ length = reader.next_item()
+ result_array = []
for i in range(length):
tagged = reader.next_item()
box1 = decode_box(resumestorage, tagged, liveboxes, metainterp_sd.cpu)
index = reader.next_item()
- descr = metainterp_sd.all_descrs[index]
+ descr_index = reader.next_item()
+ descr = metainterp_sd.all_descrs[descr_index]
tagged = reader.next_item()
box2 = decode_box(resumestorage, tagged, liveboxes, metainterp_sd.cpu)
- result.append((box1, descr, box2))
- optimizer.optheap.deserialize_optheap(result)
+ result_array.append((box1, index, descr, box2))
+ optimizer.optheap.deserialize_optheap(result_struct, result_array)
diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py
--- a/rpython/jit/metainterp/optimizeopt/heap.py
+++ b/rpython/jit/metainterp/optimizeopt/heap.py
@@ -223,7 +223,10 @@
def invalidate(self, descr):
for opinfo in self.cached_infos:
assert isinstance(opinfo, info.ArrayPtrInfo)
- opinfo._items = None
+ # only invalidate those at self.index
+ if self.index < len(opinfo._items):
+ opinfo._items[self.index] = None
+ #opinfo._items = None #[self.index] = None
self.cached_infos = []
self.cached_structs = []
@@ -695,7 +698,7 @@
return self.emit(op)
def serialize_optheap(self, available_boxes):
- result = []
+ result_getfield = []
for descr, cf in self.cached_fields.iteritems():
if cf._lazy_set:
continue # XXX safe default for now
@@ -703,27 +706,62 @@
if not parent_descr.is_object():
continue # XXX could be extended to non-instance objects
for i, box1 in enumerate(cf.cached_structs):
- if box1 not in available_boxes:
+ if not box1.is_constant() and box1 not in available_boxes:
continue
structinfo = cf.cached_infos[i]
- box2 = structinfo.getfield(descr).get_box_replacement()
- if isinstance(box2, Const) or box2 in available_boxes:
- result.append((box1, descr, box2))
- return result
+ box2 = structinfo.getfield(descr)
+ if box2 is None:
+ # XXX this should not happen, as it is an invariant
+ # violation! yet it does if box1 is a constant
+ continue
+ box2 = box2.get_box_replacement()
+ if box2.is_constant() or box2 in available_boxes:
+ result_getfield.append((box1, descr, box2))
+ result_array = []
+ for descr, indexdict in self.cached_arrayitems.iteritems():
+ for index, cf in indexdict.iteritems():
+ if cf._lazy_set:
+ continue # XXX safe default for now
+ for i, box1 in enumerate(cf.cached_structs):
+ if not box1.is_constant() and box1 not in available_boxes:
+ continue
+ arrayinfo = cf.cached_infos[i]
+ box2 = arrayinfo.getitem(descr, index)
+ if box2 is None:
+ # XXX this should not happen, as it is an invariant
+ # violation! yet it does if box1 is a constant
+ continue
+ box2 = box2.get_box_replacement()
+ if box2.is_constant() or box2 in available_boxes:
+ result_array.append((box1, index, descr, box2))
+ return result_getfield, result_array
- def deserialize_optheap(self, triples):
- for box1, descr, box2 in triples:
+ def deserialize_optheap(self, triples_struct, triples_array):
+ for box1, descr, box2 in triples_struct:
parent_descr = descr.get_parent_descr()
assert parent_descr.is_object()
- structinfo = box1.get_forwarded()
- if not isinstance(structinfo, info.AbstractVirtualPtrInfo):
- structinfo = info.InstancePtrInfo(parent_descr)
- structinfo.init_fields(parent_descr, descr.get_index())
- box1.set_forwarded(structinfo)
-
+ if box1.is_constant():
+ structinfo = info.ConstPtrInfo(box1)
+ else:
+ structinfo = box1.get_forwarded()
+ if not isinstance(structinfo, info.AbstractVirtualPtrInfo):
+ structinfo = info.InstancePtrInfo(parent_descr)
+ structinfo.init_fields(parent_descr, descr.get_index())
+ box1.set_forwarded(structinfo)
cf = self.field_cache(descr)
structinfo.setfield(descr, box1, box2, optheap=self, cf=cf)
+ for box1, index, descr, box2 in triples_array:
+ if box1.is_constant():
+ arrayinfo = info.ConstPtrInfo(box1)
+ else:
+ arrayinfo = box1.get_forwarded()
+ if not isinstance(arrayinfo, info.AbstractVirtualPtrInfo):
+ arrayinfo = info.ArrayPtrInfo(descr)
+ box1.set_forwarded(arrayinfo)
+ cf = self.arrayitem_cache(descr, index)
+ arrayinfo.setitem(descr, index, box1, box2, optheap=self, cf=cf)
+
dispatch_opt = make_dispatcher_method(OptHeap, 'optimize_',
default=OptHeap.emit)
diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py
--- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py
+++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py
@@ -1537,6 +1537,46 @@
"""
self.optimize_loop(ops, expected)
+ def test_duplicate_getarrayitem_after_setarrayitem_and_guard(self):
+ ops = """
+ [p0, p1, p2, p3, i1]
+ p4 = getarrayitem_gc_r(p0, 0, descr=arraydescr2)
+ p5 = getarrayitem_gc_r(p0, 1, descr=arraydescr2)
+ p6 = getarrayitem_gc_r(p1, 0, descr=arraydescr2)
+ setarrayitem_gc(p1, 1, p3, descr=arraydescr2)
+ guard_true(i1) [i1]
+ p7 = getarrayitem_gc_r(p0, 0, descr=arraydescr2)
+ p8 = getarrayitem_gc_r(p0, 1, descr=arraydescr2)
+ p9 = getarrayitem_gc_r(p1, 0, descr=arraydescr2)
+ p10 = getarrayitem_gc_r(p1, 1, descr=arraydescr2)
+ escape_n(p4)
+ escape_n(p5)
+ escape_n(p6)
+ escape_n(p7)
+ escape_n(p8)
+ escape_n(p9)
+ escape_n(p10)
+ jump(p0, p1, p2, p3, i1)
+ """
+ expected = """
+ [p0, p1, p2, p3, i1]
+ p4 = getarrayitem_gc_r(p0, 0, descr=arraydescr2)
+ p5 = getarrayitem_gc_r(p0, 1, descr=arraydescr2)
+ p6 = getarrayitem_gc_r(p1, 0, descr=arraydescr2)
+ setarrayitem_gc(p1, 1, p3, descr=arraydescr2)
+ guard_true(i1) [i1]
+ p8 = getarrayitem_gc_r(p0, 1, descr=arraydescr2)
+ escape_n(p4)
+ escape_n(p5)
+ escape_n(p6)
+ escape_n(p4)
+ escape_n(p8)
+ escape_n(p6)
+ escape_n(p3)
+ jump(p0, p1, p2, p3, 1)
+ """
+ self.optimize_loop(ops, expected)
+
def test_getarrayitem_pure_does_not_invalidate(self):
ops = """
[p1, p2]
diff --git a/rpython/jit/metainterp/test/test_bridgeopt.py b/rpython/jit/metainterp/test/test_bridgeopt.py
--- a/rpython/jit/metainterp/test/test_bridgeopt.py
+++ b/rpython/jit/metainterp/test/test_bridgeopt.py
@@ -61,7 +61,7 @@
serialize_optimizer_knowledge(optimizer, numb_state, liveboxes, {}, None)
- assert unpack_numbering(numb_state.create_numbering()) == [1, 0b010000, 0]
+ assert unpack_numbering(numb_state.create_numbering()) == [1, 0b010000, 0, 0]
rbox1 = InputArgRef()
rbox2 = InputArgRef()
@@ -97,7 +97,7 @@
serialize_optimizer_knowledge(optimizer, numb_state, liveboxes, {}, None)
- assert len(numb_state.create_numbering().code) == 2 + math.ceil(len(refboxes) / 6.0)
+ assert len(numb_state.create_numbering().code) == 3 + math.ceil(len(refboxes) / 6.0)
dct = {box: cls
for box, known_class in boxes_known_classes
@@ -143,11 +143,7 @@
def test_bridge_field_read(self):
myjitdriver = jit.JitDriver(greens=[], reds=['y', 'res', 'n', 'a'])
class A(object):
- def f(self):
- return 1
- class B(A):
- def f(self):
- return 2
+ pass
class M(object):
_immutable_fields_ = ['x']
def __init__(self, x):
@@ -156,14 +152,12 @@
m1 = M(1)
m2 = M(2)
def f(x, y, n):
+ a = A()
+ a.n = n
if x:
- a = A()
a.m = m1
- a.n = n
else:
- a = B()
a.m = m2
- a.n = n
a.x = 0
res = 0
while y > 0:
@@ -186,3 +180,105 @@
self.check_resops(getfield_gc_i=4) # 3x a.x, 1x a.n
self.check_resops(getfield_gc_r=1) # in main loop
+ def test_bridge_field_read_constants(self):
+ myjitdriver = jit.JitDriver(greens=[], reds=['y', 'res', 'n'])
+ class A(object):
+ pass
+ class M(object):
+ _immutable_fields_ = ['x']
+ def __init__(self, x):
+ self.x = x
+
+ m1 = M(1)
+ m2 = M(2)
+ a = A()
+ a.m = m1
+ a.n = 0
+ def f(x, y, n):
+ if x:
+ a.m = m1
+ a.n = n
+ else:
+ a.m = m2
+ a.n = n
+ a.x = 0
+ res = 0
+ while y > 0:
+ myjitdriver.jit_merge_point(y=y, n=n, res=res)
+ n1 = a.n
+ m = jit.promote(a.m)
+ res += m.x
+ a.x += 1
+ if y > n:
+ res += 1
+ m = jit.promote(a.m)
+ res += m.x
+ res += n1 + a.n
+ y -= 1
+ return res
+ res = self.meta_interp(f, [6, 32, 16])
+ assert res == f(6, 32, 16)
+ self.check_trace_count(3)
+ self.check_resops(guard_value=1)
+ self.check_resops(getfield_gc_i=4) # 3x a.x, 1x a.n
+ self.check_resops(getfield_gc_r=1) # in main loop
+
+ def test_bridge_array_read(self):
+ myjitdriver = jit.JitDriver(greens=[], reds=['y', 'res', 'n', 'a'])
+ def f(x, y, n):
+ if x:
+ a = [1, n, 0]
+ else:
+ a = [2, n, 0]
+ res = 0
+ while y > 0:
+ myjitdriver.jit_merge_point(y=y, n=n, res=res, a=a)
+ n1 = a[1]
+ m = jit.promote(a[0])
+ res += m
+ a[2] += 1
+ if y > n:
+ res += 1
+ m = jit.promote(a[0])
+ res += m
+ res += n1 + a[1]
+ y -= 1
+ return res
+ res = self.meta_interp(f, [6, 32, 16])
+ assert res == f(6, 32, 16)
+ self.check_trace_count(3)
+ self.check_resops(guard_value=1)
+ self.check_resops(getarrayitem_gc_i=4)
+
+ def test_bridge_array_read_constant(self):
+ myjitdriver = jit.JitDriver(greens=[], reds=['y', 'res', 'n'])
+ class A(object):
+ pass
+ a = A()
+ a.l = [1, -65, 0]
+ def f(x, y, n):
+ if x:
+ a.l[0] = 1
+ else:
+ a.l[0] = 2
+ a.l[1] = n
+ a.l[2] = 0
+ res = 0
+ while y > 0:
+ myjitdriver.jit_merge_point(y=y, n=n, res=res)
+ n1 = a.l[1]
+ m = jit.promote(a.l[0])
+ res += m
+ a.l[2] += 1
+ if y > n:
+ res += 1
+ m = jit.promote(a.l[0])
+ res += m
+ res += n1 + a.l[1]
+ y -= 1
+ return res
+ res = self.meta_interp(f, [6, 32, 16])
+ assert res == f(6, 32, 16)
+ self.check_trace_count(3)
+ self.check_resops(guard_value=1)
+ self.check_resops(getarrayitem_gc_i=5)
More information about the pypy-commit
mailing list