[pypy-commit] pypy unroll-if-alt: merged default.
alex_gaynor
noreply at buildbot.pypy.org
Thu Aug 4 23:21:17 CEST 2011
Author: Alex Gaynor <alex.gaynor at gmail.com>
Branch: unroll-if-alt
Changeset: r46280:f677aa19e903
Date: 2011-08-04 14:22 -0700
http://bitbucket.org/pypy/pypy/changeset/f677aa19e903/
Log: merged default.
diff --git a/LICENSE b/LICENSE
--- a/LICENSE
+++ b/LICENSE
@@ -37,22 +37,22 @@
Armin Rigo
Maciej Fijalkowski
Carl Friedrich Bolz
+ Antonio Cuni
Amaury Forgeot d'Arc
- Antonio Cuni
Samuele Pedroni
Michael Hudson
Holger Krekel
+ Benjamin Peterson
Christian Tismer
- Benjamin Peterson
+ Hakan Ardo
+ Alex Gaynor
Eric van Riet Paap
- Anders Chrigström
- Håkan Ardö
+ Anders Chrigstrom
+ David Schneider
Richard Emslie
Dan Villiom Podlaski Christiansen
Alexander Schremmer
- Alex Gaynor
- David Schneider
- Aurelién Campeas
+ Aurelien Campeas
Anders Lehmann
Camillo Bruni
Niklaus Haldimann
@@ -63,16 +63,17 @@
Bartosz Skowron
Jakub Gustak
Guido Wesdorp
+ Daniel Roberts
Adrien Di Mascio
Laura Creighton
Ludovic Aubry
Niko Matsakis
- Daniel Roberts
Jason Creighton
- Jacob Hallén
+ Jacob Hallen
Alex Martelli
Anders Hammarquist
Jan de Mooij
+ Wim Lavrijsen
Stephan Diehl
Michael Foord
Stefan Schwarzer
@@ -83,9 +84,13 @@
Alexandre Fayolle
Marius Gedminas
Simon Burton
+ Justin Peel
Jean-Paul Calderone
John Witulski
+ Lukas Diekmann
+ holger krekel
Wim Lavrijsen
+ Dario Bertini
Andreas Stührk
Jean-Philippe St. Pierre
Guido van Rossum
@@ -97,15 +102,16 @@
Georg Brandl
Gerald Klix
Wanja Saatkamp
+ Ronny Pfannschmidt
Boris Feigin
Oscar Nierstrasz
- Dario Bertini
David Malcolm
Eugene Oden
Henry Mason
+ Sven Hager
Lukas Renggli
+ Ilya Osadchiy
Guenter Jantzen
- Ronny Pfannschmidt
Bert Freudenberg
Amit Regmi
Ben Young
@@ -122,8 +128,8 @@
Jared Grubb
Karl Bartel
Gabriel Lavoie
+ Victor Stinner
Brian Dorsey
- Victor Stinner
Stuart Williams
Toby Watson
Antoine Pitrou
@@ -134,19 +140,23 @@
Jonathan David Riehl
Elmo Mäntynen
Anders Qvist
- Beatrice Düring
+ Beatrice During
Alexander Sedov
+ Timo Paulssen
+ Corbin Simpson
Vincent Legoll
+ Romain Guillebert
Alan McIntyre
- Romain Guillebert
Alex Perry
Jens-Uwe Mager
+ Simon Cross
Dan Stromberg
- Lukas Diekmann
+ Guillebert Romain
Carl Meyer
Pieter Zieschang
Alejandro J. Cura
Sylvain Thenault
+ Christoph Gerum
Travis Francis Athougies
Henrik Vendelbo
Lutz Paelike
@@ -157,6 +167,7 @@
Miguel de Val Borro
Ignas Mikalajunas
Artur Lisiecki
+ Philip Jenvey
Joshua Gilbert
Godefroid Chappelle
Yusei Tahara
@@ -165,27 +176,31 @@
Gustavo Niemeyer
William Leslie
Akira Li
- Kristján Valur Jónsson
+ Kristjan Valur Jonsson
Bobby Impollonia
+ Michael Hudson-Doyle
Andrew Thompson
Anders Sigfridsson
+ Floris Bruynooghe
Jacek Generowicz
Dan Colish
- Sven Hager
Zooko Wilcox-O Hearn
+ Dan Villiom Podlaski Christiansen
Anders Hammarquist
+ Chris Lambacher
Dinu Gherman
Dan Colish
+ Brett Cannon
Daniel Neuhäuser
Michael Chermside
Konrad Delong
Anna Ravencroft
Greg Price
Armin Ronacher
+ Christian Muirhead
Jim Baker
- Philip Jenvey
Rodrigo Araújo
- Brett Cannon
+ Romain Guillebert
Heinrich-Heine University, Germany
Open End AB (formerly AB Strakt), Sweden
diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py
--- a/lib_pypy/_ctypes/function.py
+++ b/lib_pypy/_ctypes/function.py
@@ -91,13 +91,15 @@
raise TypeError(
"item %d in _argtypes_ has no from_param method" % (
i + 1,))
- #
- if all([hasattr(argtype, '_ffiargshape') for argtype in argtypes]):
- fastpath_cls = make_fastpath_subclass(self.__class__)
- fastpath_cls.enable_fastpath_maybe(self)
self._argtypes_ = list(argtypes)
+ self._check_argtypes_for_fastpath()
argtypes = property(_getargtypes, _setargtypes)
+ def _check_argtypes_for_fastpath(self):
+ if all([hasattr(argtype, '_ffiargshape') for argtype in self._argtypes_]):
+ fastpath_cls = make_fastpath_subclass(self.__class__)
+ fastpath_cls.enable_fastpath_maybe(self)
+
def _getparamflags(self):
return self._paramflags
@@ -216,6 +218,7 @@
import ctypes
restype = ctypes.c_int
self._ptr = self._getfuncptr_fromaddress(self._argtypes_, restype)
+ self._check_argtypes_for_fastpath()
return
diff --git a/pypy/config/translationoption.py b/pypy/config/translationoption.py
--- a/pypy/config/translationoption.py
+++ b/pypy/config/translationoption.py
@@ -13,6 +13,10 @@
DEFL_LOW_INLINE_THRESHOLD = DEFL_INLINE_THRESHOLD / 2.0
DEFL_GC = "minimark"
+if sys.platform.startswith("linux"):
+ DEFL_ROOTFINDER_WITHJIT = "asmgcc"
+else:
+ DEFL_ROOTFINDER_WITHJIT = "shadowstack"
IS_64_BITS = sys.maxint > 2147483647
@@ -109,7 +113,7 @@
BoolOption("jit", "generate a JIT",
default=False,
suggests=[("translation.gc", DEFL_GC),
- ("translation.gcrootfinder", "asmgcc"),
+ ("translation.gcrootfinder", DEFL_ROOTFINDER_WITHJIT),
("translation.list_comprehension_operations", True)]),
ChoiceOption("jit_backend", "choose the backend for the JIT",
["auto", "x86", "x86-without-sse2", "llvm"],
diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst
--- a/pypy/doc/contributor.rst
+++ b/pypy/doc/contributor.rst
@@ -9,22 +9,22 @@
Armin Rigo
Maciej Fijalkowski
Carl Friedrich Bolz
+ Antonio Cuni
Amaury Forgeot d'Arc
- Antonio Cuni
Samuele Pedroni
Michael Hudson
Holger Krekel
+ Benjamin Peterson
Christian Tismer
- Benjamin Peterson
+ Hakan Ardo
+ Alex Gaynor
Eric van Riet Paap
- Anders Chrigström
- Håkan Ardö
+ Anders Chrigstrom
+ David Schneider
Richard Emslie
Dan Villiom Podlaski Christiansen
Alexander Schremmer
- Alex Gaynor
- David Schneider
- Aurelién Campeas
+ Aurelien Campeas
Anders Lehmann
Camillo Bruni
Niklaus Haldimann
@@ -35,16 +35,17 @@
Bartosz Skowron
Jakub Gustak
Guido Wesdorp
+ Daniel Roberts
Adrien Di Mascio
Laura Creighton
Ludovic Aubry
Niko Matsakis
- Daniel Roberts
Jason Creighton
- Jacob Hallén
+ Jacob Hallen
Alex Martelli
Anders Hammarquist
Jan de Mooij
+ Wim Lavrijsen
Stephan Diehl
Michael Foord
Stefan Schwarzer
@@ -55,9 +56,13 @@
Alexandre Fayolle
Marius Gedminas
Simon Burton
+ Justin Peel
Jean-Paul Calderone
John Witulski
+ Lukas Diekmann
+ holger krekel
Wim Lavrijsen
+ Dario Bertini
Andreas Stührk
Jean-Philippe St. Pierre
Guido van Rossum
@@ -69,15 +74,16 @@
Georg Brandl
Gerald Klix
Wanja Saatkamp
+ Ronny Pfannschmidt
Boris Feigin
Oscar Nierstrasz
- Dario Bertini
David Malcolm
Eugene Oden
Henry Mason
+ Sven Hager
Lukas Renggli
+ Ilya Osadchiy
Guenter Jantzen
- Ronny Pfannschmidt
Bert Freudenberg
Amit Regmi
Ben Young
@@ -94,8 +100,8 @@
Jared Grubb
Karl Bartel
Gabriel Lavoie
+ Victor Stinner
Brian Dorsey
- Victor Stinner
Stuart Williams
Toby Watson
Antoine Pitrou
@@ -106,19 +112,23 @@
Jonathan David Riehl
Elmo Mäntynen
Anders Qvist
- Beatrice Düring
+ Beatrice During
Alexander Sedov
+ Timo Paulssen
+ Corbin Simpson
Vincent Legoll
+ Romain Guillebert
Alan McIntyre
- Romain Guillebert
Alex Perry
Jens-Uwe Mager
+ Simon Cross
Dan Stromberg
- Lukas Diekmann
+ Guillebert Romain
Carl Meyer
Pieter Zieschang
Alejandro J. Cura
Sylvain Thenault
+ Christoph Gerum
Travis Francis Athougies
Henrik Vendelbo
Lutz Paelike
@@ -129,6 +139,7 @@
Miguel de Val Borro
Ignas Mikalajunas
Artur Lisiecki
+ Philip Jenvey
Joshua Gilbert
Godefroid Chappelle
Yusei Tahara
@@ -137,24 +148,29 @@
Gustavo Niemeyer
William Leslie
Akira Li
- Kristján Valur Jónsson
+ Kristjan Valur Jonsson
Bobby Impollonia
+ Michael Hudson-Doyle
Andrew Thompson
Anders Sigfridsson
+ Floris Bruynooghe
Jacek Generowicz
Dan Colish
- Sven Hager
Zooko Wilcox-O Hearn
+ Dan Villiom Podlaski Christiansen
Anders Hammarquist
+ Chris Lambacher
Dinu Gherman
Dan Colish
+ Brett Cannon
Daniel Neuhäuser
Michael Chermside
Konrad Delong
Anna Ravencroft
Greg Price
Armin Ronacher
+ Christian Muirhead
Jim Baker
- Philip Jenvey
Rodrigo Araújo
+ Romain Guillebert
diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst
--- a/pypy/doc/how-to-release.rst
+++ b/pypy/doc/how-to-release.rst
@@ -21,8 +21,8 @@
Release Steps
----------------
-* at code freeze make a release branch under
- http://codepeak.net/svn/pypy/release/x.y(.z). IMPORTANT: bump the
+* at code freeze make a release branch using release-x.x.x in mercurial
+ IMPORTANT: bump the
pypy version number in module/sys/version.py and in
module/cpyext/include/patchlevel.h, notice that the branch
will capture the revision number of this change for the release;
@@ -48,12 +48,6 @@
the release announcement should contain a direct link to the download page
* update pypy.org (under extradoc/pypy.org), rebuild and commit
-* update http://codespeak.net/pypy/trunk:
- code0> + chmod -R yourname:users /www/codespeak.net/htdocs/pypy/trunk
- local> cd ..../pypy/doc && py.test
- local> cd ..../pypy
- local> rsync -az doc codespeak.net:/www/codespeak.net/htdocs/pypy/trunk/pypy/
-
* post announcement on morepypy.blogspot.com
* send announcements to pypy-dev, python-list,
python-announce, python-dev ...
diff --git a/pypy/doc/release-1.6.0.rst b/pypy/doc/release-1.6.0.rst
new file mode 100644
--- /dev/null
+++ b/pypy/doc/release-1.6.0.rst
@@ -0,0 +1,85 @@
+===========================
+PyPy 1.6 - faster than ever
+===========================
+
+We're pleased to announce the 1.6 release of PyPy. This release brings a lot
+of bugfixes and performance improvements over 1.5, and improves support for
+Windows 32bit and OS X 64bit. This version fully implements Python 2.7.1 and
+has beta level support for loading CPython C extensions. You can download it
+here:
+
+ http://pypy.org/download.html
+
+What is PyPy?
+=============
+
+PyPy is a very compliant Python interpreter, almost a drop-in replacement for
+CPython 2.7.1. It's fast (`pypy 1.5 and cpython 2.6.2`_ performance comparison)
+due to its integrated tracing JIT compiler. XXX: compare to 2.7.1
+
+This release supports x86 machines running Linux 32/64 or Mac OS X. Windows 32
+is beta (it roughly works but a lot of small issues have not been fixed so
+far). Windows 64 is not yet supported.
+
+The main topics of this release are speed and stability: on average, PyPy 1.6
+is between 20% and 30% faster than PyPy 1.5, and overall it's 4.3 times faster
+than CPython when running our set of benchmarks.
+
+The speed improvements have been made possible by optimizing many of the
+layers which compose PyPy. In particular, we improved: the Garbage Collector,
+the JIT warmup time, the optimizations performed by the JIT, the quality of
+the generated machine code and the implementation of our Python interpreter.
+
+
+Highlights
+==========
+
+* Numerous performance improvements, overall giving considerable speedups:
+
+ - better GC behavior when dealing with very large objects and arrays
+
+ - `fast ctypes`_: now calls to ctypes functions are seen and optimized
+ by the JIT, and they are up to 60 times faster than PyPy 1.5 and 10 times
+ faster than CPython
+
+ - improved generators(1): simple generators now are inlined into the caller
+ loop, making performance up to 3.5 times faster than PyPy 1.5.
+
+ - improved generators(2): thanks to other optimizations, even generators
+ that are not inlined are between 10% and 20% faster than PyPy 1.5.
+
+ - faster warmup time for the JIT
+
+ - JIT support for single floats (e.g., for ``array('f')``)
+
+ - optimized dictionaries: the internal representation of dictionaries is now
+ dynamically selected depending on the type of stored objects, resulting in
+ faster code and smaller memory footprint. For example, dictionaries whose
+ keys are all strings, or all integers.
+
+* JitViewer: this is the first official release which includes the JitViewer,
+ a web-based tool which helps you to see which parts of your Python code have
+ been compiled by the JIT, down until the assembler. XXX: publish a public
+ demo?
+
+* The CPython extension module API has been improved and now supports many
+ more extensions. For information on which one are supported, please refer to
+ our `compatibility wiki`_.
+
+* Multibyte encoding support: this was one of the last areas in which we were
+ still behind CPython, but now we fully support them. (XXX: is that true?)
+
+* Preliminary support for NumPy: this release includes a preview of a very
+ fast NumPy module integrated with the PyPy JIT. Unfortunately, this does
+ not mean that you can expect to take an existing NumPy program and run it on
+ PyPy, because the module is still unfinished and supports only some of the
+ numpy API. However, what works is blazingly fast :-)
+
+* Bugfixes: since the 1.5 release we fixed 53 bugs in our `bug tracker`_, not
+ counting the numerous bugs that were found and reported through other
+ channels than the bug tracker.
+
+Cheers,
+
+Carl Friedrich Bolz, Laura Creighton, Antonio Cuni, Maciej Fijalkowski,
+Amaury Forgeot d'Arc, Alex Gaynor, Armin Rigo and the PyPy team
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -111,6 +111,9 @@
def setslotvalue(self, index, w_val):
raise NotImplementedError
+ def delslotvalue(self, index):
+ raise NotImplementedError
+
def descr_call_mismatch(self, space, opname, RequiredClass, args):
if RequiredClass is None:
classname = '?'
diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py
--- a/pypy/interpreter/gateway.py
+++ b/pypy/interpreter/gateway.py
@@ -64,7 +64,7 @@
self.visit_self(el[1], *args)
else:
self.visit_function(el, *args)
- else:
+ elif isinstance(el, type):
for typ in self.bases_order:
if issubclass(el, typ):
visit = getattr(self, "visit__%s" % (typ.__name__,))
@@ -73,6 +73,8 @@
else:
raise Exception("%s: no match for unwrap_spec element %s" % (
self.__class__.__name__, el))
+ else:
+ raise Exception("unable to dispatch, %s, perhaps your parameter should have started with w_?" % el)
def apply_over(self, unwrap_spec, *extra):
dispatch = self.dispatch
diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py
--- a/pypy/interpreter/typedef.py
+++ b/pypy/interpreter/typedef.py
@@ -258,6 +258,11 @@
self.slots_w = [None] * nslots
def setslotvalue(self, index, w_value):
self.slots_w[index] = w_value
+ def delslotvalue(self, index):
+ if self.slots_w[index] is None:
+ return False
+ self.slots_w[index] = None
+ return True
def getslotvalue(self, index):
return self.slots_w[index]
add(Proto)
@@ -530,11 +535,10 @@
"""member.__delete__(obj)
Delete the value of the slot 'member' from the given 'obj'."""
self.typecheck(space, w_obj)
- w_oldresult = w_obj.getslotvalue(self.index)
- if w_oldresult is None:
+ success = w_obj.delslotvalue(self.index)
+ if not success:
raise OperationError(space.w_AttributeError,
space.wrap(self.name)) # XXX better message
- w_obj.setslotvalue(self.index, None)
Member.typedef = TypeDef(
"member_descriptor",
diff --git a/pypy/jit/backend/llgraph/test/test_llgraph.py b/pypy/jit/backend/llgraph/test/test_llgraph.py
--- a/pypy/jit/backend/llgraph/test/test_llgraph.py
+++ b/pypy/jit/backend/llgraph/test/test_llgraph.py
@@ -19,6 +19,9 @@
def setup_method(self, _):
self.cpu = self.cpu_type(None)
+ def test_memoryerror(self):
+ py.test.skip("does not make much sense on the llgraph backend")
+
def test_cast_adr_to_int_and_back():
X = lltype.Struct('X', ('foo', lltype.Signed))
diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py
--- a/pypy/jit/backend/llsupport/gc.py
+++ b/pypy/jit/backend/llsupport/gc.py
@@ -544,18 +544,19 @@
assert self.GCClass.inline_simple_malloc
assert self.GCClass.inline_simple_malloc_varsize
- # make a malloc function, with three arguments
+ # make a malloc function, with two arguments
def malloc_basic(size, tid):
type_id = llop.extract_ushort(llgroup.HALFWORD, tid)
has_finalizer = bool(tid & (1<<llgroup.HALFSHIFT))
check_typeid(type_id)
- try:
- res = llop1.do_malloc_fixedsize_clear(llmemory.GCREF,
- type_id, size, True,
- has_finalizer, False)
- except MemoryError:
- fatalerror("out of memory (from JITted code)")
- res = lltype.nullptr(llmemory.GCREF.TO)
+ res = llop1.do_malloc_fixedsize_clear(llmemory.GCREF,
+ type_id, size, True,
+ has_finalizer, False)
+ # In case the operation above failed, we are returning NULL
+ # from this function to assembler. There is also an RPython
+ # exception set, typically MemoryError; but it's easier and
+ # faster to check for the NULL return value, as done by
+ # translator/exceptiontransform.py.
#llop.debug_print(lltype.Void, "\tmalloc_basic", size, type_id,
# "-->", res)
return res
@@ -571,14 +572,10 @@
def malloc_array(itemsize, tid, num_elem):
type_id = llop.extract_ushort(llgroup.HALFWORD, tid)
check_typeid(type_id)
- try:
- return llop1.do_malloc_varsize_clear(
- llmemory.GCREF,
- type_id, num_elem, self.array_basesize, itemsize,
- self.array_length_ofs, True)
- except MemoryError:
- fatalerror("out of memory (from JITted code)")
- return lltype.nullptr(llmemory.GCREF.TO)
+ return llop1.do_malloc_varsize_clear(
+ llmemory.GCREF,
+ type_id, num_elem, self.array_basesize, itemsize,
+ self.array_length_ofs, True)
self.malloc_array = malloc_array
self.GC_MALLOC_ARRAY = lltype.Ptr(lltype.FuncType(
[lltype.Signed] * 3, llmemory.GCREF))
@@ -591,23 +588,15 @@
unicode_type_id = self.layoutbuilder.get_type_id(rstr.UNICODE)
#
def malloc_str(length):
- try:
- return llop1.do_malloc_varsize_clear(
- llmemory.GCREF,
- str_type_id, length, str_basesize, str_itemsize,
- str_ofs_length, True)
- except MemoryError:
- fatalerror("out of memory (from JITted code)")
- return lltype.nullptr(llmemory.GCREF.TO)
+ return llop1.do_malloc_varsize_clear(
+ llmemory.GCREF,
+ str_type_id, length, str_basesize, str_itemsize,
+ str_ofs_length, True)
def malloc_unicode(length):
- try:
- return llop1.do_malloc_varsize_clear(
- llmemory.GCREF,
- unicode_type_id, length, unicode_basesize,unicode_itemsize,
- unicode_ofs_length, True)
- except MemoryError:
- fatalerror("out of memory (from JITted code)")
- return lltype.nullptr(llmemory.GCREF.TO)
+ return llop1.do_malloc_varsize_clear(
+ llmemory.GCREF,
+ unicode_type_id, length, unicode_basesize,unicode_itemsize,
+ unicode_ofs_length, True)
self.malloc_str = malloc_str
self.malloc_unicode = malloc_unicode
self.GC_MALLOC_STR_UNICODE = lltype.Ptr(lltype.FuncType(
@@ -628,16 +617,12 @@
if self.DEBUG:
random_usage_of_xmm_registers()
assert size >= self.minimal_size_in_nursery
- try:
- # NB. although we call do_malloc_fixedsize_clear() here,
- # it's a bit of a hack because we set tid to 0 and may
- # also use it to allocate varsized objects. The tid
- # and possibly the length are both set afterward.
- gcref = llop1.do_malloc_fixedsize_clear(llmemory.GCREF,
- 0, size, True, False, False)
- except MemoryError:
- fatalerror("out of memory (from JITted code)")
- return 0
+ # NB. although we call do_malloc_fixedsize_clear() here,
+ # it's a bit of a hack because we set tid to 0 and may
+ # also use it to allocate varsized objects. The tid
+ # and possibly the length are both set afterward.
+ gcref = llop1.do_malloc_fixedsize_clear(llmemory.GCREF,
+ 0, size, True, False, False)
return rffi.cast(lltype.Signed, gcref)
self.malloc_slowpath = malloc_slowpath
self.MALLOC_SLOWPATH = lltype.FuncType([lltype.Signed], lltype.Signed)
diff --git a/pypy/jit/backend/llsupport/test/test_ffisupport.py b/pypy/jit/backend/llsupport/test/test_ffisupport.py
--- a/pypy/jit/backend/llsupport/test/test_ffisupport.py
+++ b/pypy/jit/backend/llsupport/test/test_ffisupport.py
@@ -1,4 +1,5 @@
from pypy.rlib.libffi import types
+from pypy.jit.codewriter.longlong import is_64_bit
from pypy.jit.backend.llsupport.ffisupport import *
@@ -34,11 +35,14 @@
assert descr.get_result_size(False) == 1
assert descr.is_result_signed() == False
- descr = get_call_descr_dynamic(FakeCPU(), [], types.slonglong)
- assert descr is None # missing longlongs
- descr = get_call_descr_dynamic(FakeCPU(supports_longlong=True),
- [], types.slonglong)
- assert isinstance(descr, LongLongCallDescr)
+ if not is_64_bit:
+ descr = get_call_descr_dynamic(FakeCPU(), [], types.slonglong)
+ assert descr is None # missing longlongs
+ descr = get_call_descr_dynamic(FakeCPU(supports_longlong=True),
+ [], types.slonglong)
+ assert isinstance(descr, LongLongCallDescr)
+ else:
+ assert types.slonglong is types.slong
descr = get_call_descr_dynamic(FakeCPU(), [], types.float)
assert descr is None # missing singlefloats
diff --git a/pypy/jit/backend/model.py b/pypy/jit/backend/model.py
--- a/pypy/jit/backend/model.py
+++ b/pypy/jit/backend/model.py
@@ -14,7 +14,7 @@
done_with_this_frame_int_v = -1
done_with_this_frame_ref_v = -1
done_with_this_frame_float_v = -1
- exit_frame_with_exception_v = -1
+ propagate_exception_v = -1
total_compiled_loops = 0
total_compiled_bridges = 0
total_freed_loops = 0
diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py
--- a/pypy/jit/backend/test/runner_test.py
+++ b/pypy/jit/backend/test/runner_test.py
@@ -2807,6 +2807,26 @@
assert mem2 < mem1
assert mem2 == mem0
+ def test_memoryerror(self):
+ excdescr = BasicFailDescr(666)
+ self.cpu.propagate_exception_v = self.cpu.get_fail_descr_number(
+ excdescr)
+ self.cpu.setup_once() # xxx redo it, because we added
+ # propagate_exception_v
+ i0 = BoxInt()
+ p0 = BoxPtr()
+ operations = [
+ ResOperation(rop.NEWUNICODE, [i0], p0),
+ ResOperation(rop.FINISH, [p0], None, descr=BasicFailDescr(1))
+ ]
+ inputargs = [i0]
+ looptoken = LoopToken()
+ self.cpu.compile_loop(inputargs, operations, looptoken)
+ # overflowing value:
+ self.cpu.set_future_value_int(0, sys.maxint // 4 + 1)
+ fail = self.cpu.execute_token(looptoken)
+ assert fail.identifier == excdescr.identifier
+
class OOtypeBackendTest(BaseBackendTest):
diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py
--- a/pypy/jit/backend/x86/assembler.py
+++ b/pypy/jit/backend/x86/assembler.py
@@ -91,6 +91,7 @@
self._current_depths_cache = (0, 0)
self.datablockwrapper = None
self.stack_check_slowpath = 0
+ self.propagate_exception_path = 0
self.teardown()
def leave_jitted_hook(self):
@@ -127,6 +128,7 @@
self._build_failure_recovery(True, withfloats=True)
support.ensure_sse2_floats()
self._build_float_constants()
+ self._build_propagate_exception_path()
if gc_ll_descr.get_malloc_slowpath_addr is not None:
self._build_malloc_slowpath()
self._build_stack_check_slowpath()
@@ -140,6 +142,9 @@
assert self.memcpy_addr != 0, "setup_once() not called?"
self.current_clt = looptoken.compiled_loop_token
self.pending_guard_tokens = []
+ if WORD == 8:
+ self.pending_memoryerror_trampoline_from = []
+ self.error_trampoline_64 = 0
self.mc = codebuf.MachineCodeBlockWrapper()
#assert self.datablockwrapper is None --- but obscure case
# possible, e.g. getting MemoryError and continuing
@@ -149,6 +154,8 @@
def teardown(self):
self.pending_guard_tokens = None
+ if WORD == 8:
+ self.pending_memoryerror_trampoline_from = None
self.mc = None
self.looppos = -1
self.currently_compiling_loop = None
@@ -237,15 +244,47 @@
if self.cpu.supports_floats: # restore the XMM registers
for i in range(self.cpu.NUM_REGS):# from where they were saved
mc.MOVSD_xs(i, (WORD*2)+8*i)
+ #
+ # Note: we check this after the code above, just because the code
+ # above is more than 127 bytes on 64-bits...
+ mc.TEST_rr(eax.value, eax.value)
+ mc.J_il8(rx86.Conditions['Z'], 0) # patched later
+ jz_location = mc.get_relative_pos()
+ #
nursery_free_adr = self.cpu.gc_ll_descr.get_nursery_free_addr()
mc.MOV(edx, heap(nursery_free_adr)) # load this in EDX
mc.RET()
+ #
+ # If the slowpath malloc failed, we raise a MemoryError that
+ # always interrupts the current loop, as a "good enough"
+ # approximation. Also note that we didn't RET from this helper;
+ # but the code we jump to will actually restore the stack
+ # position based on EBP, which will get us out of here for free.
+ offset = mc.get_relative_pos() - jz_location
+ assert 0 < offset <= 127
+ mc.overwrite(jz_location-1, chr(offset))
+ mc.JMP(imm(self.propagate_exception_path))
+ #
rawstart = mc.materialize(self.cpu.asmmemmgr, [])
self.malloc_slowpath2 = rawstart
+ def _build_propagate_exception_path(self):
+ if self.cpu.propagate_exception_v < 0:
+ return # not supported (for tests, or non-translated)
+ #
+ self.mc = codebuf.MachineCodeBlockWrapper()
+ # call on_leave_jitted_save_exc()
+ addr = self.cpu.get_on_leave_jitted_int(save_exception=True)
+ self.mc.CALL(imm(addr))
+ self.mc.MOV_ri(eax.value, self.cpu.propagate_exception_v)
+ self._call_footer()
+ rawstart = self.mc.materialize(self.cpu.asmmemmgr, [])
+ self.propagate_exception_path = rawstart
+ self.mc = None
+
def _build_stack_check_slowpath(self):
_, _, slowpathaddr = self.cpu.insert_stack_check()
- if slowpathaddr == 0 or self.cpu.exit_frame_with_exception_v < 0:
+ if slowpathaddr == 0 or self.cpu.propagate_exception_v < 0:
return # no stack check (for tests, or non-translated)
#
# make a "function" that is called immediately at the start of
@@ -301,19 +340,11 @@
offset = mc.get_relative_pos() - jnz_location
assert 0 < offset <= 127
mc.overwrite(jnz_location-1, chr(offset))
- # clear the exception from the global position
- mc.MOV(eax, heap(self.cpu.pos_exc_value()))
- mc.MOV(heap(self.cpu.pos_exception()), imm0)
- mc.MOV(heap(self.cpu.pos_exc_value()), imm0)
- # save the current exception instance into fail_boxes_ptr[0]
- adr = self.fail_boxes_ptr.get_addr_for_num(0)
- mc.MOV(heap(adr), eax)
- # call the helper function to set the GC flag on the fail_boxes_ptr
- # array (note that there is no exception any more here)
- addr = self.cpu.get_on_leave_jitted_int(save_exception=False)
+ # call on_leave_jitted_save_exc()
+ addr = self.cpu.get_on_leave_jitted_int(save_exception=True)
mc.CALL(imm(addr))
#
- mc.MOV_ri(eax.value, self.cpu.exit_frame_with_exception_v)
+ mc.MOV_ri(eax.value, self.cpu.propagate_exception_v)
#
# footer -- note the ADD, which skips the return address of this
# function, and will instead return to the caller's caller. Note
@@ -525,6 +556,8 @@
# at the end of self.mc.
for tok in self.pending_guard_tokens:
tok.pos_recovery_stub = self.generate_quick_failure(tok)
+ if WORD == 8 and len(self.pending_memoryerror_trampoline_from) > 0:
+ self.error_trampoline_64 = self.generate_propagate_error_64()
def patch_pending_failure_recoveries(self, rawstart):
# after we wrote the assembler to raw memory, set up
@@ -561,6 +594,12 @@
# less, we would run into the issue that overwriting the
# 5 bytes here might get a few nonsense bytes at the
# return address of the following CALL.
+ if WORD == 8:
+ for pos_after_jz in self.pending_memoryerror_trampoline_from:
+ assert self.error_trampoline_64 != 0 # only if non-empty
+ mc = codebuf.MachineCodeBlockWrapper()
+ mc.writeimm32(self.error_trampoline_64 - pos_after_jz)
+ mc.copy_to_raw_memory(rawstart + pos_after_jz - 4)
def get_asmmemmgr_blocks(self, looptoken):
clt = looptoken.compiled_loop_token
@@ -1422,7 +1461,7 @@
assert isinstance(loc_vtable, ImmedLoc)
arglocs = arglocs[:-1]
self.call(self.malloc_func_addr, arglocs, eax)
- # xxx ignore NULL returns for now
+ self.propagate_memoryerror_if_eax_is_null()
self.set_vtable(eax, loc_vtable)
def set_vtable(self, loc, loc_vtable):
@@ -1441,18 +1480,35 @@
def genop_new(self, op, arglocs, result_loc):
assert result_loc is eax
self.call(self.malloc_func_addr, arglocs, eax)
+ self.propagate_memoryerror_if_eax_is_null()
def genop_new_array(self, op, arglocs, result_loc):
assert result_loc is eax
self.call(self.malloc_array_func_addr, arglocs, eax)
+ self.propagate_memoryerror_if_eax_is_null()
def genop_newstr(self, op, arglocs, result_loc):
assert result_loc is eax
self.call(self.malloc_str_func_addr, arglocs, eax)
+ self.propagate_memoryerror_if_eax_is_null()
def genop_newunicode(self, op, arglocs, result_loc):
assert result_loc is eax
self.call(self.malloc_unicode_func_addr, arglocs, eax)
+ self.propagate_memoryerror_if_eax_is_null()
+
+ def propagate_memoryerror_if_eax_is_null(self):
+ # if self.propagate_exception_path == 0 (tests), this may jump to 0
+ # and segfaults. too bad. the alternative is to continue anyway
+ # with eax==0, but that will segfault too.
+ self.mc.TEST_rr(eax.value, eax.value)
+ if WORD == 4:
+ self.mc.J_il(rx86.Conditions['Z'], self.propagate_exception_path)
+ self.mc.add_pending_relocation()
+ elif WORD == 8:
+ self.mc.J_il(rx86.Conditions['Z'], 0)
+ pos = self.mc.get_relative_pos()
+ self.pending_memoryerror_trampoline_from.append(pos)
# ----------
@@ -1724,6 +1780,12 @@
return GuardToken(faildescr, failargs, fail_locs, exc,
is_guard_not_invalidated)
+ def generate_propagate_error_64(self):
+ assert WORD == 8
+ startpos = self.mc.get_relative_pos()
+ self.mc.JMP(imm(self.propagate_exception_path))
+ return startpos
+
def generate_quick_failure(self, guardtok):
"""Generate the initial code for handling a failure. We try to
keep it as compact as possible.
diff --git a/pypy/jit/backend/x86/codebuf.py b/pypy/jit/backend/x86/codebuf.py
--- a/pypy/jit/backend/x86/codebuf.py
+++ b/pypy/jit/backend/x86/codebuf.py
@@ -25,8 +25,11 @@
self.init_block_builder()
# a list of relative positions; for each position p, the bytes
# at [p-4:p] encode an absolute address that will need to be
- # made relative.
- self.relocations = []
+ # made relative. Only works on 32-bit!
+ if WORD == 4:
+ self.relocations = []
+ else:
+ self.relocations = None
#
# ResOperation --> offset in the assembly.
# ops_offset[None] represents the beginning of the code after the last op
@@ -42,9 +45,10 @@
def copy_to_raw_memory(self, addr):
self._copy_to_raw_memory(addr)
- for reloc in self.relocations:
- p = addr + reloc
- adr = rffi.cast(rffi.LONGP, p - WORD)
- adr[0] = intmask(adr[0] - p)
+ if self.relocations is not None:
+ for reloc in self.relocations:
+ p = addr + reloc
+ adr = rffi.cast(rffi.LONGP, p - WORD)
+ adr[0] = intmask(adr[0] - p)
valgrind.discard_translations(addr, self.get_relative_pos())
self._dump(addr, "jit-backend-dump", backend_name)
diff --git a/pypy/jit/backend/x86/test/test_regloc.py b/pypy/jit/backend/x86/test/test_regloc.py
--- a/pypy/jit/backend/x86/test/test_regloc.py
+++ b/pypy/jit/backend/x86/test/test_regloc.py
@@ -62,7 +62,7 @@
assert mc.relocations == [5]
expected = "\xE8" + struct.pack('<i', target - (rawstart + 5))
elif IS_X86_64:
- assert mc.relocations == []
+ assert mc.relocations is None
if 0 <= target <= 0xffffffff:
assert length == 9
expected = (
diff --git a/pypy/jit/backend/x86/test/test_runner.py b/pypy/jit/backend/x86/test/test_runner.py
--- a/pypy/jit/backend/x86/test/test_runner.py
+++ b/pypy/jit/backend/x86/test/test_runner.py
@@ -463,7 +463,7 @@
self.cpu.finish_once()
finally:
debug._log = None
- assert ('jit-backend-counts', [('debug_print', '0:10')]) in dlog
+ assert ('jit-backend-counts', [('debug_print', 'loop -1:10')]) in dlog
def test_debugger_checksum(self):
loop = """
diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py
--- a/pypy/jit/metainterp/compile.py
+++ b/pypy/jit/metainterp/compile.py
@@ -668,10 +668,9 @@
def handle_fail(self, metainterp_sd, jitdriver_sd):
cpu = metainterp_sd.cpu
exception = cpu.grab_exc_value()
+ assert exception, "PropagateExceptionDescr: no exception??"
raise metainterp_sd.ExitFrameWithExceptionRef(cpu, exception)
-propagate_exception_descr = PropagateExceptionDescr()
-
def compile_tmp_callback(cpu, jitdriver_sd, greenboxes, redboxes,
memory_manager=None):
"""Make a LoopToken that corresponds to assembler code that just
@@ -705,7 +704,7 @@
finishargs = []
#
jd = jitdriver_sd
- faildescr = propagate_exception_descr
+ faildescr = PropagateExceptionDescr()
operations = [
ResOperation(rop.CALL, callargs, result, descr=jd.portal_calldescr),
ResOperation(rop.GUARD_NO_EXCEPTION, [], None, descr=faildescr),
diff --git a/pypy/jit/metainterp/optimizeopt/fficall.py b/pypy/jit/metainterp/optimizeopt/fficall.py
--- a/pypy/jit/metainterp/optimizeopt/fficall.py
+++ b/pypy/jit/metainterp/optimizeopt/fficall.py
@@ -1,7 +1,7 @@
from pypy.rpython.annlowlevel import cast_base_ptr_to_instance
from pypy.rlib.objectmodel import we_are_translated
from pypy.rlib.libffi import Func
-from pypy.rlib.debug import debug_start, debug_stop, debug_print
+from pypy.rlib.debug import debug_print
from pypy.jit.codewriter.effectinfo import EffectInfo
from pypy.jit.metainterp.resoperation import rop, ResOperation
from pypy.jit.metainterp.optimizeopt.util import make_dispatcher_method
@@ -74,14 +74,6 @@
else:
self.logops = None
- def propagate_begin_forward(self):
- debug_start('jit-log-ffiopt')
- Optimization.propagate_begin_forward(self)
-
- def propagate_end_forward(self):
- debug_stop('jit-log-ffiopt')
- Optimization.propagate_end_forward(self)
-
def reconstruct_for_next_iteration(self, optimizer, valuemap):
return OptFfiCall()
# FIXME: Should any status be saved for next iteration?
diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py
--- a/pypy/jit/metainterp/optimizeopt/heap.py
+++ b/pypy/jit/metainterp/optimizeopt/heap.py
@@ -239,13 +239,14 @@
return
cf.force_lazy_setfield(self, can_cache)
- def force_lazy_setarrayitem(self, arraydescr, can_cache=True):
+ def force_lazy_setarrayitem(self, arraydescr, indexvalue=None, can_cache=True):
try:
submap = self.cached_arrayitems[arraydescr]
except KeyError:
return
- for cf in submap.values():
- cf.force_lazy_setfield(self, can_cache)
+ for idx, cf in submap.iteritems():
+ if indexvalue is None or indexvalue.intbound.contains(idx):
+ cf.force_lazy_setfield(self, can_cache)
def fixup_guard_situation(self):
# hackish: reverse the order of the last two operations if it makes
@@ -357,7 +358,7 @@
return
else:
# variable index, so make sure the lazy setarrayitems are done
- self.force_lazy_setarrayitem(op.getdescr())
+ self.force_lazy_setarrayitem(op.getdescr(), indexvalue=indexvalue)
# default case: produce the operation
arrayvalue.ensure_nonnull()
self.emit_operation(op)
@@ -381,7 +382,7 @@
cf.do_setfield(self, op)
else:
# variable index, so make sure the lazy setarrayitems are done
- self.force_lazy_setarrayitem(op.getdescr(), can_cache=False)
+ self.force_lazy_setarrayitem(op.getdescr(), indexvalue=indexvalue, can_cache=False)
# and then emit the operation
self.emit_operation(op)
diff --git a/pypy/jit/metainterp/optimizeopt/intbounds.py b/pypy/jit/metainterp/optimizeopt/intbounds.py
--- a/pypy/jit/metainterp/optimizeopt/intbounds.py
+++ b/pypy/jit/metainterp/optimizeopt/intbounds.py
@@ -125,6 +125,17 @@
r = self.getvalue(op.result)
r.intbound.intersect(v1.intbound.div_bound(v2.intbound))
+ def optimize_INT_MOD(self, op):
+ self.emit_operation(op)
+ v2 = self.getvalue(op.getarg(1))
+ if v2.is_constant():
+ val = v2.box.getint()
+ r = self.getvalue(op.result)
+ if val < 0:
+ val = -val
+ r.intbound.make_gt(IntBound(-val, -val))
+ r.intbound.make_lt(IntBound(val, val))
+
def optimize_INT_LSHIFT(self, op):
v1 = self.getvalue(op.getarg(0))
v2 = self.getvalue(op.getarg(1))
diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py
--- a/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py
+++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py
@@ -4621,6 +4621,96 @@
"""
self.optimize_strunicode_loop(ops, expected)
+ def test_intmod_bounds(self):
+ ops = """
+ [i0, i1]
+ i2 = int_mod(i0, 12)
+ i3 = int_gt(i2, 12)
+ guard_false(i3) []
+ i4 = int_lt(i2, -12)
+ guard_false(i4) []
+ i5 = int_mod(i1, -12)
+ i6 = int_lt(i5, -12)
+ guard_false(i6) []
+ i7 = int_gt(i5, 12)
+ guard_false(i7) []
+ jump(i2, i5)
+ """
+ expected = """
+ [i0, i1]
+ i2 = int_mod(i0, 12)
+ i5 = int_mod(i1, -12)
+ jump(i2, i5)
+ """
+ self.optimize_loop(ops, expected)
+
+ # This is the sequence of resoperations that is generated for a Python
+ # app-level int % int. When the modulus is constant and when i0
+ # is known non-negative it should be optimized to a single int_mod.
+ ops = """
+ [i0]
+ i5 = int_ge(i0, 0)
+ guard_true(i5) []
+ i1 = int_mod(i0, 42)
+ i2 = int_rshift(i1, 63)
+ i3 = int_and(42, i2)
+ i4 = int_add(i1, i3)
+ finish(i4)
+ """
+ expected = """
+ [i0]
+ i5 = int_ge(i0, 0)
+ guard_true(i5) []
+ i1 = int_mod(i0, 42)
+ finish(i1)
+ """
+ py.test.skip("in-progress")
+ self.optimize_loop(ops, expected)
+
+ # Also, 'n % power-of-two' can be turned into int_and(),
+ # but that's a bit harder to detect here because it turns into
+ # several operations, and of course it is wrong to just turn
+ # int_mod(i0, 16) into int_and(i0, 15).
+ ops = """
+ [i0]
+ i1 = int_mod(i0, 16)
+ i2 = int_rshift(i1, 63)
+ i3 = int_and(16, i2)
+ i4 = int_add(i1, i3)
+ finish(i4)
+ """
+ expected = """
+ [i0]
+ i4 = int_and(i0, 15)
+ finish(i4)
+ """
+ py.test.skip("harder")
+ self.optimize_loop(ops, expected)
+
+ def test_bounded_lazy_setfield(self):
+ ops = """
+ [p0, i0]
+ i1 = int_gt(i0, 2)
+ guard_true(i1) []
+ setarrayitem_gc(p0, 0, 3)
+ setarrayitem_gc(p0, 2, 4)
+ setarrayitem_gc(p0, i0, 15)
+ i2 = getarrayitem_gc(p0, 2)
+ jump(p0, i2)
+ """
+ # Remove the getarrayitem_gc, because we know that p[i0] does not alias
+ # p0[2]
+ expected = """
+ [p0, i0]
+ i1 = int_gt(i0, 2)
+ guard_true(i1) []
+ setarrayitem_gc(p0, i0, 15)
+ setarrayitem_gc(p0, 0, 3)
+ setarrayitem_gc(p0, 2, 4)
+ jump(p0, 4)
+ """
+ self.optimize_loop(ops, expected)
+
def test_empty_copystrunicontent(self):
ops = """
[p0, p1, i0, i2, i3]
diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py
--- a/pypy/jit/metainterp/pyjitpl.py
+++ b/pypy/jit/metainterp/pyjitpl.py
@@ -1394,9 +1394,9 @@
num = self.cpu.get_fail_descr_number(tokens[0].finishdescr)
setattr(self.cpu, 'done_with_this_frame_%s_v' % name, num)
#
- tokens = self.loop_tokens_exit_frame_with_exception_ref
- num = self.cpu.get_fail_descr_number(tokens[0].finishdescr)
- self.cpu.exit_frame_with_exception_v = num
+ exc_descr = compile.PropagateExceptionDescr()
+ num = self.cpu.get_fail_descr_number(exc_descr)
+ self.cpu.propagate_exception_v = num
#
self.globaldata = MetaInterpGlobalData(self)
diff --git a/pypy/jit/tool/oparser.py b/pypy/jit/tool/oparser.py
--- a/pypy/jit/tool/oparser.py
+++ b/pypy/jit/tool/oparser.py
@@ -53,7 +53,7 @@
class OpParser(object):
use_mock_model = False
-
+
def __init__(self, input, cpu, namespace, type_system, boxkinds,
invent_fail_descr=default_fail_descr,
nonstrict=False):
@@ -187,7 +187,7 @@
poss_descr = allargs[-1].strip()
if poss_descr.startswith('descr='):
descr = self.get_descr(poss_descr[len('descr='):])
- allargs = allargs[:-1]
+ allargs = allargs[:-1]
for arg in allargs:
arg = arg.strip()
try:
@@ -240,7 +240,7 @@
fail_args = None
if opnum == rop.FINISH:
if descr is None and self.invent_fail_descr:
- descr = self.invent_fail_descr(self.model)
+ descr = self.invent_fail_descr(self.model, fail_args)
elif opnum == rop.JUMP:
if descr is None and self.invent_fail_descr:
descr = self.looptoken
diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h
--- a/pypy/module/cpyext/include/patchlevel.h
+++ b/pypy/module/cpyext/include/patchlevel.h
@@ -31,8 +31,9 @@
/* PyPy version as a string */
#define PYPY_VERSION "1.6.0"
-/* Subversion Revision number of this file (not of the repository) */
-#define PY_PATCHLEVEL_REVISION "$Revision: 77872 $"
+/* Subversion Revision number of this file (not of the repository).
+ * Empty since Mercurial migration. */
+#define PY_PATCHLEVEL_REVISION ""
/* Version as a single 4-byte hex number, e.g. 0x010502B2 == 1.5.2b2.
Use this for numeric comparisons, e.g. #if PY_VERSION_HEX >= ... */
diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py
--- a/pypy/module/itertools/interp_itertools.py
+++ b/pypy/module/itertools/interp_itertools.py
@@ -339,16 +339,21 @@
start = 0
else:
start = space.int_w(w_startstop)
+ if start < 0:
+ raise OperationError(space.w_ValueError, space.wrap(
+ "Indicies for islice() must be non-negative integers."))
w_stop = args_w[0]
else:
raise OperationError(space.w_TypeError, space.wrap("islice() takes at most 4 arguments (" + str(num_args) + " given)"))
if space.is_w(w_stop, space.w_None):
stop = -1
- stoppable = False
else:
stop = space.int_w(w_stop)
- stoppable = True
+ if stop < 0:
+ raise OperationError(space.w_ValueError, space.wrap(
+ "Stop argument must be a non-negative integer or None."))
+ stop = max(start, stop) # for obscure CPython compatibility
if num_args == 2:
w_step = args_w[1]
@@ -356,38 +361,37 @@
step = 1
else:
step = space.int_w(w_step)
+ if step < 1:
+ raise OperationError(space.w_ValueError, space.wrap(
+ "Step must be one or lager for islice()."))
else:
step = 1
- if start < 0:
- raise OperationError(space.w_ValueError, space.wrap("Indicies for islice() must be non-negative integers."))
- if stoppable and stop < 0:
- raise OperationError(space.w_ValueError, space.wrap("Stop argument must be a non-negative integer or None."))
- if step < 1:
- raise OperationError(space.w_ValueError, space.wrap("Step must be one or lager for islice()."))
-
+ self.ignore = step - 1
self.start = start
self.stop = stop
- self.step = step
def iter_w(self):
return self.space.wrap(self)
def next_w(self):
if self.start >= 0: # first call only
- consume = self.start + 1
+ ignore = self.start
self.start = -1
else: # all following calls
- consume = self.step
- if consume > 1:
- self._ignore_items(consume-1)
- if self.stop >= 0:
- if self.stop < consume:
+ ignore = self.ignore
+ stop = self.stop
+ if stop >= 0:
+ if stop <= ignore:
self.stop = 0 # reset the state so that a following next_w()
- self.step = 1 # has no effect any more
+ # has no effect any more
+ if stop > 0:
+ self._ignore_items(stop)
raise OperationError(self.space.w_StopIteration,
self.space.w_None)
- self.stop -= consume
+ self.stop = stop - (ignore + 1)
+ if ignore > 0:
+ self._ignore_items(ignore)
return self.space.next(self.iterable)
def _ignore_items(self, num):
diff --git a/pypy/module/itertools/test/test_itertools.py b/pypy/module/itertools/test/test_itertools.py
--- a/pypy/module/itertools/test/test_itertools.py
+++ b/pypy/module/itertools/test/test_itertools.py
@@ -266,6 +266,13 @@
raises(StopIteration, islc.next) # drops the 6th and raise
assert it.next() == "j"
+ it = iter("abcdefghij")
+ islc = itertools.islice(it, 3, 4, 3)
+ assert islc.next() == "d" # drops 0, 1, 2, returns item #3
+ assert it.next() == "e"
+ raises(StopIteration, islc.next) # item #4 is 'stop', so just raise
+ assert it.next() == "f"
+
def test_islice_overflow(self):
import itertools
import sys
diff --git a/pypy/module/pypyjit/test_pypy_c/test_string.py b/pypy/module/pypyjit/test_pypy_c/test_string.py
--- a/pypy/module/pypyjit/test_pypy_c/test_string.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_string.py
@@ -91,7 +91,12 @@
i46 = call(ConstClass(ll_startswith__rpy_stringPtr_rpy_stringPtr), p28, ConstPtr(ptr45), descr=<BoolCallDescr>)
guard_false(i46, descr=...)
p51 = new_with_vtable(21136408)
- ...
+ setfield_gc(p51, _, descr=...) # 6 setfields, but the order is dict-order-dependent
+ setfield_gc(p51, _, descr=...)
+ setfield_gc(p51, _, descr=...)
+ setfield_gc(p51, _, descr=...)
+ setfield_gc(p51, _, descr=...)
+ setfield_gc(p51, _, descr=...)
p55 = call(ConstClass(parse_digit_string), p51, descr=<GcPtrCallDescr>)
guard_no_exception(descr=...)
i57 = call(ConstClass(rbigint.toint), p55, descr=<SignedCallDescr>)
diff --git a/pypy/module/rctime/interp_time.py b/pypy/module/rctime/interp_time.py
--- a/pypy/module/rctime/interp_time.py
+++ b/pypy/module/rctime/interp_time.py
@@ -207,13 +207,13 @@
t = (((c_time(lltype.nullptr(rffi.TIME_TP.TO))) / YEAR) * YEAR)
# we cannot have reference to stack variable, put it on the heap
t_ref = lltype.malloc(rffi.TIME_TP.TO, 1, flavor='raw')
- t_ref[0] = t
+ t_ref[0] = rffi.cast(rffi.TIME_T, t)
p = c_localtime(t_ref)
janzone = -p.c_tm_gmtoff
tm_zone = rffi.charp2str(p.c_tm_zone)
janname = [" ", tm_zone][bool(tm_zone)]
tt = t + YEAR / 2
- t_ref[0] = tt
+ t_ref[0] = rffi.cast(rffi.TIME_T, tt)
p = c_localtime(t_ref)
lltype.free(t_ref, flavor='raw')
tm_zone = rffi.charp2str(p.c_tm_zone)
@@ -292,11 +292,14 @@
else:
seconds = space.float_w(w_seconds)
try:
- ovfcheck_float_to_int(seconds)
+ seconds = ovfcheck_float_to_int(seconds)
+ t = rffi.r_time_t(seconds)
+ if rffi.cast(lltype.Signed, t) != seconds:
+ raise OverflowError
except OverflowError:
raise OperationError(space.w_ValueError,
space.wrap("time argument too large"))
- return rffi.r_time_t(seconds)
+ return t
def _tm_to_tuple(space, t):
time_tuple = [
@@ -317,7 +320,7 @@
def _gettmarg(space, w_tup, allowNone=True):
if allowNone and space.is_w(w_tup, space.w_None):
# default to the current local time
- tt = rffi.r_time_t(pytime.time())
+ tt = rffi.r_time_t(int(pytime.time()))
t_ref = lltype.malloc(rffi.TIME_TP.TO, 1, flavor='raw')
t_ref[0] = tt
pbuf = c_localtime(t_ref)
diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_fastpath.py b/pypy/module/test_lib_pypy/ctypes_tests/test_fastpath.py
--- a/pypy/module/test_lib_pypy/ctypes_tests/test_fastpath.py
+++ b/pypy/module/test_lib_pypy/ctypes_tests/test_fastpath.py
@@ -1,4 +1,4 @@
-from ctypes import CDLL, POINTER, pointer, c_byte, c_int, c_char_p
+from ctypes import CDLL, POINTER, pointer, c_byte, c_int, c_char_p, CFUNCTYPE, c_void_p, c_size_t
import sys
import py
from support import BaseCTypesTestChecker
@@ -46,6 +46,12 @@
tf_b.argtypes = (c_byte,)
assert tf_b(-126) == -42
+ def test_from_cfunctype(self):
+ from _ctypes import _memmove_addr
+ functype = CFUNCTYPE(c_void_p, c_void_p, c_void_p, c_size_t)
+ my_memmove = functype(_memmove_addr)
+ assert my_memmove._is_fastpath
+
def test_undeclared_restype(self):
# make sure we get a fresh function
try:
diff --git a/pypy/objspace/flow/operation.py b/pypy/objspace/flow/operation.py
--- a/pypy/objspace/flow/operation.py
+++ b/pypy/objspace/flow/operation.py
@@ -359,10 +359,10 @@
# All arguments are constants: call the operator now
try:
result = op(*args)
- except:
- etype, evalue, etb = sys.exc_info()
- msg = "generated by a constant operation: %s%r" % (
- name, tuple(args))
+ except Exception, e:
+ etype = e.__class__
+ msg = "generated by a constant operation: %s" % (
+ name)
raise OperationThatShouldNotBePropagatedError(
self.wrap(etype), self.wrap(msg))
else:
diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py
--- a/pypy/objspace/std/mapdict.py
+++ b/pypy/objspace/std/mapdict.py
@@ -424,6 +424,14 @@
key = ("slot", SLOTS_STARTING_FROM + index)
self._get_mapdict_map().write(self, key, w_value)
+ def delslotvalue(self, index):
+ key = ("slot", SLOTS_STARTING_FROM + index)
+ new_obj = self._get_mapdict_map().delete(self, key)
+ if new_obj is None:
+ return False
+ self._become(new_obj)
+ return True
+
# used by _weakref implemenation
def getweakref(self):
diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py
--- a/pypy/objspace/std/test/test_mapdict.py
+++ b/pypy/objspace/std/test/test_mapdict.py
@@ -210,6 +210,12 @@
assert obj2.storage == [501, 601, 701, 51, 61, 71]
assert obj.map is obj2.map
+ assert obj2.getslotvalue(b) == 601
+ assert obj2.delslotvalue(b)
+ assert obj2.getslotvalue(b) is None
+ assert obj2.storage == [501, 701, 51, 61, 71]
+ assert not obj2.delslotvalue(b)
+
def test_slots_no_dict():
cls = Class(hasdict=False)
@@ -631,6 +637,14 @@
a.__dict__ = {}
a.__dict__ = {}
+ def test_delete_slot(self):
+ class A(object):
+ __slots__ = ['x']
+
+ a = A()
+ a.x = 42
+ del a.x
+ raises(AttributeError, "a.x")
class AppTestWithMapDictAndCounters(object):
def setup_class(cls):
diff --git a/pypy/rpython/lltypesystem/lltype.py b/pypy/rpython/lltypesystem/lltype.py
--- a/pypy/rpython/lltypesystem/lltype.py
+++ b/pypy/rpython/lltypesystem/lltype.py
@@ -1149,7 +1149,7 @@
try:
return self._lookup_adtmeth(field_name)
except AttributeError:
- raise AttributeError("%r instance has no field %r" % (self._T,
+ raise AttributeError("%r instance has no field %r" % (self._T._name,
field_name))
def __setattr__(self, field_name, val):
diff --git a/pypy/rpython/lltypesystem/rlist.py b/pypy/rpython/lltypesystem/rlist.py
--- a/pypy/rpython/lltypesystem/rlist.py
+++ b/pypy/rpython/lltypesystem/rlist.py
@@ -14,7 +14,6 @@
from pypy.rpython.lltypesystem import rstr
from pypy.rpython import robject
from pypy.rlib.debug import ll_assert
-from pypy.rlib.rarithmetic import ovfcheck
from pypy.rpython.lltypesystem import rffi
from pypy.rpython.lltypesystem.lloperation import llop
from pypy.rlib import rgc
@@ -200,12 +199,11 @@
else:
some = 6
some += newsize >> 3
- try:
- new_allocated = ovfcheck(newsize + some)
- except OverflowError:
- raise MemoryError
+ new_allocated = newsize + some
# new_allocated is a bit more than newsize, enough to ensure an amortized
- # linear complexity for e.g. repeated usage of l.append().
+ # linear complexity for e.g. repeated usage of l.append(). In case
+ # it overflows sys.maxint, it is guaranteed negative, and the following
+ # malloc() will fail.
items = l.items
newitems = malloc(typeOf(l).TO.items.TO, new_allocated)
before_len = l.length
diff --git a/pypy/rpython/lltypesystem/rstr.py b/pypy/rpython/lltypesystem/rstr.py
--- a/pypy/rpython/lltypesystem/rstr.py
+++ b/pypy/rpython/lltypesystem/rstr.py
@@ -342,6 +342,8 @@
def ll_strconcat(s1, s2):
len1 = len(s1.chars)
len2 = len(s2.chars)
+ # a single '+' like this is allowed to overflow: it gets
+ # a negative result, and the gc will complain
newstr = s1.malloc(len1 + len2)
s1.copy_contents(s1, newstr, 0, 0, len1)
s1.copy_contents(s2, newstr, 0, len1, len2)
@@ -409,9 +411,18 @@
itemslen = 0
i = 0
while i < num_items:
- itemslen += len(items[i].chars)
+ try:
+ itemslen = ovfcheck(itemslen + len(items[i].chars))
+ except OverflowError:
+ raise MemoryError
i += 1
- result = s.malloc(itemslen + s_len * (num_items - 1))
+ try:
+ seplen = ovfcheck(s_len * (num_items - 1))
+ except OverflowError:
+ raise MemoryError
+ # a single '+' at the end is allowed to overflow: it gets
+ # a negative result, and the gc will complain
+ result = s.malloc(itemslen + seplen)
res_index = len(items[0].chars)
s.copy_contents(items[0], result, 0, 0, res_index)
i = 1
@@ -691,7 +702,10 @@
itemslen = 0
i = 0
while i < num_items:
- itemslen += len(items[i].chars)
+ try:
+ itemslen = ovfcheck(itemslen + len(items[i].chars))
+ except OverflowError:
+ raise MemoryError
i += 1
if typeOf(items).TO.OF.TO == STR:
malloc = mallocstr
diff --git a/pypy/rpython/memory/gc/minimark.py b/pypy/rpython/memory/gc/minimark.py
--- a/pypy/rpython/memory/gc/minimark.py
+++ b/pypy/rpython/memory/gc/minimark.py
@@ -517,17 +517,19 @@
# constant-folded because self.nonlarge_max, size and itemsize
# are all constants (the arguments are constant due to
# inlining).
- if not raw_malloc_usage(itemsize):
- too_many_items = raw_malloc_usage(nonvarsize) > self.nonlarge_max
+ maxsize = self.nonlarge_max - raw_malloc_usage(nonvarsize)
+ if maxsize < 0:
+ toobig = r_uint(0) # the nonvarsize alone is too big
+ elif raw_malloc_usage(itemsize):
+ toobig = r_uint(maxsize // raw_malloc_usage(itemsize)) + 1
else:
- maxlength = self.nonlarge_max - raw_malloc_usage(nonvarsize)
- maxlength = maxlength // raw_malloc_usage(itemsize)
- too_many_items = length > maxlength
+ toobig = r_uint(sys.maxint) + 1
- if too_many_items:
+ if r_uint(length) >= r_uint(toobig):
#
# If the total size of the object would be larger than
- # 'nonlarge_max', then allocate it externally.
+ # 'nonlarge_max', then allocate it externally. We also
+ # go there if 'length' is actually negative.
obj = self.external_malloc(typeid, length)
#
else:
@@ -610,13 +612,18 @@
# this includes the case of fixed-size objects, for which we
# should not even ask for the varsize_item_sizes().
totalsize = nonvarsize
- else:
+ elif length > 0:
+ # var-sized allocation with at least one item
itemsize = self.varsize_item_sizes(typeid)
try:
varsize = ovfcheck(itemsize * length)
totalsize = ovfcheck(nonvarsize + varsize)
except OverflowError:
raise MemoryError
+ else:
+ # negative length! This likely comes from an overflow
+ # earlier. We will just raise MemoryError here.
+ raise MemoryError
#
# If somebody calls this function a lot, we must eventually
# force a full collection.
diff --git a/pypy/tool/gdb_pypy.py b/pypy/tool/gdb_pypy.py
new file mode 100644
--- /dev/null
+++ b/pypy/tool/gdb_pypy.py
@@ -0,0 +1,125 @@
+"""
+Some convenience macros for gdb. If you have pypy in your path, you can simply do:
+
+(gdb) python import pypy.tool.gdb_pypy
+
+Or, alternatively:
+
+(gdb) python execfile('/path/to/gdb_pypy.py')
+"""
+
+from __future__ import with_statement
+
+import sys
+import os.path
+
+try:
+ # when running inside gdb
+ from gdb import Command
+except ImportError:
+ # when running outside gdb: mock class for testing
+ class Command(object):
+ def __init__(self, name, command_class):
+ pass
+
+
+def find_field_with_suffix(val, suffix):
+ """
+ Return ``val[field]``, where ``field`` is the only one whose name ends
+ with ``suffix``. If there is no such field, or more than one, raise KeyError.
+ """
+ names = []
+ for field in val.type.fields():
+ if field.name.endswith(suffix):
+ names.append(field.name)
+ #
+ if len(names) == 1:
+ return val[names[0]]
+ elif len(names) == 0:
+ raise KeyError, "cannot find field *%s" % suffix
+ else:
+ raise KeyError, "too many matching fields: %s" % ', '.join(names)
+
+def lookup(val, suffix):
+ """
+ Lookup a field which ends with ``suffix`` following the rpython struct
+ inheritance hierarchy (i.e., looking both at ``val`` and
+ ``val['*_super']``, recursively.
+ """
+ try:
+ return find_field_with_suffix(val, suffix)
+ except KeyError:
+ baseobj = find_field_with_suffix(val, '_super')
+ return lookup(baseobj, suffix)
+
+
+class RPyType(Command):
+ """
+ Prints the RPython type of the expression (remember to dereference it!)
+ It assumes to find ``typeids.txt`` in the current directory.
+ E.g.:
+
+ (gdb) rpy_type *l_v123
+ GcStruct pypy.foo.Bar { super, inst_xxx, inst_yyy }
+ """
+
+ prog2typeids = {}
+
+ def __init__(self, gdb=None):
+ # dependency injection, for tests
+ if gdb is None:
+ import gdb
+ self.gdb = gdb
+ Command.__init__(self, "rpy_type", self.gdb.COMMAND_NONE)
+
+ def invoke(self, arg, from_tty):
+ # some magic code to automatically reload the python file while developing
+ ## from pypy.tool import gdb_pypy
+ ## reload(gdb_pypy)
+ ## gdb_pypy.RPyType.prog2typeids = self.prog2typeids # persist the cache
+ ## self.__class__ = gdb_pypy.RPyType
+ print self.do_invoke(arg, from_tty)
+
+ def do_invoke(self, arg, from_tty):
+ obj = self.gdb.parse_and_eval(arg)
+ hdr = lookup(obj, '_gcheader')
+ tid = hdr['h_tid']
+ offset = tid & 0xFFFFFFFF # 64bit only
+ offset = int(offset) # convert from gdb.Value to python int
+ typeids = self.get_typeids()
+ if offset in typeids:
+ return typeids[offset]
+ else:
+ return 'Cannot find the type with offset %d' % offset
+
+ def get_typeids(self):
+ progspace = self.gdb.current_progspace()
+ try:
+ return self.prog2typeids[progspace]
+ except KeyError:
+ typeids = self.load_typeids(progspace)
+ self.prog2typeids[progspace] = typeids
+ return typeids
+
+ def load_typeids(self, progspace):
+ """
+ Returns a mapping offset --> description
+ """
+ exename = progspace.filename
+ root = os.path.dirname(exename)
+ typeids_txt = os.path.join(root, 'typeids.txt')
+ print 'loading', typeids_txt
+ typeids = {}
+ with open(typeids_txt) as f:
+ for line in f:
+ member, descr = map(str.strip, line.split(None, 1))
+ expr = "((char*)(&pypy_g_typeinfo.%s)) - (char*)&pypy_g_typeinfo" % member
+ offset = int(self.gdb.parse_and_eval(expr))
+ typeids[offset] = descr
+ return typeids
+
+try:
+ import gdb
+ RPyType() # side effects
+except ImportError:
+ pass
diff --git a/pypy/tool/logparser.py b/pypy/tool/logparser.py
--- a/pypy/tool/logparser.py
+++ b/pypy/tool/logparser.py
@@ -4,7 +4,8 @@
python logparser.py <action> <logfilename> <output> <options...>
Actions:
- draw-time draw a timeline image of the log (format PNG by default)
+ draw-time draw a timeline image of the log (format PNG by default)
+ print-summary print a summary of the log
"""
import autopath
import sys, re
@@ -383,6 +384,23 @@
else:
image.save(output)
+def print_summary(log, out):
+ totaltimes = gettotaltimes(log)
+ if out == '-':
+ outfile = sys.stdout
+ else:
+ outfile = open(out, "w")
+ l = totaltimes.items()
+ l.sort(cmp=lambda a, b: cmp(b[1], a[1]))
+ total = sum([b for a, b in l])
+ for a, b in l:
+ if a is None:
+ a = 'interpret'
+ s = " " * (50 - len(a))
+ print >>outfile, a, s, str(b*100/total) + "%"
+ if out != '-':
+ outfile.close()
+
# ____________________________________________________________
@@ -391,6 +409,7 @@
'mainwidth=', 'mainheight=',
'summarywidth=', 'summarybarheight=',
]),
+ 'print-summary': (print_summary, []),
}
if __name__ == '__main__':
diff --git a/pypy/tool/release/force-builds.py b/pypy/tool/release/force-builds.py
--- a/pypy/tool/release/force-builds.py
+++ b/pypy/tool/release/force-builds.py
@@ -22,7 +22,7 @@
# 'own-macosx-x86-32',
# 'pypy-c-app-level-linux-x86-32',
# 'pypy-c-app-level-linux-x86-64',
- 'pypy-c-stackless-app-level-linux-x86-32',
+# 'pypy-c-stackless-app-level-linux-x86-32',
'pypy-c-app-level-win-x86-32',
'pypy-c-jit-linux-x86-32',
'pypy-c-jit-linux-x86-64',
diff --git a/pypy/tool/test/test_gdb_pypy.py b/pypy/tool/test/test_gdb_pypy.py
new file mode 100644
--- /dev/null
+++ b/pypy/tool/test/test_gdb_pypy.py
@@ -0,0 +1,105 @@
+import py
+from pypy.tool import gdb_pypy
+
+class FakeGdb(object):
+
+ COMMAND_NONE = -1
+
+ def __init__(self, exprs, progspace=None):
+ self.exprs = exprs
+ self.progspace = progspace
+
+ def parse_and_eval(self, expr):
+ return self.exprs[expr]
+
+ def current_progspace(self):
+ return self.progspace
+
+
+class Mock(object):
+ def __init__(self, **attrs):
+ self.__dict__.update(attrs)
+
+class Field(Mock):
+ pass
+
+class Struct(object):
+ def __init__(self, fieldnames):
+ self._fields = [Field(name=name) for name in fieldnames]
+
+ def fields(self):
+ return self._fields[:]
+
+class Value(dict):
+ def __init__(self, *args, **kwds):
+ dict.__init__(self, *args, **kwds)
+ self.type = Struct(self.keys())
+ for key, val in self.iteritems():
+ if isinstance(val, dict):
+ self[key] = Value(val)
+
+def test_mock_objects():
+ d = {'a': 1,
+ 'b': 2,
+ 'super': {
+ 'c': 3,
+ }
+ }
+ val = Value(d)
+ assert val['a'] == 1
+ assert val['b'] == 2
+ assert isinstance(val['super'], Value)
+ assert val['super']['c'] == 3
+ fields = val.type.fields()
+ names = [f.name for f in fields]
+ assert sorted(names) == ['a', 'b', 'super']
+
+def test_find_field_with_suffix():
+ obj = Value(x_foo = 1,
+ y_bar = 2,
+ z_foobar = 3)
+ assert gdb_pypy.find_field_with_suffix(obj, 'foo') == 1
+ assert gdb_pypy.find_field_with_suffix(obj, 'foobar') == 3
+ py.test.raises(KeyError, "gdb_pypy.find_field_with_suffix(obj, 'bar')")
+ py.test.raises(KeyError, "gdb_pypy.find_field_with_suffix(obj, 'xxx')")
+
+def test_lookup():
+ d = {'r_super': {
+ '_gcheader': {
+ 'h_tid': 123,
+ }
+ },
+ 'r_foo': 42,
+ }
+ obj = Value(d)
+ assert gdb_pypy.lookup(obj, 'foo') == 42
+ hdr = gdb_pypy.lookup(obj, 'gcheader')
+ assert hdr['h_tid'] == 123
+
+def test_RPyType(tmpdir):
+ exe = tmpdir.join('pypy-c')
+ typeids = tmpdir.join('typeids.txt')
+ typeids.write("""
+member0 GcStruct xxx {}
+member1 GcStruct yyy {}
+member2 GcStruct zzz {}
+""".strip())
+ #
+ progspace = Mock(filename=str(exe))
+ d = {'r_super': {
+ '_gcheader': {
+ 'h_tid': 123,
+ }
+ },
+ 'r_foo': 42,
+ }
+ myvar = Value(d)
+ exprs = {
+ '*myvar': myvar,
+ '((char*)(&pypy_g_typeinfo.member0)) - (char*)&pypy_g_typeinfo': 0,
+ '((char*)(&pypy_g_typeinfo.member1)) - (char*)&pypy_g_typeinfo': 123,
+ '((char*)(&pypy_g_typeinfo.member2)) - (char*)&pypy_g_typeinfo': 456,
+ }
+ gdb = FakeGdb(exprs, progspace)
+ cmd = gdb_pypy.RPyType(gdb)
+ assert cmd.do_invoke('*myvar', True) == 'GcStruct yyy {}'
More information about the pypy-commit
mailing list