[pypy-commit] pypy ootype-rerased: Merge default.
ademan
noreply at buildbot.pypy.org
Sat Jul 23 10:11:29 CEST 2011
Author: Daniel Roberts <Ademan555 at gmail.com>
Branch: ootype-rerased
Changeset: r45893:4973b936e5da
Date: 2011-07-23 01:09 -0700
http://bitbucket.org/pypy/pypy/changeset/4973b936e5da/
Log: Merge default.
diff --git a/lib-python/modified-2.7/distutils/sysconfig_pypy.py b/lib-python/modified-2.7/distutils/sysconfig_pypy.py
--- a/lib-python/modified-2.7/distutils/sysconfig_pypy.py
+++ b/lib-python/modified-2.7/distutils/sysconfig_pypy.py
@@ -116,6 +116,12 @@
if compiler.compiler_type == "unix":
compiler.compiler_so.extend(['-fPIC', '-Wimplicit'])
compiler.shared_lib_extension = get_config_var('SO')
+ if "CFLAGS" in os.environ:
+ cflags = os.environ["CFLAGS"]
+ compiler.compiler.append(cflags)
+ compiler.compiler_so.append(cflags)
+ compiler.linker_so.append(cflags)
+
from sysconfig_cpython import (
parse_makefile, _variable_rx, expand_makefile_vars)
diff --git a/lib-python/2.7/test/test_tarfile.py b/lib-python/modified-2.7/test/test_tarfile.py
copy from lib-python/2.7/test/test_tarfile.py
copy to lib-python/modified-2.7/test/test_tarfile.py
--- a/lib-python/2.7/test/test_tarfile.py
+++ b/lib-python/modified-2.7/test/test_tarfile.py
@@ -169,6 +169,7 @@
except tarfile.ReadError:
self.fail("tarfile.open() failed on empty archive")
self.assertListEqual(tar.getmembers(), [])
+ tar.close()
def test_null_tarfile(self):
# Test for issue6123: Allow opening empty archives.
@@ -207,16 +208,21 @@
fobj = open(self.tarname, "rb")
tar = tarfile.open(fileobj=fobj, mode=self.mode)
self.assertEqual(tar.name, os.path.abspath(fobj.name))
+ tar.close()
def test_no_name_attribute(self):
- data = open(self.tarname, "rb").read()
+ f = open(self.tarname, "rb")
+ data = f.read()
+ f.close()
fobj = StringIO.StringIO(data)
self.assertRaises(AttributeError, getattr, fobj, "name")
tar = tarfile.open(fileobj=fobj, mode=self.mode)
self.assertEqual(tar.name, None)
def test_empty_name_attribute(self):
- data = open(self.tarname, "rb").read()
+ f = open(self.tarname, "rb")
+ data = f.read()
+ f.close()
fobj = StringIO.StringIO(data)
fobj.name = ""
tar = tarfile.open(fileobj=fobj, mode=self.mode)
@@ -515,6 +521,7 @@
self.tar = tarfile.open(self.tarname, mode=self.mode, encoding="iso8859-1")
tarinfo = self.tar.getmember("pax/umlauts-ÄÖÜäöüß")
self._test_member(tarinfo, size=7011, chksum=md5_regtype)
+ self.tar.close()
class LongnameTest(ReadTest):
@@ -675,6 +682,7 @@
tar = tarfile.open(tmpname, self.mode)
tarinfo = tar.gettarinfo(path)
self.assertEqual(tarinfo.size, 0)
+ tar.close()
finally:
os.rmdir(path)
@@ -692,6 +700,7 @@
tar.gettarinfo(target)
tarinfo = tar.gettarinfo(link)
self.assertEqual(tarinfo.size, 0)
+ tar.close()
finally:
os.remove(target)
os.remove(link)
@@ -704,6 +713,7 @@
tar = tarfile.open(tmpname, self.mode)
tarinfo = tar.gettarinfo(path)
self.assertEqual(tarinfo.size, 0)
+ tar.close()
finally:
os.remove(path)
@@ -722,6 +732,7 @@
tar.add(dstname)
os.chdir(cwd)
self.assertTrue(tar.getnames() == [], "added the archive to itself")
+ tar.close()
def test_exclude(self):
tempdir = os.path.join(TEMPDIR, "exclude")
@@ -742,6 +753,7 @@
tar = tarfile.open(tmpname, "r")
self.assertEqual(len(tar.getmembers()), 1)
self.assertEqual(tar.getnames()[0], "empty_dir")
+ tar.close()
finally:
shutil.rmtree(tempdir)
@@ -859,7 +871,9 @@
fobj.close()
elif self.mode.endswith("bz2"):
dec = bz2.BZ2Decompressor()
- data = open(tmpname, "rb").read()
+ f = open(tmpname, "rb")
+ data = f.read()
+ f.close()
data = dec.decompress(data)
self.assertTrue(len(dec.unused_data) == 0,
"found trailing data")
@@ -938,6 +952,7 @@
"unable to read longname member")
self.assertEqual(tarinfo.linkname, member.linkname,
"unable to read longname member")
+ tar.close()
def test_longname_1023(self):
self._test(("longnam/" * 127) + "longnam")
@@ -1030,6 +1045,7 @@
else:
n = tar.getmembers()[0].name
self.assertTrue(name == n, "PAX longname creation failed")
+ tar.close()
def test_pax_global_header(self):
pax_headers = {
@@ -1058,6 +1074,7 @@
tarfile.PAX_NUMBER_FIELDS[key](val)
except (TypeError, ValueError):
self.fail("unable to convert pax header field")
+ tar.close()
def test_pax_extended_header(self):
# The fields from the pax header have priority over the
@@ -1077,6 +1094,7 @@
self.assertEqual(t.pax_headers, pax_headers)
self.assertEqual(t.name, "foo")
self.assertEqual(t.uid, 123)
+ tar.close()
class UstarUnicodeTest(unittest.TestCase):
@@ -1120,6 +1138,7 @@
tarinfo.name = "foo"
tarinfo.uname = u"äöü"
self.assertRaises(UnicodeError, tar.addfile, tarinfo)
+ tar.close()
def test_unicode_argument(self):
tar = tarfile.open(tarname, "r", encoding="iso8859-1", errors="strict")
@@ -1174,6 +1193,7 @@
tar = tarfile.open(tmpname, format=self.format, encoding="ascii",
errors=handler)
self.assertEqual(tar.getnames()[0], name)
+ tar.close()
self.assertRaises(UnicodeError, tarfile.open, tmpname,
encoding="ascii", errors="strict")
@@ -1186,6 +1206,7 @@
tar = tarfile.open(tmpname, format=self.format, encoding="iso8859-1",
errors="utf-8")
+        self.assertEqual(tar.getnames()[0], "äöü/" + u"ß".encode("utf8"))
+ tar.close()
class AppendTest(unittest.TestCase):
@@ -1213,6 +1234,7 @@
def _test(self, names=["bar"], fileobj=None):
tar = tarfile.open(self.tarname, fileobj=fileobj)
self.assertEqual(tar.getnames(), names)
+ tar.close()
def test_non_existing(self):
self._add_testfile()
@@ -1231,7 +1253,9 @@
def test_fileobj(self):
self._create_testtar()
- data = open(self.tarname).read()
+ f = open(self.tarname)
+ data = f.read()
+ f.close()
fobj = StringIO.StringIO(data)
self._add_testfile(fobj)
fobj.seek(0)
@@ -1257,7 +1281,9 @@
# Append mode is supposed to fail if the tarfile to append to
# does not end with a zero block.
def _test_error(self, data):
- open(self.tarname, "wb").write(data)
+ f = open(self.tarname, "wb")
+ f.write(data)
+ f.close()
self.assertRaises(tarfile.ReadError, self._add_testfile)
def test_null(self):
diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py
--- a/pypy/config/pypyoption.py
+++ b/pypy/config/pypyoption.py
@@ -327,6 +327,9 @@
BoolOption("mutable_builtintypes",
"Allow the changing of builtin types", default=False,
requires=[("objspace.std.builtinshortcut", True)]),
+ BoolOption("withidentitydict",
+ "track types that override __hash__, __eq__ or __cmp__ and use a special dict strategy for those which do not",
+ default=True),
]),
])
diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst
--- a/pypy/doc/coding-guide.rst
+++ b/pypy/doc/coding-guide.rst
@@ -929,6 +929,19 @@
located in the ``py/bin/`` directory. For switches to
modify test execution pass the ``-h`` option.
+Coverage reports
+----------------
+
+In order to get coverage reports the `pytest-cov`_ plugin is included.
+It adds some extra requirements (`coverage`_ and `cov-core`_),
+and once they are installed, coverage testing can be invoked via::
+
+    python test_all.py --cov file_or_directory_to_cover file_or_directory
+
+.. _`pytest-cov`: http://pypi.python.org/pypi/pytest-cov
+.. _`coverage`: http://pypi.python.org/pypi/coverage
+.. _`cov-core`: http://pypi.python.org/pypi/cov-core
+
Test conventions
----------------
diff --git a/pypy/doc/config/objspace.std.withidentitydict.txt b/pypy/doc/config/objspace.std.withidentitydict.txt
new file mode 100644
--- /dev/null
+++ b/pypy/doc/config/objspace.std.withidentitydict.txt
@@ -0,0 +1,21 @@
+=============================
+objspace.std.withidentitydict
+=============================
+
+* **name:** withidentitydict
+
+* **description:** enable a dictionary strategy for "by identity" comparisons
+
+* **command-line:** --objspace-std-withidentitydict
+
+* **command-line for negation:** --no-objspace-std-withidentitydict
+
+* **option type:** boolean option
+
+* **default:** True
+
+
+Enable a dictionary strategy specialized for instances of classes which
+compare "by identity", which is the default unless you override ``__hash__``,
+``__eq__`` or ``__cmp__``. This strategy will be used only with new-style
+classes.
diff --git a/pypy/doc/config/translation.dont_write_c_files.txt b/pypy/doc/config/translation.dont_write_c_files.txt
new file mode 100644
--- /dev/null
+++ b/pypy/doc/config/translation.dont_write_c_files.txt
@@ -0,0 +1,4 @@
+write the generated C files to ``/dev/null`` instead of to the disk. Useful if
+you want to use translate.py as a benchmark and don't want to access the disk.
+
+.. _`translation documentation`: ../translation.html
diff --git a/pypy/doc/config/translation.gc.txt b/pypy/doc/config/translation.gc.txt
--- a/pypy/doc/config/translation.gc.txt
+++ b/pypy/doc/config/translation.gc.txt
@@ -1,4 +1,6 @@
-Choose the Garbage Collector used by the translated program:
+Choose the Garbage Collector used by the translated program.
+The good performing collectors are "hybrid" and "minimark".
+The default is "minimark".
- "ref": reference counting. Takes very long to translate and the result is
slow.
@@ -11,3 +13,12 @@
older generation.
- "boehm": use the Boehm conservative GC.
+
+ - "hybrid": a hybrid collector of "generation" together with a
+ mark-n-sweep old space
+
+ - "markcompact": a slow, but memory-efficient collector,
+ influenced e.g. by Smalltalk systems.
+
+ - "minimark": a generational mark-n-sweep collector with good
+ performance. Includes page marking for large arrays.
diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst
--- a/pypy/doc/cpython_differences.rst
+++ b/pypy/doc/cpython_differences.rst
@@ -211,6 +211,38 @@
>>>> print d1['a']
42
+Mutating classes of objects which are already used as dictionary keys
+---------------------------------------------------------------------
+
+Consider the following snippet of code::
+
+ class X(object):
+ pass
+
+ def __evil_eq__(self, other):
+ print 'hello world'
+ return False
+
+ def evil(y):
+        d = {X(): 1}
+ X.__eq__ = __evil_eq__
+ d[y] # might trigger a call to __eq__?
+
+In CPython, __evil_eq__ **might** be called, although there is no way to write
+a test which reliably calls it. It happens if ``y is not x`` and ``hash(y) ==
+hash(x)``, where ``hash(x)`` is computed when ``x`` is inserted into the
+dictionary. If **by chance** the condition is satisfied, then ``__evil_eq__``
+is called.
+
+PyPy uses a special strategy to optimize dictionaries whose keys are instances
+of user-defined classes which do not override the default ``__hash__``,
+``__eq__`` and ``__cmp__``: when using this strategy, ``__eq__`` and
+``__cmp__`` are never called, but instead the lookup is done by identity, so
+in the case above it is guaranteed that ``__eq__`` won't be called.
+
+Note that in all other cases (e.g., if you have a custom ``__hash__`` and
+``__eq__`` in ``y``) the behavior is exactly the same as CPython.
+
Ignored exceptions
-----------------------
diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst
--- a/pypy/doc/windows.rst
+++ b/pypy/doc/windows.rst
@@ -32,6 +32,24 @@
modules that relies on third-party libraries. See below how to get
and build them.
+Prepping Windows for the Large Build
+------------------------------------
+
+Normally 32bit programs are limited to 2GB of memory on Windows. It is
+possible to raise this limit, to 3GB on Windows 32bit, and almost 4GB
+on Windows 64bit.
+
+On Windows 32bit, it is necessary to modify the system: follow
+http://usa.autodesk.com/adsk/servlet/ps/dl/item?siteID=123112&id=9583842&linkID=9240617
+to enable the "3GB" feature, and reboot. This step is not necessary on
+Windows 64bit.
+
+Then you need to execute::
+
+ editbin /largeaddressaware pypy.exe
+
+on the pypy.exe file you compiled.
+
Installing external packages
----------------------------
diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py
--- a/pypy/interpreter/function.py
+++ b/pypy/interpreter/function.py
@@ -31,7 +31,8 @@
_immutable_fields_ = ['code?',
'w_func_globals?',
'closure?',
- 'defs_w?[*]']
+ 'defs_w?[*]',
+ 'name?']
def __init__(self, space, code, w_globals=None, defs_w=[], closure=None,
forcename=None):
diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py
--- a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py
+++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py
@@ -2820,11 +2820,11 @@
def test_residual_call_invalidate_some_arrays(self):
ops = """
[p1, p2, i1]
- p3 = getarrayitem_gc(p1, 0, descr=arraydescr2)
+ p3 = getarrayitem_gc(p2, 0, descr=arraydescr2)
p4 = getarrayitem_gc(p2, 1, descr=arraydescr2)
i2 = getarrayitem_gc(p1, 1, descr=arraydescr)
i3 = call(i1, descr=writearraydescr)
- p5 = getarrayitem_gc(p1, 0, descr=arraydescr2)
+ p5 = getarrayitem_gc(p2, 0, descr=arraydescr2)
p6 = getarrayitem_gc(p2, 1, descr=arraydescr2)
i4 = getarrayitem_gc(p1, 1, descr=arraydescr)
escape(p3)
@@ -2837,7 +2837,7 @@
"""
expected = """
[p1, p2, i1]
- p3 = getarrayitem_gc(p1, 0, descr=arraydescr2)
+ p3 = getarrayitem_gc(p2, 0, descr=arraydescr2)
p4 = getarrayitem_gc(p2, 1, descr=arraydescr2)
i2 = getarrayitem_gc(p1, 1, descr=arraydescr)
i3 = call(i1, descr=writearraydescr)
diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py
--- a/pypy/jit/metainterp/pyjitpl.py
+++ b/pypy/jit/metainterp/pyjitpl.py
@@ -390,8 +390,21 @@
@arguments("box", "descr", "box")
def _opimpl_getarrayitem_gc_any(self, arraybox, arraydescr, indexbox):
- return self.execute_with_descr(rop.GETARRAYITEM_GC,
+ cache = self.metainterp.heap_array_cache.get(arraydescr, None)
+ if cache and isinstance(indexbox, ConstInt):
+ index = indexbox.getint()
+ frombox, tobox = cache.get(index, (None, None))
+ if frombox is arraybox:
+ return tobox
+ resbox = self.execute_with_descr(rop.GETARRAYITEM_GC,
arraydescr, arraybox, indexbox)
+ if isinstance(indexbox, ConstInt):
+ if not cache:
+ cache = self.metainterp.heap_array_cache[arraydescr] = {}
+ index = indexbox.getint()
+ cache[index] = arraybox, resbox
+ return resbox
+
opimpl_getarrayitem_gc_i = _opimpl_getarrayitem_gc_any
opimpl_getarrayitem_gc_r = _opimpl_getarrayitem_gc_any
@@ -419,6 +432,13 @@
indexbox, itembox):
self.execute_with_descr(rop.SETARRAYITEM_GC, arraydescr, arraybox,
indexbox, itembox)
+ if isinstance(indexbox, ConstInt):
+ cache = self.metainterp.heap_array_cache.setdefault(arraydescr, {})
+ cache[indexbox.getint()] = arraybox, itembox
+ else:
+ cache = self.metainterp.heap_array_cache.get(arraydescr, None)
+ if cache:
+ cache.clear()
opimpl_setarrayitem_gc_i = _opimpl_setarrayitem_gc_any
opimpl_setarrayitem_gc_r = _opimpl_setarrayitem_gc_any
@@ -454,21 +474,17 @@
def opimpl_newlist(self, structdescr, lengthdescr, itemsdescr, arraydescr,
sizebox):
sbox = self.metainterp.execute_and_record(rop.NEW, structdescr)
- self.metainterp.execute_and_record(rop.SETFIELD_GC, lengthdescr,
- sbox, sizebox)
+ self._opimpl_setfield_gc_any(sbox, lengthdescr, sizebox)
abox = self.metainterp.execute_and_record(rop.NEW_ARRAY, arraydescr,
sizebox)
- self.metainterp.execute_and_record(rop.SETFIELD_GC, itemsdescr,
- sbox, abox)
+ self._opimpl_setfield_gc_any(sbox, itemsdescr, abox)
return sbox
@arguments("box", "descr", "descr", "box")
def _opimpl_getlistitem_gc_any(self, listbox, itemsdescr, arraydescr,
indexbox):
- arraybox = self.metainterp.execute_and_record(rop.GETFIELD_GC,
- itemsdescr, listbox)
- return self.execute_with_descr(rop.GETARRAYITEM_GC,
- arraydescr, arraybox, indexbox)
+ arraybox = self._opimpl_getfield_gc_any(listbox, itemsdescr)
+ return self._opimpl_getarrayitem_gc_any(arraybox, arraydescr, indexbox)
opimpl_getlistitem_gc_i = _opimpl_getlistitem_gc_any
opimpl_getlistitem_gc_r = _opimpl_getlistitem_gc_any
@@ -477,10 +493,9 @@
@arguments("box", "descr", "descr", "box", "box")
def _opimpl_setlistitem_gc_any(self, listbox, itemsdescr, arraydescr,
indexbox, valuebox):
- arraybox = self.metainterp.execute_and_record(rop.GETFIELD_GC,
- itemsdescr, listbox)
- self.execute_with_descr(rop.SETARRAYITEM_GC, arraydescr, arraybox,
- indexbox, valuebox)
+ arraybox = self._opimpl_getfield_gc_any(listbox, itemsdescr)
+ self._opimpl_setarrayitem_gc_any(arraybox, arraydescr, indexbox,
+ valuebox)
opimpl_setlistitem_gc_i = _opimpl_setlistitem_gc_any
opimpl_setlistitem_gc_r = _opimpl_setlistitem_gc_any
@@ -502,18 +517,29 @@
@arguments("box", "descr")
def _opimpl_getfield_gc_any(self, box, fielddescr):
- return self.execute_with_descr(rop.GETFIELD_GC, fielddescr, box)
+ return self._opimpl_getfield_gc_any_pureornot(
+ rop.GETFIELD_GC, box, fielddescr)
opimpl_getfield_gc_i = _opimpl_getfield_gc_any
opimpl_getfield_gc_r = _opimpl_getfield_gc_any
opimpl_getfield_gc_f = _opimpl_getfield_gc_any
@arguments("box", "descr")
def _opimpl_getfield_gc_pure_any(self, box, fielddescr):
- return self.execute_with_descr(rop.GETFIELD_GC_PURE, fielddescr, box)
+ return self._opimpl_getfield_gc_any_pureornot(
+ rop.GETFIELD_GC_PURE, box, fielddescr)
opimpl_getfield_gc_i_pure = _opimpl_getfield_gc_pure_any
opimpl_getfield_gc_r_pure = _opimpl_getfield_gc_pure_any
opimpl_getfield_gc_f_pure = _opimpl_getfield_gc_pure_any
+ @specialize.arg(1)
+ def _opimpl_getfield_gc_any_pureornot(self, opnum, box, fielddescr):
+ frombox, tobox = self.metainterp.heap_cache.get(fielddescr, (None, None))
+ if frombox is box:
+ return tobox
+ resbox = self.execute_with_descr(opnum, fielddescr, box)
+ self.metainterp.heap_cache[fielddescr] = (box, resbox)
+ return resbox
+
@arguments("orgpc", "box", "descr")
def _opimpl_getfield_gc_greenfield_any(self, pc, box, fielddescr):
ginfo = self.metainterp.jitdriver_sd.greenfield_info
@@ -532,7 +558,11 @@
@arguments("box", "descr", "box")
def _opimpl_setfield_gc_any(self, box, fielddescr, valuebox):
+ frombox, tobox = self.metainterp.heap_cache.get(fielddescr, (None, None))
+ if frombox is box and tobox is valuebox:
+ return
self.execute_with_descr(rop.SETFIELD_GC, fielddescr, box, valuebox)
+ self.metainterp.heap_cache[fielddescr] = (box, valuebox)
opimpl_setfield_gc_i = _opimpl_setfield_gc_any
opimpl_setfield_gc_r = _opimpl_setfield_gc_any
opimpl_setfield_gc_f = _opimpl_setfield_gc_any
@@ -617,7 +647,7 @@
@arguments("orgpc", "box", "descr")
def _opimpl_getfield_vable(self, pc, box, fielddescr):
if self._nonstandard_virtualizable(pc, box):
- return self.execute_with_descr(rop.GETFIELD_GC, fielddescr, box)
+ return self._opimpl_getfield_gc_any(box, fielddescr)
self.metainterp.check_synchronized_virtualizable()
index = self._get_virtualizable_field_index(fielddescr)
return self.metainterp.virtualizable_boxes[index]
@@ -629,8 +659,7 @@
@arguments("orgpc", "box", "descr", "box")
def _opimpl_setfield_vable(self, pc, box, fielddescr, valuebox):
if self._nonstandard_virtualizable(pc, box):
- self.execute_with_descr(rop.SETFIELD_GC, fielddescr, box, valuebox)
- return
+ return self._opimpl_setfield_gc_any(box, fielddescr, valuebox)
index = self._get_virtualizable_field_index(fielddescr)
self.metainterp.virtualizable_boxes[index] = valuebox
self.metainterp.synchronize_virtualizable()
@@ -660,10 +689,8 @@
@arguments("orgpc", "box", "descr", "descr", "box")
def _opimpl_getarrayitem_vable(self, pc, box, fdescr, adescr, indexbox):
if self._nonstandard_virtualizable(pc, box):
- arraybox = self.metainterp.execute_and_record(rop.GETFIELD_GC,
- fdescr, box)
- return self.execute_with_descr(rop.GETARRAYITEM_GC, adescr,
- arraybox, indexbox)
+ arraybox = self._opimpl_getfield_gc_any(box, fdescr)
+ return self._opimpl_getarrayitem_gc_any(arraybox, adescr, indexbox)
self.metainterp.check_synchronized_virtualizable()
index = self._get_arrayitem_vable_index(pc, fdescr, indexbox)
return self.metainterp.virtualizable_boxes[index]
@@ -676,10 +703,9 @@
def _opimpl_setarrayitem_vable(self, pc, box, fdescr, adescr, indexbox,
valuebox):
if self._nonstandard_virtualizable(pc, box):
- arraybox = self.metainterp.execute_and_record(rop.GETFIELD_GC,
- fdescr, box)
- self.execute_with_descr(rop.SETARRAYITEM_GC, adescr,
- arraybox, indexbox, valuebox)
+ arraybox = self._opimpl_getfield_gc_any(box, fdescr)
+ self._opimpl_setarrayitem_gc_any(arraybox, adescr,
+ indexbox, valuebox)
return
index = self._get_arrayitem_vable_index(pc, fdescr, indexbox)
self.metainterp.virtualizable_boxes[index] = valuebox
@@ -693,8 +719,7 @@
@arguments("orgpc", "box", "descr", "descr")
def opimpl_arraylen_vable(self, pc, box, fdescr, adescr):
if self._nonstandard_virtualizable(pc, box):
- arraybox = self.metainterp.execute_and_record(rop.GETFIELD_GC,
- fdescr, box)
+ arraybox = self._opimpl_getfield_gc_any(box, fdescr)
return self.execute_with_descr(rop.ARRAYLEN_GC, adescr, arraybox)
vinfo = self.metainterp.jitdriver_sd.virtualizable_info
virtualizable_box = self.metainterp.virtualizable_boxes[-1]
@@ -1462,6 +1487,12 @@
self.known_class_boxes = {}
# contains frame boxes that are not virtualizables
self.nonstandard_virtualizables = {}
+ # heap cache
+ # maps descrs to (from_box, to_box) tuples
+ self.heap_cache = {}
+ # heap array cache
+ # maps descrs to {index: (from_box, to_box)} dicts
+ self.heap_array_cache = {}
def perform_call(self, jitcode, boxes, greenkey=None):
# causes the metainterp to enter the given subfunction
@@ -1637,10 +1668,27 @@
# record the operation
profiler = self.staticdata.profiler
profiler.count_ops(opnum, RECORDED_OPS)
+ self._invalidate_caches(opnum, descr)
op = self.history.record(opnum, argboxes, resbox, descr)
self.attach_debug_info(op)
return resbox
+ def _invalidate_caches(self, opnum, descr):
+ if opnum == rop.SETFIELD_GC:
+ return
+ if opnum == rop.SETARRAYITEM_GC:
+ return
+ if rop._NOSIDEEFFECT_FIRST <= opnum <= rop._NOSIDEEFFECT_LAST:
+ return
+ if opnum == rop.CALL:
+ effectinfo = descr.get_extra_info()
+ if effectinfo.extraeffect == effectinfo.EF_ELIDABLE:
+ return
+ if self.heap_cache:
+ self.heap_cache.clear()
+ if self.heap_array_cache:
+ self.heap_array_cache.clear()
+
def attach_debug_info(self, op):
if (not we_are_translated() and op is not None
and getattr(self, 'framestack', None)):
@@ -1804,6 +1852,8 @@
def reached_loop_header(self, greenboxes, redboxes, resumedescr):
self.known_class_boxes = {}
self.nonstandard_virtualizables = {} # XXX maybe not needed?
+ self.heap_cache = {}
+ self.heap_array_cache = {}
duplicates = {}
self.remove_consts_and_duplicates(redboxes, len(redboxes),
@@ -2311,6 +2361,16 @@
for i in range(len(boxes)):
if boxes[i] is oldbox:
boxes[i] = newbox
+ for descr, (frombox, tobox) in self.heap_cache.iteritems():
+ change = False
+ if frombox is oldbox:
+ change = True
+ frombox = newbox
+ if tobox is oldbox:
+ change = True
+ tobox = newbox
+ if change:
+ self.heap_cache[descr] = frombox, tobox
def find_biggest_function(self):
start_stack = []
diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py
--- a/pypy/jit/metainterp/test/test_ajit.py
+++ b/pypy/jit/metainterp/test/test_ajit.py
@@ -1024,69 +1024,6 @@
res = self.meta_interp(main, [])
assert res == 55
- def test_dont_record_repeated_guard_class(self):
- class A:
- pass
- class B(A):
- pass
- @dont_look_inside
- def extern(n):
- if n == -7:
- return None
- elif n:
- return A()
- else:
- return B()
- def fn(n):
- obj = extern(n)
- return isinstance(obj, B) + isinstance(obj, B) + isinstance(obj, B) + isinstance(obj, B)
- res = self.interp_operations(fn, [0])
- assert res == 4
- self.check_operations_history(guard_class=1, guard_nonnull=1)
- res = self.interp_operations(fn, [1])
- assert not res
-
- def test_dont_record_guard_class_after_new(self):
- class A:
- pass
- class B(A):
- pass
- def fn(n):
- if n == -7:
- obj = None
- elif n:
- obj = A()
- else:
- obj = B()
- return isinstance(obj, B) + isinstance(obj, B) + isinstance(obj, B) + isinstance(obj, B)
- res = self.interp_operations(fn, [0])
- assert res == 4
- self.check_operations_history(guard_class=0, guard_nonnull=0)
- res = self.interp_operations(fn, [1])
- assert not res
-
- def test_guard_isnull_nullifies(self):
- class A:
- pass
- a = A()
- a.x = None
- def fn(n):
- if n == -7:
- a.x = ""
- obj = a.x
- res = 0
- if not obj:
- res += 1
- if obj:
- res += 1
- if obj is None:
- res += 1
- if obj is not None:
- res += 1
- return res
- res = self.interp_operations(fn, [0])
- assert res == 2
- self.check_operations_history(guard_isnull=1)
def test_assert_isinstance(self):
class A:
@@ -1248,7 +1185,7 @@
return tup[1]
res = self.interp_operations(f, [3, 5])
assert res == 5
- self.check_operations_history(setfield_gc=2, getfield_gc_pure=1)
+ self.check_operations_history(setfield_gc=2, getfield_gc_pure=0)
def test_oosend_look_inside_only_one(self):
class A:
@@ -2649,7 +2586,23 @@
return n
res = self.meta_interp(f, [10, 1])
self.check_loops(getfield_gc=2)
+ assert res == f(10, 1)
+ def test_jit_merge_point_with_raw_pointer(self):
+ driver = JitDriver(greens = [], reds = ['n', 'x'])
+
+ TP = lltype.Array(lltype.Signed, hints={'nolength': True})
+
+ def f(n):
+ x = lltype.malloc(TP, 10, flavor='raw')
+ x[0] = 1
+ while n > 0:
+ driver.jit_merge_point(n=n, x=x)
+ n -= x[0]
+ lltype.free(x, flavor='raw')
+ return n
+
+ self.meta_interp(f, [10], repeat=3)
class TestLLtype(BaseLLtypeTests, LLJitMixin):
pass
diff --git a/pypy/jit/metainterp/test/test_immutable.py b/pypy/jit/metainterp/test/test_immutable.py
--- a/pypy/jit/metainterp/test/test_immutable.py
+++ b/pypy/jit/metainterp/test/test_immutable.py
@@ -1,5 +1,9 @@
+from pypy.rlib import jit
from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin
+ at jit.dont_look_inside
+def escape(x):
+ return x
class ImmutableFieldsTests:
@@ -11,7 +15,7 @@
self.x = x
def f(x):
- y = X(x)
+ y = escape(X(x))
return y.x + 5
res = self.interp_operations(f, [23])
assert res == 28
@@ -33,7 +37,7 @@
def f(x, y):
X(x) # force the field 'x' to be on class 'X'
- z = Y(x, y)
+ z = escape(Y(x, y))
return z.x + z.y + 5
res = self.interp_operations(f, [23, 11])
assert res == 39
@@ -42,7 +46,7 @@
def f(x, y):
# this time, the field 'x' only shows up on subclass 'Y'
- z = Y(x, y)
+ z = escape(Y(x, y))
return z.x + z.y + 5
res = self.interp_operations(f, [23, 11])
assert res == 39
@@ -58,7 +62,7 @@
def f(index):
l = [1, 2, 3, 4]
l[2] = 30
- a = X(l)
+ a = escape(X(l))
return a.y[index]
res = self.interp_operations(f, [2], listops=True)
assert res == 30
@@ -76,7 +80,7 @@
self.y = y
def f(x, index):
- y = X([x], x+1)
+ y = escape(X([x], x+1))
return y.lst[index] + y.y + 5
res = self.interp_operations(f, [23, 0], listops=True)
assert res == 23 + 24 + 5
diff --git a/pypy/jit/metainterp/test/test_tracingopts.py b/pypy/jit/metainterp/test/test_tracingopts.py
new file mode 100644
--- /dev/null
+++ b/pypy/jit/metainterp/test/test_tracingopts.py
@@ -0,0 +1,407 @@
+import py
+import sys
+from pypy.rlib import jit
+from pypy.jit.metainterp.test.support import LLJitMixin
+
+
+class TestLLtype(LLJitMixin):
+ def test_dont_record_repeated_guard_class(self):
+ class A:
+ pass
+ class B(A):
+ pass
+ @jit.dont_look_inside
+ def extern(n):
+ if n == -7:
+ return None
+ elif n:
+ return A()
+ else:
+ return B()
+ def fn(n):
+ obj = extern(n)
+ return isinstance(obj, B) + isinstance(obj, B) + isinstance(obj, B) + isinstance(obj, B)
+ res = self.interp_operations(fn, [0])
+ assert res == 4
+ self.check_operations_history(guard_class=1, guard_nonnull=1)
+ res = self.interp_operations(fn, [1])
+ assert not res
+
+ def test_dont_record_guard_class_after_new(self):
+ class A:
+ pass
+ class B(A):
+ pass
+ def fn(n):
+ if n == -7:
+ obj = None
+ elif n:
+ obj = A()
+ else:
+ obj = B()
+ return isinstance(obj, B) + isinstance(obj, B) + isinstance(obj, B) + isinstance(obj, B)
+ res = self.interp_operations(fn, [0])
+ assert res == 4
+ self.check_operations_history(guard_class=0, guard_nonnull=0)
+ res = self.interp_operations(fn, [1])
+ assert not res
+
+ def test_guard_isnull_nullifies(self):
+ class A:
+ pass
+ a = A()
+ a.x = None
+ def fn(n):
+ if n == -7:
+ a.x = ""
+ obj = a.x
+ res = 0
+ if not obj:
+ res += 1
+ if obj:
+ res += 1
+ if obj is None:
+ res += 1
+ if obj is not None:
+ res += 1
+ return res
+ res = self.interp_operations(fn, [0])
+ assert res == 2
+ self.check_operations_history(guard_isnull=1)
+
+ def test_heap_caching_while_tracing(self):
+ class A:
+ pass
+ a1 = A()
+ a2 = A()
+ def fn(n):
+ if n > 0:
+ a = a1
+ else:
+ a = a2
+ a.x = n
+ return a.x
+ res = self.interp_operations(fn, [7])
+ assert res == 7
+ self.check_operations_history(getfield_gc=0)
+ res = self.interp_operations(fn, [-7])
+ assert res == -7
+ self.check_operations_history(getfield_gc=0)
+
+ def fn(n, ca, cb):
+ a1.x = n
+ a2.x = n
+ a = a1
+ if ca:
+ a = a2
+ b = a1
+ if cb:
+ b = a
+ return a.x + b.x
+ res = self.interp_operations(fn, [7, 0, 1])
+ assert res == 7 * 2
+ self.check_operations_history(getfield_gc=1)
+ res = self.interp_operations(fn, [-7, 1, 1])
+ assert res == -7 * 2
+ self.check_operations_history(getfield_gc=1)
+
+ def test_heap_caching_while_tracing_invalidation(self):
+ class A:
+ pass
+ a1 = A()
+ a2 = A()
+ @jit.dont_look_inside
+ def f(a):
+ a.x = 5
+ l = [1]
+ def fn(n):
+ if n > 0:
+ a = a1
+ else:
+ a = a2
+ a.x = n
+ x1 = a.x
+ f(a)
+ x2 = a.x
+ l[0] = x2
+ return a.x + x1 + x2
+ res = self.interp_operations(fn, [7])
+ assert res == 5 * 2 + 7
+ self.check_operations_history(getfield_gc=1)
+
+ def test_heap_caching_dont_store_same(self):
+ class A:
+ pass
+ a1 = A()
+ a2 = A()
+ def fn(n):
+ if n > 0:
+ a = a1
+ else:
+ a = a2
+ a.x = n
+ a.x = n
+ return a.x
+ res = self.interp_operations(fn, [7])
+ assert res == 7
+ self.check_operations_history(getfield_gc=0, setfield_gc=1)
+ res = self.interp_operations(fn, [-7])
+ assert res == -7
+ self.check_operations_history(getfield_gc=0)
+
+ def test_array_caching(self):
+ a1 = [0, 0]
+ a2 = [0, 0]
+ def fn(n):
+ if n > 0:
+ a = a1
+ else:
+ a = a2
+ a[0] = n
+ x1 = a[0]
+ a[n - n] = n + 1
+ return a[0] + x1
+ res = self.interp_operations(fn, [7])
+ assert res == 7 + 7 + 1
+ self.check_operations_history(getarrayitem_gc=1)
+ res = self.interp_operations(fn, [-7])
+ assert res == -7 - 7 + 1
+ self.check_operations_history(getarrayitem_gc=1)
+
+ def fn(n, ca, cb):
+ a1[0] = n
+ a2[0] = n
+ a = a1
+ if ca:
+ a = a2
+ b = a1
+ if cb:
+ b = a
+ return a[0] + b[0]
+ res = self.interp_operations(fn, [7, 0, 1])
+ assert res == 7 * 2
+ self.check_operations_history(getarrayitem_gc=1)
+ res = self.interp_operations(fn, [-7, 1, 1])
+ assert res == -7 * 2
+ self.check_operations_history(getarrayitem_gc=1)
+
+ def test_array_caching_while_tracing_invalidation(self):
+ a1 = [0, 0]
+ a2 = [0, 0]
+ @jit.dont_look_inside
+ def f(a):
+ a[0] = 5
+ class A: pass
+ l = A()
+ def fn(n):
+ if n > 0:
+ a = a1
+ else:
+ a = a2
+ a[0] = n
+ x1 = a[0]
+ f(a)
+ x2 = a[0]
+ l.x = x2
+ return a[0] + x1 + x2
+ res = self.interp_operations(fn, [7])
+ assert res == 5 * 2 + 7
+ self.check_operations_history(getarrayitem_gc=1)
+
+ def test_array_and_getfield_interaction(self):
+ class A: pass
+ a1 = A()
+ a2 = A()
+ a1.l = a2.l = [0, 0]
+ def fn(n):
+ if n > 0:
+ a = a1
+ else:
+ a = a2
+ a.l = [0, 0]
+ a.x = 0
+ a.l[a.x] = n
+ a.x += 1
+ a.l[a.x] = n + 1
+ x1 = a.l[a.x]
+ a.x -= 1
+ x2 = a.l[a.x]
+ return x1 + x2
+ res = self.interp_operations(fn, [7])
+ assert res == 7 * 2 + 1
+ self.check_operations_history(setarrayitem_gc=2, setfield_gc=3,
+ getarrayitem_gc=0, getfield_gc=1)
+
+ def test_promote_changes_heap_cache(self):
+ class A: pass
+ a1 = A()
+ a2 = A()
+ a1.l = a2.l = [0, 0]
+ a1.x = a2.x = 0
+ def fn(n):
+ if n > 0:
+ a = a1
+ else:
+ a = a2
+ a.l = [0, 0]
+ jit.promote(a.x)
+ a.l[a.x] = n
+ a.x += 1
+ a.l[a.x] = n + 1
+ x1 = a.l[a.x]
+ a.x -= 1
+ x2 = a.l[a.x]
+ return x1 + x2
+ res = self.interp_operations(fn, [7])
+ assert res == 7 * 2 + 1
+ self.check_operations_history(setarrayitem_gc=2, setfield_gc=2,
+ getarrayitem_gc=0, getfield_gc=2)
+
+ def test_list_caching(self):
+ a1 = [0, 0]
+ a2 = [0, 0]
+ def fn(n):
+ if n > 0:
+ a = a1
+ else:
+ a = a2
+ if n < -1000:
+ a.append(5)
+ a[0] = n
+ x1 = a[0]
+ a[n - n] = n + 1
+ return a[0] + x1
+ res = self.interp_operations(fn, [7])
+ assert res == 7 + 7 + 1
+ self.check_operations_history(getarrayitem_gc=1,
+ getfield_gc=1)
+ res = self.interp_operations(fn, [-7])
+ assert res == -7 - 7 + 1
+ self.check_operations_history(getarrayitem_gc=1,
+ getfield_gc=1)
+
+ def fn(n, ca, cb):
+ a1[0] = n
+ a2[0] = n
+ a = a1
+ if ca:
+ a = a2
+ if n < -100:
+ a.append(5)
+ b = a1
+ if cb:
+ b = a
+ return a[0] + b[0]
+ res = self.interp_operations(fn, [7, 0, 1])
+ assert res == 7 * 2
+ self.check_operations_history(getarrayitem_gc=1,
+ getfield_gc=3)
+ res = self.interp_operations(fn, [-7, 1, 1])
+ assert res == -7 * 2
+ self.check_operations_history(getarrayitem_gc=1,
+ getfield_gc=3)
+
+ def test_list_caching_negative(self):
+ def fn(n):
+ a = [0] * n
+ if n > 1000:
+ a.append(0)
+ a[-1] = n
+ x1 = a[-1]
+ a[n - n - 1] = n + 1
+ return a[-1] + x1
+ res = self.interp_operations(fn, [7])
+ assert res == 7 + 7 + 1
+ self.check_operations_history(setarrayitem_gc=2,
+ setfield_gc=2)
+
+ def test_virtualizable_with_array_heap_cache(self):
+ myjitdriver = jit.JitDriver(greens = [], reds = ['n', 'x', 'i', 'frame'],
+ virtualizables = ['frame'])
+
+ class Frame(object):
+ _virtualizable2_ = ['l[*]', 's']
+
+ def __init__(self, a, s):
+ self = jit.hint(self, access_directly=True, fresh_virtualizable=True)
+ self.l = [0] * (4 + a)
+ self.s = s
+
+ def f(n, a, i):
+ frame = Frame(a, 0)
+ frame.l[0] = a
+ frame.l[1] = a + 1
+ frame.l[2] = a + 2
+ frame.l[3] = a + 3
+ if not i:
+ return frame.l[0] + len(frame.l)
+ x = 0
+ while n > 0:
+ myjitdriver.can_enter_jit(frame=frame, n=n, x=x, i=i)
+ myjitdriver.jit_merge_point(frame=frame, n=n, x=x, i=i)
+ frame.s = jit.promote(frame.s)
+ n -= 1
+ s = frame.s
+ assert s >= 0
+ x += frame.l[s]
+ frame.s += 1
+ s = frame.s
+ assert s >= 0
+ x += frame.l[s]
+ x += len(frame.l)
+ x += f(n, n, 0)
+ frame.s -= 1
+ return x
+
+ res = self.meta_interp(f, [10, 1, 1], listops=True)
+ assert res == f(10, 1, 1)
+ self.check_history(getarrayitem_gc=0, getfield_gc=0)
+
+ def test_heap_caching_pure(self):
+ class A(object):
+ pass
+ p1 = A()
+ p2 = A()
+ def fn(n):
+ if n >= 0:
+ a = (n, n + 1)
+ p = p1
+ else:
+ a = (n + 1, n)
+ p = p2
+ p.x = a
+
+ return p.x[0] + p.x[1]
+ res = self.interp_operations(fn, [7])
+ assert res == 7 + 7 + 1
+ self.check_operations_history(getfield_gc=0, getfield_gc_pure=0)
+ res = self.interp_operations(fn, [-7])
+ assert res == -7 - 7 + 1
+ self.check_operations_history(getfield_gc=0, getfield_gc_pure=0)
+
+ def test_heap_caching_and_elidable_function(self):
+ class A:
+ pass
+ class B: pass
+ a1 = A()
+ a1.y = 6
+ a2 = A()
+ a2.y = 13
+ @jit.elidable
+ def f(b):
+ return b + 1
+ def fn(n):
+ if n > 0:
+ a = a1
+ else:
+ a = A()
+ a.x = n
+ z = f(6)
+ return z + a.x
+ res = self.interp_operations(fn, [7])
+ assert res == 7 + 7
+ self.check_operations_history(getfield_gc=0)
+ res = self.interp_operations(fn, [-7])
+ assert res == -7 + 7
+ self.check_operations_history(getfield_gc=0)
+ return
diff --git a/pypy/jit/metainterp/test/test_virtualizable.py b/pypy/jit/metainterp/test/test_virtualizable.py
--- a/pypy/jit/metainterp/test/test_virtualizable.py
+++ b/pypy/jit/metainterp/test/test_virtualizable.py
@@ -377,7 +377,7 @@
expected = f(20)
res = self.meta_interp(f, [20], enable_opts='')
assert res == expected
- self.check_loops(getfield_gc=3, setfield_gc=0,
+ self.check_loops(getfield_gc=1, setfield_gc=0,
arraylen_gc=1, getarrayitem_gc=1, setarrayitem_gc=1)
# ------------------------------
diff --git a/pypy/jit/metainterp/warmstate.py b/pypy/jit/metainterp/warmstate.py
--- a/pypy/jit/metainterp/warmstate.py
+++ b/pypy/jit/metainterp/warmstate.py
@@ -138,6 +138,9 @@
refvalue = cpu.ts.cast_to_ref(value)
cpu.set_future_value_ref(j, refvalue)
elif typecode == 'int':
+ if isinstance(lltype.typeOf(value), lltype.Ptr):
+ intvalue = llmemory.AddressAsInt(llmemory.cast_ptr_to_adr(value))
+ else:
intvalue = lltype.cast_primitive(lltype.Signed, value)
cpu.set_future_value_int(j, intvalue)
elif typecode == 'float':
diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py
--- a/pypy/module/__pypy__/__init__.py
+++ b/pypy/module/__pypy__/__init__.py
@@ -25,6 +25,7 @@
'debug_print_once' : 'interp_debug.debug_print_once',
'builtinify' : 'interp_magic.builtinify',
'lookup_special' : 'interp_magic.lookup_special',
+ 'do_what_I_mean' : 'interp_magic.do_what_I_mean',
}
submodules = {
diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py
--- a/pypy/module/__pypy__/interp_magic.py
+++ b/pypy/module/__pypy__/interp_magic.py
@@ -70,3 +70,6 @@
if w_descr is None:
return space.w_None
return space.get(w_descr, w_obj)
+
+def do_what_I_mean(space):
+ return space.wrap(42)
diff --git a/pypy/module/__pypy__/test/test_special.py b/pypy/module/__pypy__/test/test_special.py
--- a/pypy/module/__pypy__/test/test_special.py
+++ b/pypy/module/__pypy__/test/test_special.py
@@ -49,3 +49,8 @@
class X:
pass
raises(TypeError, lookup_special, X(), "foo")
+
+ def test_do_what_I_mean(self):
+ from __pypy__ import do_what_I_mean
+ x = do_what_I_mean()
+ assert x == 42
diff --git a/pypy/module/_multibytecodec/c_codecs.py b/pypy/module/_multibytecodec/c_codecs.py
--- a/pypy/module/_multibytecodec/c_codecs.py
+++ b/pypy/module/_multibytecodec/c_codecs.py
@@ -55,10 +55,12 @@
"pypy_cjk_dec_init", "pypy_cjk_dec_free", "pypy_cjk_dec_chunk",
"pypy_cjk_dec_outbuf", "pypy_cjk_dec_outlen",
"pypy_cjk_dec_inbuf_remaining", "pypy_cjk_dec_inbuf_consumed",
+ "pypy_cjk_dec_replace_on_error",
"pypy_cjk_enc_init", "pypy_cjk_enc_free", "pypy_cjk_enc_chunk",
"pypy_cjk_enc_reset", "pypy_cjk_enc_outbuf", "pypy_cjk_enc_outlen",
"pypy_cjk_enc_inbuf_remaining", "pypy_cjk_enc_inbuf_consumed",
+ "pypy_cjk_enc_replace_on_error",
] + ["pypy_cjkcodec_%s" % codec for codec in codecs],
)
diff --git a/pypy/module/cpyext/stringobject.py b/pypy/module/cpyext/stringobject.py
--- a/pypy/module/cpyext/stringobject.py
+++ b/pypy/module/cpyext/stringobject.py
@@ -268,3 +268,7 @@
if errors:
w_errors = space.wrap(rffi.charp2str(errors))
return space.call_method(w_str, 'encode', w_encoding, w_errors)
+
+ at cpython_api([PyObject, PyObject], PyObject)
+def _PyString_Join(space, w_sep, w_seq):
+ return space.call_method(w_sep, 'join', w_seq)
diff --git a/pypy/module/cpyext/test/test_stringobject.py b/pypy/module/cpyext/test/test_stringobject.py
--- a/pypy/module/cpyext/test/test_stringobject.py
+++ b/pypy/module/cpyext/test/test_stringobject.py
@@ -287,3 +287,9 @@
def test_eq(self, space, api):
assert 1 == api._PyString_Eq(space.wrap("hello"), space.wrap("hello"))
assert 0 == api._PyString_Eq(space.wrap("hello"), space.wrap("world"))
+
+ def test_join(self, space, api):
+ w_sep = space.wrap('<sep>')
+ w_seq = space.wrap(['a', 'b'])
+ w_joined = api._PyString_Join(w_sep, w_seq)
+ assert space.unwrap(w_joined) == 'a<sep>b'
diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py
--- a/pypy/module/micronumpy/__init__.py
+++ b/pypy/module/micronumpy/__init__.py
@@ -15,14 +15,22 @@
# ufuncs
'abs': 'interp_ufuncs.absolute',
'absolute': 'interp_ufuncs.absolute',
+ 'add': 'interp_ufuncs.add',
'copysign': 'interp_ufuncs.copysign',
+ 'divide': 'interp_ufuncs.divide',
'exp': 'interp_ufuncs.exp',
+ 'fabs': 'interp_ufuncs.fabs',
'floor': 'interp_ufuncs.floor',
'maximum': 'interp_ufuncs.maximum',
'minimum': 'interp_ufuncs.minimum',
+ 'multiply': 'interp_ufuncs.multiply',
'negative': 'interp_ufuncs.negative',
'reciprocal': 'interp_ufuncs.reciprocal',
'sign': 'interp_ufuncs.sign',
+ 'subtract': 'interp_ufuncs.subtract',
+ 'sin': 'interp_ufuncs.sin',
+ 'cos': 'interp_ufuncs.cos',
+ 'tan': 'interp_ufuncs.tan',
}
appleveldefs = {
diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py
--- a/pypy/module/micronumpy/compile.py
+++ b/pypy/module/micronumpy/compile.py
@@ -3,7 +3,7 @@
It should not be imported by the module itself
"""
-from pypy.module.micronumpy.interp_numarray import FloatWrapper, SingleDimArray
+from pypy.module.micronumpy.interp_numarray import FloatWrapper, SingleDimArray, BaseArray
class BogusBytecode(Exception):
pass
@@ -18,6 +18,14 @@
def wrap(self, x):
return x
+ def issequence_w(self, w_obj):
+ # Completley wrong in the general case, but good enough for this.
+ return isinstance(w_obj, BaseArray)
+
+ def float_w(self, w_obj):
+ assert isinstance(w_obj, float)
+ return w_obj
+
def numpy_compile(bytecode, array_size):
space = TrivialSpace()
stack = []
diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py
--- a/pypy/module/micronumpy/interp_numarray.py
+++ b/pypy/module/micronumpy/interp_numarray.py
@@ -2,59 +2,36 @@
from pypy.interpreter.error import OperationError, operationerrfmt
from pypy.interpreter.gateway import interp2app, unwrap_spec
from pypy.interpreter.typedef import TypeDef, GetSetProperty
+from pypy.module.micronumpy.interp_support import Signature
+from pypy.module.micronumpy import interp_ufuncs
+from pypy.objspace.std.floatobject import float2string as float2string_orig
from pypy.rlib import jit
+from pypy.rlib.rfloat import DTSF_STR_PRECISION
from pypy.rpython.lltypesystem import lltype
from pypy.tool.sourcetools import func_with_new_name
import math
-def dummy1(v):
- assert isinstance(v, float)
- return v
-
-def dummy2(v):
- assert isinstance(v, float)
- return v
-
TP = lltype.Array(lltype.Float, hints={'nolength': True})
numpy_driver = jit.JitDriver(greens = ['signature'],
reds = ['result_size', 'i', 'self', 'result'])
all_driver = jit.JitDriver(greens=['signature'], reds=['i', 'size', 'self'])
any_driver = jit.JitDriver(greens=['signature'], reds=['i', 'size', 'self'])
+slice_driver1 = jit.JitDriver(greens=['signature'], reds=['i', 'j', 'step', 'stop', 'source', 'dest'])
+slice_driver2 = jit.JitDriver(greens=['signature'], reds=['i', 'j', 'step', 'stop', 'source', 'dest'])
-class Signature(object):
- def __init__(self):
- self.transitions = {}
-
- def transition(self, target):
- if target in self.transitions:
- return self.transitions[target]
- self.transitions[target] = new = Signature()
- return new
-
-def pos(v):
- return v
-def neg(v):
- return -v
-def absolute(v):
- return abs(v)
def add(v1, v2):
return v1 + v2
-def sub(v1, v2):
- return v1 - v2
def mul(v1, v2):
return v1 * v2
-def div(v1, v2):
- return v1 / v2
-def power(v1, v2):
- return math.pow(v1, v2)
-def mod(v1, v2):
- return math.fmod(v1, v2)
def maximum(v1, v2):
return max(v1, v2)
def minimum(v1, v2):
return min(v1, v2)
+def float2string(x):
+ return float2string_orig(x, 'g', DTSF_STR_PRECISION)
+
class BaseArray(Wrappable):
def __init__(self):
self.invalidates = []
@@ -68,67 +45,39 @@
arr.force_if_needed()
del self.invalidates[:]
- def _unop_impl(function):
- signature = Signature()
+ def _unaryop_impl(w_ufunc):
def impl(self, space):
- new_sig = self.signature.transition(signature)
- res = Call1(
- function,
- self,
- new_sig)
- self.invalidates.append(res)
- return space.wrap(res)
- return func_with_new_name(impl, "uniop_%s_impl" % function.__name__)
+ return w_ufunc(space, self)
+ return func_with_new_name(impl, "unaryop_%s_impl" % w_ufunc.__name__)
- descr_pos = _unop_impl(pos)
- descr_neg = _unop_impl(neg)
- descr_abs = _unop_impl(absolute)
+ descr_pos = _unaryop_impl(interp_ufuncs.positive)
+ descr_neg = _unaryop_impl(interp_ufuncs.negative)
+ descr_abs = _unaryop_impl(interp_ufuncs.absolute)
- def _binop_impl(function):
- signature = Signature()
+ def _binop_impl(w_ufunc):
def impl(self, space, w_other):
- w_other = convert_to_array(space, w_other)
- new_sig = self.signature.transition(signature)
- res = Call2(
- function,
- self,
- w_other,
- new_sig.transition(w_other.signature)
- )
- w_other.invalidates.append(res)
- self.invalidates.append(res)
- return space.wrap(res)
- return func_with_new_name(impl, "binop_%s_impl" % function.__name__)
+ return w_ufunc(space, self, w_other)
+ return func_with_new_name(impl, "binop_%s_impl" % w_ufunc.__name__)
- descr_add = _binop_impl(add)
- descr_sub = _binop_impl(sub)
- descr_mul = _binop_impl(mul)
- descr_div = _binop_impl(div)
- descr_pow = _binop_impl(power)
- descr_mod = _binop_impl(mod)
+ descr_add = _binop_impl(interp_ufuncs.add)
+ descr_sub = _binop_impl(interp_ufuncs.subtract)
+ descr_mul = _binop_impl(interp_ufuncs.multiply)
+ descr_div = _binop_impl(interp_ufuncs.divide)
+ descr_pow = _binop_impl(interp_ufuncs.power)
+ descr_mod = _binop_impl(interp_ufuncs.mod)
- def _binop_right_impl(function):
- signature = Signature()
+ def _binop_right_impl(w_ufunc):
def impl(self, space, w_other):
- new_sig = self.signature.transition(signature)
w_other = FloatWrapper(space.float_w(w_other))
- res = Call2(
- function,
- w_other,
- self,
- new_sig.transition(w_other.signature)
- )
- self.invalidates.append(res)
- return space.wrap(res)
- return func_with_new_name(impl,
- "binop_right_%s_impl" % function.__name__)
+ return w_ufunc(space, w_other, self)
+ return func_with_new_name(impl, "binop_right_%s_impl" % w_ufunc.__name__)
- descr_radd = _binop_right_impl(add)
- descr_rsub = _binop_right_impl(sub)
- descr_rmul = _binop_right_impl(mul)
- descr_rdiv = _binop_right_impl(div)
- descr_rpow = _binop_right_impl(power)
- descr_rmod = _binop_right_impl(mod)
+ descr_radd = _binop_right_impl(interp_ufuncs.add)
+ descr_rsub = _binop_right_impl(interp_ufuncs.subtract)
+ descr_rmul = _binop_right_impl(interp_ufuncs.multiply)
+ descr_rdiv = _binop_right_impl(interp_ufuncs.divide)
+ descr_rpow = _binop_right_impl(interp_ufuncs.power)
+ descr_rmod = _binop_right_impl(interp_ufuncs.mod)
def _reduce_sum_prod_impl(function, init):
reduce_driver = jit.JitDriver(greens=['signature'],
@@ -235,9 +184,30 @@
else:
return self.descr_mul(space, w_other)
+ def _getnums(self, comma):
+ if self.find_size() > 1000:
+ nums = [
+ float2string(self.getitem(index))
+ for index in range(3)
+ ]
+ nums.append("..." + "," * comma)
+ nums.extend([
+ float2string(self.getitem(index))
+ for index in range(self.find_size() - 3, self.find_size())
+ ])
+ else:
+ nums = [
+ float2string(self.getitem(index))
+ for index in range(self.find_size())
+ ]
+ return nums
+
def get_concrete(self):
raise NotImplementedError
+ def descr_copy(self, space):
+ return new_numarray(space, self)
+
def descr_get_shape(self, space):
return space.newtuple([self.descr_len(space)])
@@ -245,10 +215,14 @@
return self.get_concrete().descr_len(space)
def descr_repr(self, space):
- return self.get_concrete()._repr(space)
+ # Simple implementation so that we can see the array. Needs work.
+ concrete = self.get_concrete()
+ return space.wrap("array([" + ", ".join(concrete._getnums(False)) + "])")
def descr_str(self, space):
- return self.get_concrete()._str(space)
+ # Simple implementation so that we can see the array. Needs work.
+ concrete = self.get_concrete()
+ return space.wrap("[" + " ".join(concrete._getnums(True)) + "]")
def descr_getitem(self, space, w_idx):
# TODO: indexing by tuples
@@ -261,14 +235,52 @@
res = SingleDimSlice(start, stop, step, slice_length, self, self.signature.transition(SingleDimSlice.static_signature))
return space.wrap(res)
- @unwrap_spec(item=int, value=float)
- def descr_setitem(self, space, item, value):
+ def descr_setitem(self, space, w_idx, w_value):
+ # TODO: indexing by tuples and lists
self.invalidated()
- return self.get_concrete().descr_setitem(space, item, value)
+ start, stop, step, slice_length = space.decode_index4(w_idx,
+ self.find_size())
+ if step == 0:
+ # Single index
+ self.get_concrete().setitem(start, space.float_w(w_value))
+ else:
+ concrete = self.get_concrete()
+ if isinstance(w_value, BaseArray):
+ # for now we just copy if setting part of an array from
+ # part of itself. can be improved.
+ if concrete.get_root_storage() is \
+ w_value.get_concrete().get_root_storage():
+ w_value = new_numarray(space, w_value)
+ else:
+ w_value = convert_to_array(space, w_value)
+ concrete.setslice(space, start, stop, step,
+ slice_length, w_value)
def descr_mean(self, space):
return space.wrap(space.float_w(self.descr_sum(space))/self.find_size())
+ def _sliceloop1(self, start, stop, step, source, dest):
+ i = start
+ j = 0
+ while i < stop:
+ slice_driver1.jit_merge_point(signature=source.signature,
+ step=step, stop=stop, i=i, j=j, source=source,
+ dest=dest)
+ dest.storage[i] = source.eval(j)
+ j += 1
+ i += step
+
+ def _sliceloop2(self, start, stop, step, source, dest):
+ i = start
+ j = 0
+ while i > stop:
+ slice_driver2.jit_merge_point(signature=source.signature,
+ step=step, stop=stop, i=i, j=j, source=source,
+ dest=dest)
+ dest.storage[i] = source.eval(j)
+ j += 1
+ i += step
+
def convert_to_array (space, w_obj):
if isinstance(w_obj, BaseArray):
return w_obj
@@ -413,8 +425,8 @@
return self.parent.getitem(self.calc_index(item))
@unwrap_spec(item=int, value=float)
- def descr_setitem(self, space, item, value):
- return self.parent.descr_setitem(space, self.calc_index(item), value)
+ def setitem(self, item, value):
+ return self.parent.setitem(self.calc_index(item), value)
def descr_len(self, space):
return space.wrap(self.find_size())
@@ -428,37 +440,37 @@
def __init__(self, start, stop, step, slice_length, parent, signature):
ViewArray.__init__(self, parent, signature)
+ if isinstance(parent, SingleDimSlice):
+ self.start = parent.calc_index(start)
+ self.stop = parent.calc_index(stop)
+ self.step = parent.step * step
+ self.parent = parent.parent
+ else:
self.start = start
self.stop = stop
self.step = step
+ self.parent = parent
self.size = slice_length
+ def get_root_storage(self):
+ self.parent.storage
+
def find_size(self):
return self.size
+ def setslice(self, space, start, stop, step, slice_length, arr):
+ start = self.calc_index(start)
+ if stop != -1:
+ stop = self.calc_index(stop)
+ step = self.step * step
+ if step > 0:
+ self._sliceloop1(start, stop, step, arr, self.parent)
+ else:
+ self._sliceloop2(start, stop, step, arr, self.parent)
+
def calc_index(self, item):
return (self.start + item * self.step)
- def _getnums(self, comma):
- if self.find_size() > 1000:
- nums = [str(self.getitem(index)) for index \
- in range(3)]
- nums.append("..." + "," * comma)
- nums.extend([str(self.getitem(index)) for index \
- in range(self.find_size() - 3, self.find_size())])
- else:
- nums = [str(self.getitem(index)) for index \
- in range(self.find_size())]
- return nums
-
- def _repr(self, space):
- # Simple implementation so that we can see the array. Needs work.
- return space.wrap("array([" + ", ".join(self._getnums(False)) + "])")
-
- def _str(self,space):
- # Simple implementation so that we can see the array. Needs work.
- return space.wrap("[" + " ".join(self._getnums(True)) + "]")
-
class SingleDimArray(BaseArray):
signature = Signature()
@@ -473,55 +485,31 @@
def get_concrete(self):
return self
+ def get_root_storage(self):
+ return self.storage
+
def find_size(self):
return self.size
def eval(self, i):
return self.storage[i]
- def getindex(self, space, item):
- if item >= self.size:
- raise operationerrfmt(space.w_IndexError,
- '%d above array size', item)
- if item < 0:
- item += self.size
- if item < 0:
- raise operationerrfmt(space.w_IndexError,
- '%d below zero', item)
- return item
-
def descr_len(self, space):
return space.wrap(self.size)
def getitem(self, item):
return self.storage[item]
- def _getnums(self, comma):
- if self.find_size() > 1000:
- nums = [str(self.getitem(index)) for index \
- in range(3)]
- nums.append("..." + "," * comma)
- nums.extend([str(self.getitem(index)) for index \
- in range(self.find_size() - 3, self.find_size())])
- else:
- nums = [str(self.getitem(index)) for index \
- in range(self.find_size())]
- return nums
-
- def _repr(self, space):
- # Simple implementation so that we can see the array. Needs work.
- return space.wrap("array([" + ", ".join(self._getnums(False)) + "])")
-
- def _str(self,space):
- # Simple implementation so that we can see the array. Needs work.
- return space.wrap("[" + " ".join(self._getnums(True)) + "]")
-
- @unwrap_spec(item=int, value=float)
- def descr_setitem(self, space, item, value):
- item = self.getindex(space, item)
+ def setitem(self, item, value):
self.invalidated()
self.storage[item] = value
+ def setslice(self, space, start, stop, step, slice_length, arr):
+ if step > 0:
+ self._sliceloop1(start, stop, step, arr, self)
+ else:
+ self._sliceloop2(start, stop, step, arr, self)
+
def __del__(self):
lltype.free(self.storage, flavor='raw')
@@ -552,6 +540,7 @@
'numarray',
__new__ = interp2app(descr_new_numarray),
+ copy = interp2app(BaseArray.descr_copy),
shape = GetSetProperty(BaseArray.descr_get_shape),
__len__ = interp2app(BaseArray.descr_len),
diff --git a/pypy/module/micronumpy/interp_support.py b/pypy/module/micronumpy/interp_support.py
--- a/pypy/module/micronumpy/interp_support.py
+++ b/pypy/module/micronumpy/interp_support.py
@@ -1,14 +1,14 @@
-
from pypy.rlib.rstruct.runpack import runpack
from pypy.rpython.lltypesystem import lltype, rffi
+from pypy.interpreter.error import OperationError
from pypy.interpreter.gateway import unwrap_spec
-from pypy.interpreter.error import OperationError
-from pypy.module.micronumpy.interp_numarray import SingleDimArray
+
FLOAT_SIZE = rffi.sizeof(lltype.Float)
@unwrap_spec(s=str)
def fromstring(space, s):
+ from pypy.module.micronumpy.interp_numarray import SingleDimArray
length = len(s)
if length % FLOAT_SIZE == 0:
@@ -30,3 +30,13 @@
end += FLOAT_SIZE
return space.wrap(a)
+
+class Signature(object):
+ def __init__(self):
+ self.transitions = {}
+
+ def transition(self, target):
+ if target in self.transitions:
+ return self.transitions[target]
+ self.transitions[target] = new = Signature()
+ return new
\ No newline at end of file
diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py
--- a/pypy/module/micronumpy/interp_ufuncs.py
+++ b/pypy/module/micronumpy/interp_ufuncs.py
@@ -1,13 +1,13 @@
import math
-from pypy.module.micronumpy.interp_numarray import (Call1, Call2, Signature,
- convert_to_array)
+from pypy.module.micronumpy.interp_support import Signature
from pypy.rlib import rfloat
from pypy.tool.sourcetools import func_with_new_name
def ufunc(func):
signature = Signature()
def impl(space, w_obj):
+ from pypy.module.micronumpy.interp_numarray import Call1, convert_to_array
if space.issequence_w(w_obj):
w_obj_arr = convert_to_array(space, w_obj)
w_res = Call1(func, w_obj_arr, w_obj_arr.signature.transition(signature))
@@ -20,6 +20,7 @@
def ufunc2(func):
signature = Signature()
def impl(space, w_lhs, w_rhs):
+ from pypy.module.micronumpy.interp_numarray import Call2, convert_to_array
if space.issequence_w(w_lhs) or space.issequence_w(w_rhs):
w_lhs_arr = convert_to_array(space, w_lhs)
w_rhs_arr = convert_to_array(space, w_rhs)
@@ -37,9 +38,17 @@
return abs(value)
@ufunc2
+def add(lvalue, rvalue):
+ return lvalue + rvalue
+
+ at ufunc2
def copysign(lvalue, rvalue):
return rfloat.copysign(lvalue, rvalue)
+ at ufunc2
+def divide(lvalue, rvalue):
+ return lvalue / rvalue
+
@ufunc
def exp(value):
try:
@@ -47,6 +56,10 @@
except OverflowError:
return rfloat.INFINITY
+ at ufunc
+def fabs(value):
+ return math.fabs(value)
+
@ufunc2
def maximum(lvalue, rvalue):
return max(lvalue, rvalue)
@@ -55,6 +68,15 @@
def minimum(lvalue, rvalue):
return min(lvalue, rvalue)
+ at ufunc2
+def multiply(lvalue, rvalue):
+ return lvalue * rvalue
+
+# Used by numarray for __pos__. Not visible from numpy application space.
+ at ufunc
+def positive(value):
+ return value
+
@ufunc
def negative(value):
return -value
@@ -65,6 +87,10 @@
return rfloat.copysign(rfloat.INFINITY, value)
return 1.0 / value
+ at ufunc2
+def subtract(lvalue, rvalue):
+ return lvalue - rvalue
+
@ufunc
def floor(value):
return math.floor(value)
@@ -74,3 +100,23 @@
if value == 0.0:
return 0.0
return rfloat.copysign(1.0, value)
+
+ at ufunc
+def sin(value):
+ return math.sin(value)
+
+ at ufunc
+def cos(value):
+ return math.cos(value)
+
+ at ufunc
+def tan(value):
+ return math.tan(value)
+
+ at ufunc2
+def power(lvalue, rvalue):
+ return math.pow(lvalue, rvalue)
+
+ at ufunc2
+def mod(lvalue, rvalue):
+ return math.fmod(lvalue, rvalue)
diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py
--- a/pypy/module/micronumpy/test/test_numarray.py
+++ b/pypy/module/micronumpy/test/test_numarray.py
@@ -38,6 +38,13 @@
a[2] = 4
assert a[2] == 4
+ def test_copy(self):
+ from numpy import array
+ a = array(range(5))
+ b = a.copy()
+ for i in xrange(5):
+ assert b[i] == a[i]
+
def test_iterator_init(self):
from numpy import array
a = array(range(5))
@@ -92,6 +99,51 @@
raises(IndexError, "a[5] = 0.0")
raises(IndexError, "a[-6] = 3.0")
+ def test_setslice_array(self):
+ from numpy import array
+ a = array(range(5))
+ b = array(range(2))
+ a[1:4:2] = b
+ assert a[1] == 0.
+ assert a[3] == 1.
+ b[::-1] = b
+ assert b[0] == 1.
+ assert b[1] == 0.
+
+ def test_setslice_of_slice_array(self):
+ from numpy import array, zeros
+ a = zeros(5)
+ a[::2] = array([9., 10., 11.])
+ assert a[0] == 9.
+ assert a[2] == 10.
+ assert a[4] == 11.
+ a[1:4:2][::-1] = array([1., 2.])
+ assert a[0] == 9.
+ assert a[1] == 2.
+ assert a[2] == 10.
+ assert a[3] == 1.
+ assert a[4] == 11.
+ a = zeros(10)
+ a[::2][::-1][::2] = array(range(1,4))
+ assert a[8] == 1.
+ assert a[4] == 2.
+ assert a[0] == 3.
+
+ def test_setslice_list(self):
+ from numpy import array
+ a = array(range(5))
+ b = [0., 1.]
+ a[1:4:2] = b
+ assert a[1] == 0.
+ assert a[3] == 1.
+
+ def test_setslice_constant(self):
+ from numpy import array
+ a = array(range(5))
+ a[1:4:2] = 0.
+ assert a[1] == 0.
+ assert a[3] == 0.
+
def test_len(self):
from numpy import array
a = array(range(5))
@@ -129,6 +181,12 @@
for i in range(5):
assert b[i] == i + 5
+ def test_radd(self):
+ from numpy import array
+ r = 3 + array(range(3))
+ for i in range(3):
+ assert r[i] == i + 3
+
def test_add_list(self):
from numpy import array
a = array(range(5))
diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py
--- a/pypy/module/micronumpy/test/test_ufuncs.py
+++ b/pypy/module/micronumpy/test/test_ufuncs.py
@@ -65,6 +65,33 @@
for i in range(3):
assert b[i] == abs(a[i])
+ def test_add(self):
+ from numpy import array, add
+
+ a = array([-5.0, -0.0, 1.0])
+ b = array([ 3.0, -2.0,-3.0])
+ c = add(a, b)
+ for i in range(3):
+ assert c[i] == a[i] + b[i]
+
+ def test_divide(self):
+ from numpy import array, divide
+
+ a = array([-5.0, -0.0, 1.0])
+ b = array([ 3.0, -2.0,-3.0])
+ c = divide(a, b)
+ for i in range(3):
+ assert c[i] == a[i] / b[i]
+
+ def test_fabs(self):
+ from numpy import array, fabs
+ from math import fabs as math_fabs
+
+ a = array([-5.0, -0.0, 1.0])
+ b = fabs(a)
+ for i in range(3):
+ assert b[i] == math_fabs(a[i])
+
def test_minimum(self):
from numpy import array, minimum
@@ -83,6 +110,15 @@
for i in range(3):
assert c[i] == max(a[i], b[i])
+ def test_multiply(self):
+ from numpy import array, multiply
+
+ a = array([-5.0, -0.0, 1.0])
+ b = array([ 3.0, -2.0,-3.0])
+ c = multiply(a, b)
+ for i in range(3):
+ assert c[i] == a[i] * b[i]
+
def test_sign(self):
from numpy import array, sign
@@ -101,6 +137,15 @@
for i in range(4):
assert b[i] == reference[i]
+ def test_subtract(self):
+ from numpy import array, subtract
+
+ a = array([-5.0, -0.0, 1.0])
+ b = array([ 3.0, -2.0,-3.0])
+ c = subtract(a, b)
+ for i in range(3):
+ assert c[i] == a[i] - b[i]
+
def test_floor(self):
from numpy import array, floor
@@ -133,3 +178,30 @@
except OverflowError:
res = float('inf')
assert b[i] == res
+
+ def test_sin(self):
+ import math
+ from numpy import array, sin
+
+ a = array([0, 1, 2, 3, math.pi, math.pi*1.5, math.pi*2])
+ b = sin(a)
+ for i in range(len(a)):
+ assert b[i] == math.sin(a[i])
+
+ def test_cos(self):
+ import math
+ from numpy import array, cos
+
+ a = array([0, 1, 2, 3, math.pi, math.pi*1.5, math.pi*2])
+ b = cos(a)
+ for i in range(len(a)):
+ assert b[i] == math.cos(a[i])
+
+ def test_tan(self):
+ import math
+ from numpy import array, tan
+
+ a = array([0, 1, 2, 3, math.pi, math.pi*1.5, math.pi*2])
+ b = tan(a)
+ for i in range(len(a)):
+ assert b[i] == math.tan(a[i])
diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py
--- a/pypy/module/micronumpy/test/test_zjit.py
+++ b/pypy/module/micronumpy/test/test_zjit.py
@@ -1,10 +1,11 @@
from pypy.jit.metainterp.test.support import LLJitMixin
from pypy.rpython.test.test_llinterp import interpret
from pypy.module.micronumpy.interp_numarray import (SingleDimArray, Signature,
- FloatWrapper, Call2, SingleDimSlice, add, mul, neg, Call1)
+ FloatWrapper, Call2, SingleDimSlice, add, mul, Call1)
from pypy.module.micronumpy.interp_ufuncs import negative
from pypy.module.micronumpy.compile import numpy_compile
from pypy.rlib.objectmodel import specialize
+from pypy.rlib.nonconst import NonConstant
class FakeSpace(object):
w_ValueError = None
@@ -47,21 +48,6 @@
"int_lt": 1, "guard_true": 1, "jump": 1})
assert result == f(5)
- def test_neg(self):
- space = self.space
-
- def f(i):
- ar = SingleDimArray(i)
- v = Call1(neg, ar, Signature())
- return v.get_concrete().storage[3]
-
- result = self.meta_interp(f, [5], listops=True, backendopt=True)
- self.check_loops({"getarrayitem_raw": 1, "float_neg": 1,
- "setarrayitem_raw": 1, "int_add": 1,
- "int_lt": 1, "guard_true": 1, "jump": 1})
-
- assert result == f(5)
-
def test_sum(self):
space = self.space
@@ -104,6 +90,7 @@
"float_gt": 1, "int_add": 1,
"int_lt": 1, "guard_true": 1,
"guard_false": 1, "jump": 1})
+ assert result == f(5)
def test_min(self):
space = self.space
@@ -121,6 +108,7 @@
"float_lt": 1, "int_add": 1,
"int_lt": 1, "guard_true": 2,
"jump": 1})
+ assert result == f(5)
def test_argmin(self):
space = self.space
@@ -138,6 +126,7 @@
"float_lt": 1, "int_add": 1,
"int_lt": 1, "guard_true": 2,
"jump": 1})
+ assert result == f(5)
def test_all(self):
space = self.space
@@ -153,6 +142,7 @@
self.check_loops({"getarrayitem_raw": 2, "float_add": 1,
"int_add": 1, "float_ne": 1,
"int_lt": 1, "guard_true": 2, "jump": 1})
+ assert result == f(5)
def test_any(self):
space = self.space
@@ -165,6 +155,7 @@
self.check_loops({"getarrayitem_raw": 2, "float_add": 1,
"int_add": 1, "float_ne": 1, "guard_false": 1,
"int_lt": 1, "guard_true": 1, "jump": 1})
+ assert result == f(5)
def test_already_forecd(self):
def f(i):
@@ -248,6 +239,28 @@
'int_lt': 1, 'guard_true': 1, 'jump': 1})
assert result == f(5)
+ def test_setslice(self):
+ space = self.space
+
+ def f(i):
+ step = NonConstant(3)
+ ar = SingleDimArray(step*i)
+ ar2 = SingleDimArray(i)
+ ar2.storage[1] = 5.5
+ if NonConstant(False):
+ arg = ar2
+ else:
+ arg = ar2.descr_add(space, ar2)
+ ar.setslice(space, 0, step*i, step, i, arg)
+ return ar.get_concrete().storage[3]
+
+ result = self.meta_interp(f, [5], listops=True, backendopt=True)
+ self.check_loops({'getarrayitem_raw': 2,
+ 'float_add' : 1,
+ 'setarrayitem_raw': 1, 'int_add': 2,
+ 'int_lt': 1, 'guard_true': 1, 'jump': 1})
+ assert result == 11.0
+
class TestTranslation(object):
def test_compile(self):
x = numpy_compile('aa+f*f/a-', 10)
diff --git a/pypy/module/pypyjit/test_pypy_c/test__ffi.py b/pypy/module/pypyjit/test_pypy_c/test__ffi.py
--- a/pypy/module/pypyjit/test_pypy_c/test__ffi.py
+++ b/pypy/module/pypyjit/test_pypy_c/test__ffi.py
@@ -30,7 +30,6 @@
assert res == 8.0 * 300
loop, = log.loops_by_filename(self.filepath)
assert loop.match_by_id('fficall', """
- p16 = getfield_gc(ConstPtr(ptr15), descr=<.* .*Function.inst_name .*>)
guard_not_invalidated(descr=...)
i17 = force_token()
setfield_gc(p0, i17, descr=<.* .*PyFrame.vable_token .*>)
diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py
--- a/pypy/module/pypyjit/test_pypy_c/test_containers.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py
@@ -23,3 +23,29 @@
ops = loop.ops_by_id('look')
assert log.opnames(ops) == ['setfield_gc',
'guard_not_invalidated']
+
+ def test_identitydict(self):
+ def fn(n):
+ class X(object):
+ pass
+ x = X()
+ d = {}
+ d[x] = 1
+ res = 0
+ for i in range(300):
+ value = d[x] # ID: getitem
+ res += value
+ return res
+ #
+ log = self.run(fn, [1000])
+ assert log.result == 300
+ loop, = log.loops_by_filename(self.filepath)
+ # check that the call to ll_dict_lookup is not a call_may_force
+ assert loop.match_by_id("getitem", """
+ i25 = call(ConstClass(_ll_1_gc_identityhash__objectPtr), p6, descr=...)
+ ...
+ i28 = call(ConstClass(ll_dict_lookup__dicttablePtr_objectPtr_Signed), p18, p6, i25, descr=...)
+ ...
+ p33 = call(ConstClass(ll_get_value__dicttablePtr_Signed), p18, i28, descr=...)
+ ...
+ """)
diff --git a/pypy/module/pypyjit/test_pypy_c/test_string.py b/pypy/module/pypyjit/test_pypy_c/test_string.py
--- a/pypy/module/pypyjit/test_pypy_c/test_string.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_string.py
@@ -92,10 +92,10 @@
p51 = new_with_vtable(21136408)
setfield_gc(p51, p28, descr=<GcPtrFieldDescr .*NumberStringParser.inst_literal .*>)
setfield_gc(p51, ConstPtr(ptr51), descr=<GcPtrFieldDescr pypy.objspace.std.strutil.NumberStringParser.inst_fname .*>)
- setfield_gc(p51, i29, descr=<SignedFieldDescr .*NumberStringParser.inst_n .*>)
setfield_gc(p51, 1, descr=<SignedFieldDescr .*NumberStringParser.inst_sign .*>)
setfield_gc(p51, 16, descr=<SignedFieldDescr .*NumberStringParser.inst_base .*>)
setfield_gc(p51, p28, descr=<GcPtrFieldDescr .*NumberStringParser.inst_s .*>)
+ setfield_gc(p51, i29, descr=<SignedFieldDescr .*NumberStringParser.inst_n .*>)
p55 = call(ConstClass(parse_digit_string), p51, descr=<GcPtrCallDescr>)
guard_no_exception(descr=...)
i57 = call(ConstClass(rbigint.toint), p55, descr=<SignedCallDescr>)
@@ -104,4 +104,4 @@
guard_no_overflow(descr=...)
--TICK--
jump(p0, p1, p2, p3, p4, p5, i58, i7, i8, p9, p10, descr=<Loop4>)
- """)
\ No newline at end of file
+ """)
diff --git a/pypy/module/select/test/test_epoll.py b/pypy/module/select/test/test_epoll.py
--- a/pypy/module/select/test/test_epoll.py
+++ b/pypy/module/select/test/test_epoll.py
@@ -138,7 +138,7 @@
expected.sort()
assert events == expected
- assert then - now < 0.01
+ assert then - now < 0.02
now = time.time()
events = ep.poll(timeout=2.1, maxevents=4)
@@ -151,7 +151,7 @@
now = time.time()
events = ep.poll(1, 4)
then = time.time()
- assert then - now < 0.01
+ assert then - now < 0.02
events.sort()
expected = [
@@ -168,7 +168,7 @@
now = time.time()
events = ep.poll(1, 4)
then = time.time()
- assert then - now < 0.01
+ assert then - now < 0.02
expected = [(server.fileno(), select.EPOLLOUT)]
assert events == expected
@@ -192,7 +192,7 @@
now = time.time()
ep.poll(1, 4)
then = time.time()
- assert then - now < 0.01
+ assert then - now < 0.02
server.close()
ep.unregister(fd)
diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py
--- a/pypy/objspace/descroperation.py
+++ b/pypy/objspace/descroperation.py
@@ -28,6 +28,13 @@
return w_delattr
object_delattr._annspecialcase_ = 'specialize:memo'
+def object_hash(space):
+ "Utility that returns the app-level descriptor object.__hash__."
+ w_src, w_hash = space.lookup_in_type_where(space.w_object,
+ '__hash__')
+ return w_hash
+object_hash._annspecialcase_ = 'specialize:memo'
+
def raiseattrerror(space, w_obj, name, w_descr=None):
w_type = space.type(w_obj)
typename = w_type.getname(space)
diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py
--- a/pypy/objspace/std/dictmultiobject.py
+++ b/pypy/objspace/std/dictmultiobject.py
@@ -157,11 +157,15 @@
return self.erase(None)
def switch_to_correct_strategy(self, w_dict, w_key):
- #XXX implement other strategies later
+ withidentitydict = self.space.config.objspace.std.withidentitydict
if type(w_key) is self.space.StringObjectCls:
self.switch_to_string_strategy(w_dict)
- elif self.space.is_w(self.space.type(w_key), self.space.w_int):
+ return
+ w_type = self.space.type(w_key)
+ if self.space.is_w(w_type, self.space.w_int):
self.switch_to_int_strategy(w_dict)
+ elif withidentitydict and w_type.compares_by_identity():
+ self.switch_to_identity_strategy(w_dict)
else:
self.switch_to_object_strategy(w_dict)
@@ -177,6 +181,13 @@
w_dict.strategy = strategy
w_dict.dstorage = storage
+ def switch_to_identity_strategy(self, w_dict):
+ from pypy.objspace.std.identitydict import IdentityDictStrategy
+ strategy = self.space.fromcache(IdentityDictStrategy)
+ storage = strategy.get_empty_storage()
+ w_dict.strategy = strategy
+ w_dict.dstorage = storage
+
def switch_to_object_strategy(self, w_dict):
strategy = self.space.fromcache(ObjectDictStrategy)
storage = strategy.get_empty_storage()
@@ -338,7 +349,6 @@
def getitem(self, w_dict, w_key):
space = self.space
-
if self.is_correct_type(w_key):
return self.unerase(w_dict.dstorage).get(self.unwrap(w_key), None)
elif self._never_equal_to(space.type(w_key)):
@@ -404,6 +414,7 @@
def keys(self, w_dict):
return self.unerase(w_dict.dstorage).keys()
+
class StringDictStrategy(AbstractTypedStrategy, DictStrategy):
erase, unerase = rerased.new_erasing_pair("string")
@@ -448,7 +459,9 @@
return StrIteratorImplementation(self.space, self, w_dict)
-class StrIteratorImplementation(IteratorImplementation):
+class _WrappedIteratorMixin(object):
+ _mixin_ = True
+
def __init__(self, space, strategy, dictimplementation):
IteratorImplementation.__init__(self, space, dictimplementation)
self.iterator = strategy.unerase(dictimplementation.dstorage).iteritems()
@@ -460,6 +473,23 @@
else:
return None, None
+class _UnwrappedIteratorMixin:
+ _mixin_ = True
+
+ def __init__(self, space, strategy, dictimplementation):
+ IteratorImplementation.__init__(self, space, dictimplementation)
+ self.iterator = strategy.unerase(dictimplementation.dstorage).iteritems()
+
+ def next_entry(self):
+ # note that this 'for' loop only runs once, at most
+ for w_key, w_value in self.iterator:
+ return w_key, w_value
+ else:
+ return None, None
+
+
+class StrIteratorImplementation(_WrappedIteratorMixin, IteratorImplementation):
+ pass
class IntDictStrategy(AbstractTypedStrategy, DictStrategy):
erase, unerase = rerased.new_erasing_pair("int")
@@ -490,31 +520,11 @@
def iter(self, w_dict):
return IntIteratorImplementation(self.space, self, w_dict)
-class IntIteratorImplementation(IteratorImplementation):
- def __init__(self, space, strategy, dictimplementation):
- IteratorImplementation.__init__(self, space, dictimplementation)
- self.iterator = strategy.unerase(dictimplementation.dstorage).iteritems()
+class IntIteratorImplementation(_WrappedIteratorMixin, IteratorImplementation):
+ pass
- def next_entry(self):
- # note that this 'for' loop only runs once, at most
- for key, w_value in self.iterator:
- return self.space.wrap(key), w_value
- else:
- return None, None
-
-
-class ObjectIteratorImplementation(IteratorImplementation):
- def __init__(self, space, strategy, dictimplementation):
- IteratorImplementation.__init__(self, space, dictimplementation)
- self.iterator = strategy.unerase(dictimplementation.dstorage).iteritems()
-
- def next_entry(self):
- # note that this 'for' loop only runs once, at most
- for w_key, w_value in self.iterator:
- return w_key, w_value
- else:
- return None, None
-
+class ObjectIteratorImplementation(_UnwrappedIteratorMixin, IteratorImplementation):
+ pass
init_signature = Signature(['seq_or_map'], None, 'kwargs')
init_defaults = [None]
diff --git a/pypy/objspace/std/dictproxyobject.py b/pypy/objspace/std/dictproxyobject.py
--- a/pypy/objspace/std/dictproxyobject.py
+++ b/pypy/objspace/std/dictproxyobject.py
@@ -86,7 +86,7 @@
def clear(self, w_dict):
self.unerase(w_dict.dstorage).dict_w.clear()
- self.unerase(w_dict.dstorage).mutated()
+ self.unerase(w_dict.dstorage).mutated(None)
class DictProxyIteratorImplementation(IteratorImplementation):
def __init__(self, space, strategy, dictimplementation):
diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py
--- a/pypy/objspace/std/floatobject.py
+++ b/pypy/objspace/std/floatobject.py
@@ -133,8 +133,7 @@
else:
return space.wrap("0x%sp%s%d" % (s, sign, exp))
-def float2string(space, w_float, code, precision):
- x = w_float.floatval
+def float2string(x, code, precision):
# we special-case explicitly inf and nan here
if isfinite(x):
s = formatd(x, code, precision, DTSF_ADD_DOT_0)
@@ -145,13 +144,13 @@
s = "-inf"
else: # isnan(x):
s = "nan"
- return space.wrap(s)
+ return s
def repr__Float(space, w_float):
- return float2string(space, w_float, 'r', 0)
+ return space.wrap(float2string(w_float.floatval, 'r', 0))
def str__Float(space, w_float):
- return float2string(space, w_float, 'g', DTSF_STR_PRECISION)
+ return space.wrap(float2string(w_float.floatval, 'g', DTSF_STR_PRECISION))
def format__Float_ANY(space, w_float, w_spec):
return newformat.run_formatter(space, w_spec, "format_float", w_float)
diff --git a/pypy/objspace/std/identitydict.py b/pypy/objspace/std/identitydict.py
new file mode 100644
--- /dev/null
+++ b/pypy/objspace/std/identitydict.py
@@ -0,0 +1,85 @@
+## ----------------------------------------------------------------------------
+## dict strategy (see dict_multiobject.py)
+
+from pypy.rlib import rerased
+from pypy.objspace.std.dictmultiobject import (AbstractTypedStrategy,
+ DictStrategy,
+ IteratorImplementation,
+ _UnwrappedIteratorMixin)
+
+
+# this strategy is selected by EmptyDictStrategy.switch_to_correct_strategy
+class IdentityDictStrategy(AbstractTypedStrategy, DictStrategy):
+ """
+    Strategy for custom instances which compare by identity (i.e., the
+ default unless you override __hash__, __eq__ or __cmp__). The storage is
+ just a normal RPython dict, which has already the correct by-identity
+ semantics.
+
+ Note that at a first sight, you might have problems if you mutate the
+ class of an object which is already inside an identitydict. Consider this
+ example::
+
+ class X(object):
+ pass
+        d = {X(): 1}
+ X.__eq__ = ...
+ d[y] # might trigger a call to __eq__?
+
+ We want to be sure that x.__eq__ is called in the same cases as in
+ CPython. However, as long as the strategy is IdentityDictStrategy, the
+ __eq__ will never be called.
+
+ It turns out that it's not a problem. In CPython (and in PyPy without
+ this strategy), the __eq__ is called if ``hash(y) == hash(x)`` and ``x is
+ not y``. Note that hash(x) is computed at the time when we insert x in
+ the dict, not at the time we lookup y.
+
+ Now, how can hash(y) == hash(x)? There are two possibilities:
+
+ 1. we write a custom __hash__ for the class of y, thus making it a not
+ "compares by reference" type
+
+ 2. the class of y is "compares by reference" type, and by chance the
+ hash is the same as x
+
+    In the first case, the getitem immediately notices that y is not of the
+ right type, and switches the strategy to ObjectDictStrategy, then the
+ lookup works as usual.
+
+ The second case is completely non-deterministic, even in CPython.
+ Depending on the phase of the moon, you might call the __eq__ or not, so
+    it is perfectly fine to *never* call it. Moreover, in practice with the
+    minimark GC we never have two live objects with the same hash, so it would
+ never happen anyway.
+ """
+
+ erase, unerase = rerased.new_erasing_pair("identitydict")
+ erase = staticmethod(erase)
+ unerase = staticmethod(unerase)
+
+ def wrap(self, unwrapped):
+ return unwrapped
+
+ def unwrap(self, wrapped):
+ return wrapped
+
+ def get_empty_storage(self):
+ return self.erase({})
+
+ def is_correct_type(self, w_obj):
+ w_type = self.space.type(w_obj)
+ return w_type.compares_by_identity()
+
+ def _never_equal_to(self, w_lookup_type):
+ return False
+
+ def iter(self, w_dict):
+ return IdentityDictIteratorImplementation(self.space, self, w_dict)
+
+ def keys(self, w_dict):
+ return self.unerase(w_dict.dstorage).keys()
+
+
+class IdentityDictIteratorImplementation(_UnwrappedIteratorMixin, IteratorImplementation):
+ pass
diff --git a/pypy/objspace/std/objecttype.py b/pypy/objspace/std/objecttype.py
--- a/pypy/objspace/std/objecttype.py
+++ b/pypy/objspace/std/objecttype.py
@@ -6,7 +6,7 @@
from pypy.objspace.descroperation import Object
from pypy.objspace.std.stdtypedef import StdTypeDef
from pypy.objspace.std.register_all import register_all
-
+from pypy.objspace.std import identitydict
def descr__repr__(space, w_obj):
w = space.wrap
diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py
--- a/pypy/objspace/std/objspace.py
+++ b/pypy/objspace/std/objspace.py
@@ -39,7 +39,6 @@
from pypy.objspace.std.stringtype import wrapstr
from pypy.objspace.std.unicodetype import wrapunicode
-
class StdObjSpace(ObjSpace, DescrOperation):
"""The standard object space, implementing a general-purpose object
library in Restricted Python."""
diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py
--- a/pypy/objspace/std/test/test_dictmultiobject.py
+++ b/pypy/objspace/std/test/test_dictmultiobject.py
@@ -898,6 +898,7 @@
withsmalldicts = False
withcelldict = False
withmethodcache = False
+ withidentitydict = False
FakeSpace.config = Config()
@@ -1105,3 +1106,4 @@
fakespace = FakeSpace()
d = fakespace.newdict(module=True)
assert type(d.strategy) is StringDictStrategy
+
diff --git a/pypy/objspace/std/test/test_identitydict.py b/pypy/objspace/std/test/test_identitydict.py
new file mode 100644
--- /dev/null
+++ b/pypy/objspace/std/test/test_identitydict.py
@@ -0,0 +1,138 @@
+import py
+from pypy.interpreter.gateway import interp2app
+from pypy.conftest import gettestobjspace
+from pypy.conftest import option
+
+class AppTestComparesByIdentity:
+
+ def setup_class(cls):
+ from pypy.objspace.std import identitydict
+ cls.space = gettestobjspace(
+ **{"objspace.std.withidentitydict": True})
+ if option.runappdirect:
+ py.test.skip("interp2app doesn't work on appdirect")
+
+ def compares_by_identity(space, w_cls):
+ return space.wrap(w_cls.compares_by_identity())
+ cls.w_compares_by_identity = cls.space.wrap(interp2app(compares_by_identity))
+
+ def test_compares_by_identity(self):
+ class Plain(object):
+ pass
+
+ class CustomEq(object):
+ def __eq__(self, other):
+ return True
+
+ class CustomCmp (object):
+ def __cmp__(self, other):
+ return 0
+
+ class CustomHash(object):
+ def __hash__(self):
+ return 0
+
+ assert self.compares_by_identity(Plain)
+ assert not self.compares_by_identity(CustomEq)
+ assert not self.compares_by_identity(CustomCmp)
+ assert not self.compares_by_identity(CustomHash)
+
+ def test_modify_class(self):
+ class X(object):
+ pass
+
+ assert self.compares_by_identity(X)
+ X.__eq__ = lambda x: None
+ assert not self.compares_by_identity(X)
+ del X.__eq__
+ assert self.compares_by_identity(X)
+
+
+class AppTestIdentityDict(object):
+ def setup_class(cls):
+ cls.space = gettestobjspace(**{"objspace.std.withidentitydict": True})
+ if option.runappdirect:
+ py.test.skip("interp2app doesn't work on appdirect")
+
+ def w_uses_identity_strategy(self, obj):
+ import __pypy__
+ return "IdentityDictStrategy" in __pypy__.internal_repr(obj)
+
+ def test_use_strategy(self):
+ class X(object):
+ pass
+ d = {}
+ x = X()
+ d[x] = 1
+ assert self.uses_identity_strategy(d)
+ assert d[x] == 1
+
+ def test_bad_item(self):
+ class X(object):
+ pass
+ class Y(object):
+ def __hash__(self):
+ return 32
+
+ d = {}
+ x = X()
+ y = Y()
+ d[x] = 1
+ assert self.uses_identity_strategy(d)
+ d[y] = 2
+ assert not self.uses_identity_strategy(d)
+ assert d[x] == 1
+ assert d[y] == 2
+
+ def test_bad_key(self):
+ class X(object):
+ pass
+ d = {}
+ x = X()
+
+ class Y(object):
+ def __hash__(self):
+ return hash(x) # to make sure we do x == y
+
+ def __eq__(self, other):
+ return True
+
+ y = Y()
+ d[x] = 1
+ assert self.uses_identity_strategy(d)
+ assert d[y] == 1
+ assert not self.uses_identity_strategy(d)
+
+ def test_iter(self):
+ class X(object):
+ pass
+ x = X()
+ d = {x: 1}
+ assert self.uses_identity_strategy(d)
+ assert list(iter(d)) == [x]
+
+ def test_mutate_class_and_then_compare(self):
+ class X(object):
+ pass
+ class Y(object):
+ pass
+
+ x = X()
+ y = Y()
+ d1 = {x: 1}
+ d2 = {y: 1}
+ assert self.uses_identity_strategy(d1)
+ assert self.uses_identity_strategy(d2)
+ #
+ X.__hash__ = lambda self: hash(y)
+ X.__eq__ = lambda self, other: True
+ #
+ assert d1 == d2
+ assert self.uses_identity_strategy(d1)
+ assert not self.uses_identity_strategy(d2)
+
+ def test_old_style_classes(self):
+ class X:
+ pass
+ d = {X(): 1}
+ assert not self.uses_identity_strategy(d)
diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py
--- a/pypy/objspace/std/typeobject.py
+++ b/pypy/objspace/std/typeobject.py
@@ -7,6 +7,7 @@
from pypy.interpreter.baseobjspace import W_Root
from pypy.objspace.std.stdtypedef import std_dict_descr, issubtypedef, Member
from pypy.objspace.std.objecttype import object_typedef
+from pypy.objspace.std import identitydict
from pypy.rlib.objectmodel import we_are_translated
from pypy.rlib.objectmodel import current_object_addr_as_int, compute_hash
from pypy.rlib.jit import promote, elidable_promote, we_are_jitted
@@ -76,6 +77,10 @@
for i in range(len(self.lookup_where)):
self.lookup_where[i] = None_None
+# possible values of compares_by_identity_status
+UNKNOWN = 0
+COMPARES_BY_IDENTITY = 1
+OVERRIDES_EQ_CMP_OR_HASH = 2
class W_TypeObject(W_Object):
from pypy.objspace.std.typetype import type_typedef as typedef
@@ -102,6 +107,9 @@
# (False is a conservative default, fixed during real usage)
uses_object_getattribute = False
+ # for config.objspace.std.withidentitydict
+ compares_by_identity_status = UNKNOWN
+
# used to cache the type __new__ function if it comes from a builtin type
# != 'type', in that case call__Type will also assumes the result
# of the __new__ is an instance of the type
@@ -146,11 +154,17 @@
else:
w_self.terminator = NoDictTerminator(space, w_self)
- def mutated(w_self):
+ def mutated(w_self, key):
+ """
+ The type is being mutated. key is either the string containing the
+ specific attribute which is being deleted/set or None to indicate a
+ generic mutation.
+ """
space = w_self.space
assert w_self.is_heaptype() or space.config.objspace.std.mutable_builtintypes
if (not space.config.objspace.std.withtypeversion and
not space.config.objspace.std.getattributeshortcut and
+ not space.config.objspace.std.withidentitydict and
not space.config.objspace.std.newshortcut):
return
@@ -158,6 +172,13 @@
w_self.uses_object_getattribute = False
# ^^^ conservative default, fixed during real usage
+ if space.config.objspace.std.withidentitydict:
+ did_compare_by_identity = (
+ w_self.compares_by_identity_status == COMPARES_BY_IDENTITY)
+ if (key is None or key == '__eq__' or
+ key == '__cmp__' or key == '__hash__'):
+ w_self.compares_by_identity_status = UNKNOWN
+
if space.config.objspace.std.newshortcut:
w_self.w_bltin_new = None
@@ -168,7 +189,7 @@
subclasses_w = w_self.get_subclasses()
for w_subclass in subclasses_w:
assert isinstance(w_subclass, W_TypeObject)
- w_subclass.mutated()
+ w_subclass.mutated(key)
def version_tag(w_self):
if (not we_are_jitted() or w_self.is_heaptype() or
@@ -207,6 +228,25 @@
def has_object_getattribute(w_self):
return w_self.getattribute_if_not_from_object() is None
+ def compares_by_identity(w_self):
+ from pypy.objspace.descroperation import object_hash
+ if not w_self.space.config.objspace.std.withidentitydict:
+ return False # conservative
+ #
+ if w_self.compares_by_identity_status != UNKNOWN:
+ # fast path
+ return w_self.compares_by_identity_status == COMPARES_BY_IDENTITY
+ #
+ default_hash = object_hash(w_self.space)
+ overrides_eq_cmp_or_hash = (w_self.lookup('__eq__') or
+ w_self.lookup('__cmp__') or
+ w_self.lookup('__hash__') is not default_hash)
+ if overrides_eq_cmp_or_hash:
+ w_self.compares_by_identity_status = OVERRIDES_EQ_CMP_OR_HASH
+ else:
+ w_self.compares_by_identity_status = COMPARES_BY_IDENTITY
+ return w_self.compares_by_identity_status == COMPARES_BY_IDENTITY
+
def ready(w_self):
for w_base in w_self.bases_w:
if not isinstance(w_base, W_TypeObject):
@@ -269,7 +309,7 @@
w_curr.w_value = w_value
return True
w_value = TypeCell(w_value)
- w_self.mutated()
+ w_self.mutated(name)
w_self.dict_w[name] = w_value
return True
@@ -286,7 +326,7 @@
except KeyError:
return False
else:
- w_self.mutated()
+ w_self.mutated(key)
return True
def lookup(w_self, name):
diff --git a/pypy/objspace/std/typetype.py b/pypy/objspace/std/typetype.py
--- a/pypy/objspace/std/typetype.py
+++ b/pypy/objspace/std/typetype.py
@@ -141,7 +141,7 @@
w_oldbestbase.getname(space))
# invalidate the version_tag of all the current subclasses
- w_type.mutated()
+ w_type.mutated(None)
# now we can go ahead and change 'w_type.bases_w'
saved_bases_w = w_type.bases_w
diff --git a/pypy/rlib/streamio.py b/pypy/rlib/streamio.py
--- a/pypy/rlib/streamio.py
+++ b/pypy/rlib/streamio.py
@@ -875,28 +875,32 @@
if bufsize == -1: # Get default from the class
bufsize = self.bufsize
self.bufsize = bufsize # buffer size (hint only)
- self.buf = ""
+ self.buf = []
+ self.buflen = 0
def flush_buffers(self):
if self.buf:
- self.do_write(self.buf)
- self.buf = ""
+ self.do_write(''.join(self.buf))
+ self.buf = []
+ self.buflen = 0
def tell(self):
- return self.do_tell() + len(self.buf)
+ return self.do_tell() + self.buflen
def write(self, data):
- buflen = len(self.buf)
+ buflen = self.buflen
datalen = len(data)
if datalen + buflen < self.bufsize:
- self.buf += data
+ self.buf.append(data)
+ self.buflen += datalen
elif buflen:
- slice = self.bufsize - buflen
- assert slice >= 0
- self.buf += data[:slice]
- self.do_write(self.buf)
- self.buf = ""
- self.write(data[slice:])
+ i = self.bufsize - buflen
+ assert i >= 0
+ self.buf.append(data[:i])
+ self.do_write(''.join(self.buf))
+ self.buf = []
+ self.buflen = 0
+ self.write(data[i:])
else:
self.do_write(data)
@@ -922,11 +926,27 @@
"""
def write(self, data):
- BufferingOutputStream.write(self, data)
- p = self.buf.rfind('\n') + 1
- if p >= 0:
- self.do_write(self.buf[:p])
- self.buf = self.buf[p:]
+ p = data.rfind('\n') + 1
+ assert p >= 0
+ if self.buflen + len(data) < self.bufsize:
+ if p == 0:
+ self.buf.append(data)
+ self.buflen += len(data)
+ else:
+ if self.buflen:
+ self.do_write(''.join(self.buf))
+ self.do_write(data[:p])
+ self.buf = [data[p:]]
+ self.buflen = len(self.buf[0])
+ else:
+ if self.buflen + p < self.bufsize:
+ p = self.bufsize - self.buflen
+ if self.buflen:
+ self.do_write(''.join(self.buf))
+ assert p >= 0
+ self.do_write(data[:p])
+ self.buf = [data[p:]]
+ self.buflen = len(self.buf[0])
# ____________________________________________________________
diff --git a/pypy/rpython/memory/gctransform/asmgcroot.py b/pypy/rpython/memory/gctransform/asmgcroot.py
--- a/pypy/rpython/memory/gctransform/asmgcroot.py
+++ b/pypy/rpython/memory/gctransform/asmgcroot.py
@@ -184,7 +184,9 @@
# old NULL entries
gcdata.dead_threads_count += 1
if (gcdata.dead_threads_count & 511) == 0:
- gcdata.aid2stack = copy_without_null_values(gcdata.aid2stack)
+ copy = copy_without_null_values(gcdata.aid2stack)
+ gcdata.aid2stack.delete()
+ gcdata.aid2stack = copy
def belongs_to_current_thread(framedata):
# xxx obscure: the answer is Yes if, as a pointer, framedata
diff --git a/pypy/rpython/memory/gctransform/framework.py b/pypy/rpython/memory/gctransform/framework.py
--- a/pypy/rpython/memory/gctransform/framework.py
+++ b/pypy/rpython/memory/gctransform/framework.py
@@ -1449,8 +1449,9 @@
# old NULL entries
gcdata.dead_threads_count += 1
if (gcdata.dead_threads_count & 511) == 0:
- gcdata.thread_stacks = copy_without_null_values(
- gcdata.thread_stacks)
+ copy = copy_without_null_values(gcdata.thread_stacks)
+ gcdata.thread_stacks.delete()
+ gcdata.thread_stacks = copy
def switch_shadow_stacks(new_aid):
save_away_current_stack()
diff --git a/pypy/test_all.py b/pypy/test_all.py
--- a/pypy/test_all.py
+++ b/pypy/test_all.py
@@ -18,4 +18,5 @@
if __name__ == '__main__':
import tool.autopath
import pytest
- sys.exit(pytest.main())
+ import pytest_cov
+ sys.exit(pytest.main(plugins=[pytest_cov]))
diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py
--- a/pypy/tool/jitlogparser/parser.py
+++ b/pypy/tool/jitlogparser/parser.py
@@ -30,6 +30,9 @@
def getres(self):
return self._getvar(self.res)
+ def getdescr(self):
+ return self.descr
+
def _getvar(self, v):
return v
@@ -39,7 +42,7 @@
def repr(self):
args = self.getargs()
if self.descr is not None:
- args.append('descr=%s' % self.descr)
+ args.append('descr=%s' % self.getdescr())
arglist = ', '.join(args)
if self.res is not None:
return '%s = %s(%s)' % (self.getres(), self.name, arglist)
@@ -145,10 +148,10 @@
if operations[0].name == 'debug_merge_point':
self.inline_level = int(operations[0].args[0])
m = re.search('<code object ([<>\w]+)\. file \'(.+?)\'\. line (\d+)> #(\d+) (\w+)',
- operations[0].getarg(1))
+ operations[0].args[1])
if m is None:
# a non-code loop, like StrLiteralSearch or something
- self.bytecode_name = operations[0].args[1].split(" ")[0][1:]
+ self.bytecode_name = operations[0].args[1][1:-1]
else:
self.name, self.filename, lineno, bytecode_no, self.bytecode_name = m.groups()
self.startlineno = int(lineno)
diff --git a/pypy/tool/jitlogparser/test/test_parser.py b/pypy/tool/jitlogparser/test/test_parser.py
--- a/pypy/tool/jitlogparser/test/test_parser.py
+++ b/pypy/tool/jitlogparser/test/test_parser.py
@@ -1,6 +1,6 @@
from pypy.tool.jitlogparser.parser import (SimpleParser, TraceForOpcode,
Function, adjust_bridges,
- import_log)
+ import_log, Op)
from pypy.tool.jitlogparser.storage import LoopStorage
import py, sys
@@ -181,7 +181,7 @@
""")
ops = Function.from_operations(loop.operations, LoopStorage())
chunk = ops.chunks[0]
- assert chunk.bytecode_name == 'StrLiteralSearch'
+ assert chunk.bytecode_name.startswith('StrLiteralSearch')
def test_parsing_assembler():
backend_dump = "554889E5534154415541564157488DA500000000488B042590C5540148C7042590C554010000000048898570FFFFFF488B042598C5540148C7042598C554010000000048898568FFFFFF488B0425A0C5540148C70425A0C554010000000048898560FFFFFF488B0425A8C5540148C70425A8C554010000000048898558FFFFFF4C8B3C2550525B0149BB30E06C96FC7F00004D8B334983C60149BB30E06C96FC7F00004D89334981FF102700000F8D000000004983C7014C8B342580F76A024983EE014C89342580F76A024983FE000F8C00000000E9AEFFFFFF488B042588F76A024829E0483B042580EC3C01760D49BB05F30894FC7F000041FFD3554889E5534154415541564157488DA550FFFFFF4889BD70FFFFFF4889B568FFFFFF48899560FFFFFF48898D58FFFFFF4D89C7E954FFFFFF49BB00F00894FC7F000041FFD34440484C3D030300000049BB00F00894FC7F000041FFD34440484C3D070304000000"
@@ -225,3 +225,9 @@
assert 'cmp' in loops[1].operations[1].asm
# bridge
assert 'jo' in loops[3].operations[3].asm
+
+def test_Op_repr_is_pure():
+ op = Op('foobar', ['a', 'b'], 'c', 'mydescr')
+ myrepr = 'c = foobar(a, b, descr=mydescr)'
+ assert op.repr() == myrepr
+ assert op.repr() == myrepr # do it twice
diff --git a/pypy/tool/release/win32build.py b/pypy/tool/release/win32build.py
--- a/pypy/tool/release/win32build.py
+++ b/pypy/tool/release/win32build.py
@@ -24,6 +24,6 @@
shutil.copy(str(pypydir.join('..', '..', 'expat-2.0.1', 'win32', 'bin', 'release', 'libexpat.dll')), str(builddir))
make_pypy('', ['-Ojit'])
-make_pypy('-nojit', [])
+make_pypy('-nojit', ['-O2'])
#make_pypy('-stackless', [--stackless])
#make_pypy('-sandbox', [--sandbox])
diff --git a/pypy/translator/c/genc.py b/pypy/translator/c/genc.py
--- a/pypy/translator/c/genc.py
+++ b/pypy/translator/c/genc.py
@@ -251,12 +251,8 @@
CBuilder.have___thread = self.translator.platform.check___thread()
if not self.standalone:
assert not self.config.translation.instrument
- self.eci, cfile, extra = gen_source(db, modulename, targetdir,
- self.eci,
- defines = defines,
- split=self.split)
else:
- pfname = db.get(pf)
+ defines['PYPY_STANDALONE'] = db.get(pf)
if self.config.translation.instrument:
defines['INSTRUMENT'] = 1
if CBuilder.have___thread:
@@ -266,11 +262,9 @@
defines['PYPY_MAIN_FUNCTION'] = "pypy_main_startup"
self.eci = self.eci.merge(ExternalCompilationInfo(
export_symbols=["pypy_main_startup"]))
- self.eci, cfile, extra = gen_source_standalone(db, modulename,
- targetdir,
- self.eci,
- entrypointname = pfname,
- defines = defines)
+ self.eci, cfile, extra = gen_source(db, modulename, targetdir,
+ self.eci, defines=defines,
+ split=self.split)
self.c_source_filename = py.path.local(cfile)
self.extrafiles = self.eventually_copy(extra)
self.gen_makefile(targetdir, exe_name=exe_name)
@@ -435,6 +429,7 @@
class CStandaloneBuilder(CBuilder):
standalone = True
+ split = True
executable_name = None
shared_library_name = None
@@ -948,63 +943,8 @@
return eci.merge(ExternalCompilationInfo(separate_module_files=files))
-def gen_source_standalone(database, modulename, targetdir, eci,
- entrypointname, defines={}):
- assert database.standalone
- if isinstance(targetdir, str):
- targetdir = py.path.local(targetdir)
-
- filename = targetdir.join(modulename + '.c')
- f = filename.open('w')
- incfilename = targetdir.join('common_header.h')
- fi = incfilename.open('w')
-
- #
- # Header
- #
- print >> f, '#include "common_header.h"'
- print >> f
- commondefs(defines)
- defines['PYPY_STANDALONE'] = entrypointname
- for key, value in defines.items():
- print >> fi, '#define %s %s' % (key, value)
-
- eci.write_c_header(fi)
- print >> fi, '#include "src/g_prerequisite.h"'
-
- fi.close()
-
- preimplementationlines = list(
- pre_include_code_lines(database, database.translator.rtyper))
-
- #
- # 1) All declarations
- # 2) Implementation of functions and global structures and arrays
- #
- sg = SourceGenerator(database, preimplementationlines)
- sg.set_strategy(targetdir)
- database.prepare_inline_helpers()
- sg.gen_readable_parts_of_source(f)
-
- # 3) start-up code
- print >> f
- gen_startupcode(f, database)
-
- f.close()
-
- if 'INSTRUMENT' in defines:
- fi = incfilename.open('a')
- n = database.instrument_ncounter
- print >>fi, "#define INSTRUMENT_NCOUNTER %d" % n
- fi.close()
-
- eci = add_extra_files(eci)
- eci = eci.convert_sources_to_files(being_main=True)
- files, eci = eci.get_module_files()
- return eci, filename, sg.getextrafiles() + list(files)
-
-def gen_source(database, modulename, targetdir, eci, defines={}, split=False):
- assert not database.standalone
+def gen_source(database, modulename, targetdir,
+ eci, defines={}, split=False):
if isinstance(targetdir, str):
targetdir = py.path.local(targetdir)
@@ -1046,6 +986,12 @@
gen_startupcode(f, database)
f.close()
+ if 'INSTRUMENT' in defines:
+ fi = incfilename.open('a')
+ n = database.instrument_ncounter
+ print >>fi, "#define INSTRUMENT_NCOUNTER %d" % n
+ fi.close()
+
eci = add_extra_files(eci)
eci = eci.convert_sources_to_files(being_main=True)
files, eci = eci.get_module_files()
diff --git a/pypy/translator/c/test/test_genc.py b/pypy/translator/c/test/test_genc.py
--- a/pypy/translator/c/test/test_genc.py
+++ b/pypy/translator/c/test/test_genc.py
@@ -4,7 +4,6 @@
from pypy.translator.translator import TranslationContext
from pypy.translator.c.database import LowLevelDatabase
from pypy.translator.c import genc
-from pypy.translator.c.genc import gen_source
from pypy.translator.c.gc import NoneGcPolicy
from pypy.objspace.flow.model import Constant, Variable, SpaceOperation
from pypy.objspace.flow.model import Block, Link, FunctionGraph
diff --git a/pytest.py b/pytest.py
--- a/pytest.py
+++ b/pytest.py
@@ -9,6 +9,8 @@
from _pytest import __version__
if __name__ == '__main__': # if run as a script or by 'python -m pytest'
- raise SystemExit(main())
+ #XXX: sync to upstream later
+ import pytest_cov
+ raise SystemExit(main(plugins=[pytest_cov]))
else:
_preloadplugins() # to populate pytest.* namespace so help(pytest) works
diff --git a/pytest_cov.py b/pytest_cov.py
new file mode 100644
--- /dev/null
+++ b/pytest_cov.py
@@ -0,0 +1,353 @@
+"""produce code coverage reports using the 'coverage' package, including support for distributed testing.
+
+This plugin produces coverage reports. It supports centralised testing and distributed testing in
+both load and each modes. It also supports coverage of subprocesses.
+
+All features offered by the coverage package should be available, either through pytest-cov or
+through coverage's config file.
+
+
+Installation
+------------
+
+The `pytest-cov`_ package may be installed with pip or easy_install::
+
+ pip install pytest-cov
+ easy_install pytest-cov
+
+.. _`pytest-cov`: http://pypi.python.org/pypi/pytest-cov/
+
+
+Uninstallation
+--------------
+
+Uninstalling packages is supported by pip::
+
+ pip uninstall pytest-cov
+
+However easy_install does not provide an uninstall facility.
+
+.. IMPORTANT::
+
+ Ensure that you manually delete the init_cov_core.pth file in your site-packages directory.
+
+ This file starts coverage collection of subprocesses if appropriate during site initialisation
+ at python startup.
+
+
+Usage
+-----
+
+Centralised Testing
+~~~~~~~~~~~~~~~~~~~
+
+Centralised testing will report on the combined coverage of the main process and all of its
+subprocesses.
+
+Running centralised testing::
+
+ py.test --cov myproj tests/
+
+Shows a terminal report::
+
+ -------------------- coverage: platform linux2, python 2.6.4-final-0 ---------------------
+ Name Stmts Miss Cover
+ ----------------------------------------
+ myproj/__init__ 2 0 100%
+ myproj/myproj 257 13 94%
+ myproj/feature4286 94 7 92%
+ ----------------------------------------
+ TOTAL 353 20 94%
+
+
+Distributed Testing: Load
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Distributed testing with dist mode set to load will report on the combined coverage of all slaves.
+The slaves may be spread out over any number of hosts and each slave may be located anywhere on the
+file system. Each slave will have its subprocesses measured.
+
+Running distributed testing with dist mode set to load::
+
+ py.test --cov myproj -n 2 tests/
+
+Shows a terminal report::
+
+ -------------------- coverage: platform linux2, python 2.6.4-final-0 ---------------------
+ Name Stmts Miss Cover
+ ----------------------------------------
+ myproj/__init__ 2 0 100%
+ myproj/myproj 257 13 94%
+ myproj/feature4286 94 7 92%
+ ----------------------------------------
+ TOTAL 353 20 94%
+
+
+Again but spread over different hosts and different directories::
+
+ py.test --cov myproj --dist load
+ --tx ssh=memedough at host1//chdir=testenv1
+ --tx ssh=memedough at host2//chdir=/tmp/testenv2//python=/tmp/env1/bin/python
+ --rsyncdir myproj --rsyncdir tests --rsync examples
+ tests/
+
+Shows a terminal report::
+
+ -------------------- coverage: platform linux2, python 2.6.4-final-0 ---------------------
+ Name Stmts Miss Cover
+ ----------------------------------------
+ myproj/__init__ 2 0 100%
+ myproj/myproj 257 13 94%
+ myproj/feature4286 94 7 92%
+ ----------------------------------------
+ TOTAL 353 20 94%
+
+
+Distributed Testing: Each
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Distributed testing with dist mode set to each will report on the combined coverage of all slaves.
+Since each slave is running all tests this allows generating a combined coverage report for multiple
+environments.
+
+Running distributed testing with dist mode set to each::
+
+ py.test --cov myproj --dist each
+ --tx popen//chdir=/tmp/testenv3//python=/usr/local/python27/bin/python
+ --tx ssh=memedough at host2//chdir=/tmp/testenv4//python=/tmp/env2/bin/python
+ --rsyncdir myproj --rsyncdir tests --rsync examples
+ tests/
+
+Shows a terminal report::
+
+ ---------------------------------------- coverage ----------------------------------------
+ platform linux2, python 2.6.5-final-0
+ platform linux2, python 2.7.0-final-0
+ Name Stmts Miss Cover
+ ----------------------------------------
+ myproj/__init__ 2 0 100%
+ myproj/myproj 257 13 94%
+ myproj/feature4286 94 7 92%
+ ----------------------------------------
+ TOTAL 353 20 94%
+
+
+Reporting
+---------
+
+It is possible to generate any combination of the reports for a single test run.
+
+The available reports are terminal (with or without missing line numbers shown), HTML, XML and
+annotated source code.
+
+The terminal report without line numbers (default)::
+
+ py.test --cov-report term --cov myproj tests/
+
+ -------------------- coverage: platform linux2, python 2.6.4-final-0 ---------------------
+ Name Stmts Miss Cover
+ ----------------------------------------
+ myproj/__init__ 2 0 100%
+ myproj/myproj 257 13 94%
+ myproj/feature4286 94 7 92%
+ ----------------------------------------
+ TOTAL 353 20 94%
+
+
+The terminal report with line numbers::
+
+ py.test --cov-report term-missing --cov myproj tests/
+
+ -------------------- coverage: platform linux2, python 2.6.4-final-0 ---------------------
+ Name Stmts Miss Cover Missing
+ --------------------------------------------------
+ myproj/__init__ 2 0 100%
+ myproj/myproj 257 13 94% 24-26, 99, 149, 233-236, 297-298, 369-370
+ myproj/feature4286 94 7 92% 183-188, 197
+ --------------------------------------------------
+ TOTAL 353 20 94%
+
+
+The remaining three reports output to files without showing anything on the terminal (useful for
+when the output is going to a continuous integration server)::
+
+ py.test --cov-report html
+ --cov-report xml
+ --cov-report annotate
+ --cov myproj tests/
+
+
+Coverage Data File
+------------------
+
+The data file is erased at the beginning of testing to ensure clean data for each test run.
+
+The data file is left at the end of testing so that it is possible to use normal coverage tools to
+examine it.
+
+
+Coverage Config File
+--------------------
+
+This plugin provides a clean minimal set of command line options that are added to pytest. For
+further control of coverage use a coverage config file.
+
+For example if tests are contained within the directory tree being measured the tests may be
+excluded if desired by using a .coveragerc file with the omit option set::
+
+ py.test --cov-config .coveragerc
+ --cov myproj
+ myproj/tests/
+
+Where the .coveragerc file contains file globs::
+
+ [run]
+ omit = tests/*
+
+For full details refer to the `coverage config file`_ documentation.
+
+.. _`coverage config file`: http://nedbatchelder.com/code/coverage/config.html
+
+Note that this plugin controls some options and setting the option in the config file will have no
+effect. These include specifying source to be measured (source option) and all data file handling
+(data_file and parallel options).
+
+
+Limitations
+-----------
+
+For distributed testing the slaves must have the pytest-cov package installed. This is needed since
+the plugin must be registered through setuptools / distribute for pytest to start the plugin on the
+slave.
+
+For subprocess measurement environment variables must make it from the main process to the
+subprocess. The python used by the subprocess must have pytest-cov installed. The subprocess must
+do normal site initialisation so that the environment variables can be detected and coverage
+started.
+
+
+Acknowledgements
+----------------
+
+Whilst this plugin has been built fresh from the ground up it has been influenced by the work done
+on pytest-coverage (Ross Lawley, James Mills, Holger Krekel) and nose-cover (Jason Pellerin) which are
+other coverage plugins.
+
+Ned Batchelder for coverage and its ability to combine the coverage results of parallel runs.
+
+Holger Krekel for pytest with its distributed testing support.
+
+Jason Pellerin for nose.
+
+Michael Foord for unittest2.
+
+No doubt others have contributed to these tools as well.
+"""
+
+
+def pytest_addoption(parser):
+ """Add options to control coverage."""
+
+ group = parser.getgroup('coverage reporting with distributed testing support')
+ group.addoption('--cov', action='append', default=[], metavar='path',
+ dest='cov_source',
+ help='measure coverage for filesystem path (multi-allowed)')
+ group.addoption('--cov-report', action='append', default=[], metavar='type',
+ choices=['term', 'term-missing', 'annotate', 'html', 'xml'],
+ dest='cov_report',
+ help='type of report to generate: term, term-missing, annotate, html, xml (multi-allowed)')
+ group.addoption('--cov-config', action='store', default='.coveragerc', metavar='path',
+ dest='cov_config',
+ help='config file for coverage, default: .coveragerc')
+
+
+def pytest_configure(config):
+ """Activate coverage plugin if appropriate."""
+
+ if config.getvalue('cov_source'):
+ config.pluginmanager.register(CovPlugin(), '_cov')
+
+
+class CovPlugin(object):
+ """Use coverage package to produce code coverage reports.
+
+ Delegates all work to a particular implementation based on whether
+ this test process is centralised, a distributed master or a
+ distributed slave.
+ """
+
+ def __init__(self):
+ """Creates a coverage pytest plugin.
+
+        We read the rc file that coverage uses to get the data file
+        name. This is needed since we give coverage, through its API,
+        the data file name.
+ """
+
+ # Our implementation is unknown at this time.
+ self.cov_controller = None
+
+ def pytest_sessionstart(self, session):
+ """At session start determine our implementation and delegate to it."""
+
+ import cov_core
+
+ cov_source = session.config.getvalue('cov_source')
+ cov_report = session.config.getvalue('cov_report') or ['term']
+ cov_config = session.config.getvalue('cov_config')
+
+ session_name = session.__class__.__name__
+ is_master = (session.config.pluginmanager.hasplugin('dsession') or
+ session_name == 'DSession')
+ is_slave = (hasattr(session.config, 'slaveinput') or
+ session_name == 'SlaveSession')
+ nodeid = None
+
+ if is_master:
+ controller_cls = cov_core.DistMaster
+ elif is_slave:
+ controller_cls = cov_core.DistSlave
+ nodeid = session.config.slaveinput.get('slaveid', getattr(session, 'nodeid'))
+ else:
+ controller_cls = cov_core.Central
+
+ self.cov_controller = controller_cls(cov_source,
+ cov_report,
+ cov_config,
+ session.config,
+ nodeid)
+
+ self.cov_controller.start()
+
+ def pytest_configure_node(self, node):
+ """Delegate to our implementation."""
+
+ self.cov_controller.configure_node(node)
+ pytest_configure_node.optionalhook = True
+
+ def pytest_testnodedown(self, node, error):
+ """Delegate to our implementation."""
+
+ self.cov_controller.testnodedown(node, error)
+ pytest_testnodedown.optionalhook = True
+
+ def pytest_sessionfinish(self, session, exitstatus):
+ """Delegate to our implementation."""
+
+ self.cov_controller.finish()
+
+ def pytest_terminal_summary(self, terminalreporter):
+ """Delegate to our implementation."""
+
+ self.cov_controller.summary(terminalreporter._tw)
+
+
+def pytest_funcarg__cov(request):
+ """A pytest funcarg that provides access to the underlying coverage object."""
+
+ # Check with hasplugin to avoid getplugin exception in older pytest.
+ if request.config.pluginmanager.hasplugin('_cov'):
+ plugin = request.config.pluginmanager.getplugin('_cov')
+ if plugin.cov_controller:
+ return plugin.cov_controller.cov
+ return None
More information about the pypy-commit
mailing list