From pypy.commits at gmail.com Thu Sep 1 01:49:35 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 31 Aug 2016 22:49:35 -0700 (PDT) Subject: [pypy-commit] pypy buffer-interface: this was wrong. if arr.base and dtype='O', simply assume the pointers are gc objects Message-ID: <57c7c16f.a427c20a.76643.179b@mx.google.com> Author: Matti Picus Branch: buffer-interface Changeset: r86807:6ee997b509c7 Date: 2016-09-01 08:45 +0300 http://bitbucket.org/pypy/pypy/changeset/6ee997b509c7/ Log: this was wrong. if arr.base and dtype='O', simply assume the pointers are gc objects diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -465,13 +465,6 @@ self.storage = storage self.start = start self.gcstruct = V_OBJECTSTORE - if dtype.num == NPY.OBJECT: - self.gcstruct = _create_objectstore(storage, self.size, - dtype.elsize) - - def __del__(self): - if self.gcstruct: - self.gcstruct.length = 0 def fill(self, space, box): self.dtype.itemtype.fill( diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1851,7 +1851,7 @@ arr.gcstruct) def read(self, arr, i, offset, dtype): - if arr.gcstruct is V_OBJECTSTORE: + if arr.gcstruct is V_OBJECTSTORE and not arr.base(): raise oefmt(self.space.w_NotImplementedError, "cannot read object from array with no gc hook") return self.box(self._read(arr.storage, i, offset)) From pypy.commits at gmail.com Thu Sep 1 01:49:37 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 31 Aug 2016 22:49:37 -0700 (PDT) Subject: [pypy-commit] pypy buffer-interface: modifying ctor.py, might as well add a test for pypy/numpy issues #52, #53 Message-ID: <57c7c171.2916c20a.8b371.1988@mx.google.com> Author: Matti Picus Branch: buffer-interface Changeset: r86808:3b99b5a021ee Date: 2016-09-01 08:48 +0300 
http://bitbucket.org/pypy/pypy/changeset/3b99b5a021ee/ Log: modifying ctor.py, might as well add a test for pypy/numpy issues #52, #53 diff --git a/pypy/module/micronumpy/test/test_subtype.py b/pypy/module/micronumpy/test/test_subtype.py --- a/pypy/module/micronumpy/test/test_subtype.py +++ b/pypy/module/micronumpy/test/test_subtype.py @@ -702,3 +702,32 @@ ret = obj.sum() print type(ret) assert ret.info == 'spam' + + def test_ndarray_subclass_assigns_base(self): + import numpy as np + init_called = [] + class _DummyArray(object): + """ Dummy object that just exists to hang __array_interface__ dictionaries + and possibly keep alive a reference to a base array. + """ + def __init__(self, interface, base=None): + self.__array_interface__ = interface + init_called.append(1) + self.base = base + + x = np.zeros(10) + d = _DummyArray(x.__array_interface__, base=x) + y = np.array(d, copy=False) + assert sum(init_called) == 1 + assert y.base is d + + x = np.zeros((0,), dtype='float32') + intf = x.__array_interface__.copy() + intf["strides"] = x.strides + x.__array_interface__["strides"] = x.strides + d = _DummyArray(x.__array_interface__, base=x) + y = np.array(d, copy=False) + assert sum(init_called) == 2 + assert y.base is d + + From pypy.commits at gmail.com Thu Sep 1 03:35:55 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 01 Sep 2016 00:35:55 -0700 (PDT) Subject: [pypy-commit] pypy default: Add a passing test that direct running and annotation both crash Message-ID: <57c7da5b.0575c20a.329f0.f1f6@mx.google.com> Author: Armin Rigo Branch: Changeset: r86809:ce916c69cf00 Date: 2016-09-01 09:35 +0200 http://bitbucket.org/pypy/pypy/changeset/ce916c69cf00/ Log: Add a passing test that direct running and annotation both crash if we give a float to an llexternal function expecting an int diff --git a/rpython/rtyper/lltypesystem/test/test_rffi.py b/rpython/rtyper/lltypesystem/test/test_rffi.py --- a/rpython/rtyper/lltypesystem/test/test_rffi.py +++ 
b/rpython/rtyper/lltypesystem/test/test_rffi.py @@ -38,6 +38,24 @@ xf = self.compile(f, []) assert xf() == 8+3 + def test_no_float_to_int_conversion(self): + c_source = py.code.Source(""" + int someexternalfunction(int x) + { + return (x + 3); + } + """) + + eci = ExternalCompilationInfo(separate_module_sources=[c_source]) + z = llexternal('someexternalfunction', [Signed], Signed, + compilation_info=eci) + + def f(): + return z(8.2) + + py.test.raises(TypeError, f) + py.test.raises(TypeError, self.compile, f, []) + def test_hashdefine(self): h_source = """ #define X(i) (i+3) From pypy.commits at gmail.com Thu Sep 1 03:43:55 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 01 Sep 2016 00:43:55 -0700 (PDT) Subject: [pypy-commit] pypy default: Another passing test Message-ID: <57c7dc3b.e16ec20a.59a7a.37ef@mx.google.com> Author: Armin Rigo Branch: Changeset: r86810:809a3f78b64f Date: 2016-09-01 09:43 +0200 http://bitbucket.org/pypy/pypy/changeset/809a3f78b64f/ Log: Another passing test diff --git a/rpython/translator/sandbox/test/test_sandbox.py b/rpython/translator/sandbox/test/test_sandbox.py --- a/rpython/translator/sandbox/test/test_sandbox.py +++ b/rpython/translator/sandbox/test/test_sandbox.py @@ -29,6 +29,7 @@ assert msg == fnname msg = read_message(f) assert msg == args + assert [type(x) for x in msg] == [type(x) for x in args] if isinstance(result, Exception): write_exception(g, result) else: From pypy.commits at gmail.com Thu Sep 1 03:50:51 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 01 Sep 2016 00:50:51 -0700 (PDT) Subject: [pypy-commit] pypy default: pfff another hack on top of this pile of hacks Message-ID: <57c7dddb.d4301c0a.97158.2b96@mx.google.com> Author: Armin Rigo Branch: Changeset: r86811:76e37b5f30ae Date: 2016-09-01 09:50 +0200 http://bitbucket.org/pypy/pypy/changeset/76e37b5f30ae/ Log: pfff another hack on top of this pile of hacks diff --git a/rpython/rlib/rdynload.py b/rpython/rlib/rdynload.py --- a/rpython/rlib/rdynload.py 
+++ b/rpython/rlib/rdynload.py @@ -170,11 +170,15 @@ # # haaaack for 'pypy py.test -A' if libm.so is a linker script # (see reason in _dlerror_on_dlopen_untranslated()) + must_free = False if not we_are_translated() and platform.name == "linux": if name and rffi.charp2str(name) == 'libm.so': - name = rffi.str2charp('libm.so.6', track_allocation=False) + name = rffi.str2charp('libm.so.6') + must_free = True # res = c_dlopen(name, rffi.cast(rffi.INT, mode)) + if must_free: + rffi.free_charp(name) if not res: if not we_are_translated(): err = _dlerror_on_dlopen_untranslated(name) From pypy.commits at gmail.com Thu Sep 1 04:45:39 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 01 Sep 2016 01:45:39 -0700 (PDT) Subject: [pypy-commit] pypy default: Issue #2388: the problem is obscure interaction with a different call (I Message-ID: <57c7eab3.12331c0a.ee8e2.8538@mx.google.com> Author: Armin Rigo Branch: Changeset: r86812:7d6c66b14770 Date: 2016-09-01 10:26 +0200 http://bitbucket.org/pypy/pypy/changeset/7d6c66b14770/ Log: Issue #2388: the problem is obscure interaction with a different call (I don't know which one) with the signature (string, float), which was considered as more general than the signature (string, int) of os.access(). 
diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -17,7 +17,7 @@ from rpython.flowspace.model import Variable, Constant, const from rpython.flowspace.operation import op from rpython.rlib import rarithmetic -from rpython.annotator.model import AnnotatorError +from rpython.annotator.model import AnnotatorError, TLS BINARY_OPERATIONS = set([oper.opname for oper in op.__dict__.values() if oper.dispatch == 2]) @@ -436,6 +436,11 @@ class __extend__(pairtype(SomeFloat, SomeFloat)): def union((flt1, flt2)): + if not TLS.allow_int_to_float: + # in this mode, if one of the two is actually the + # subclass SomeInteger, complain + if isinstance(flt1, SomeInteger) or isinstance(flt2, SomeInteger): + raise UnionError(flt1, flt2) return SomeFloat() add = sub = mul = union diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -44,6 +44,7 @@ # A global attribute :-( Patch it with 'True' to enable checking of # the no_nul attribute... check_str_without_nul = False + allow_int_to_float = True TLS = State() class SomeObject(object): diff --git a/rpython/rlib/rmarshal.py b/rpython/rlib/rmarshal.py --- a/rpython/rlib/rmarshal.py +++ b/rpython/rlib/rmarshal.py @@ -346,11 +346,15 @@ # on s_bigger. It relies on the fact that s_bigger was created with # an expression like 'annotation([s_item])' which returns a ListDef with # no bookkeeper, on which side-effects are not allowed. 
+ saved = annmodel.TLS.allow_int_to_float try: + annmodel.TLS.allow_int_to_float = False s_union = annmodel.unionof(s_bigger, s_smaller) return s_bigger.contains(s_union) except (annmodel.UnionError, TooLateForChange): return False + finally: + annmodel.TLS.allow_int_to_float = saved class __extend__(pairtype(MTag, annmodel.SomeObject)): diff --git a/rpython/rlib/test/test_rmarshal.py b/rpython/rlib/test/test_rmarshal.py --- a/rpython/rlib/test/test_rmarshal.py +++ b/rpython/rlib/test/test_rmarshal.py @@ -128,10 +128,12 @@ def test_llinterp_marshal(): from rpython.rtyper.test.test_llinterp import interpret - marshaller = get_marshaller([(int, str, float)]) + marshaller1 = get_marshaller([(int, str, float)]) + marshaller2 = get_marshaller([(int, str, int)]) def f(): buf = [] - marshaller(buf, [(5, "hello", -0.5), (7, "world", 1E100)]) + marshaller1(buf, [(5, "hello", -0.5), (7, "world", 1E100)]) + marshaller2(buf, [(5, "hello", 1)]) return ''.join(buf) res = interpret(f, []) res = ''.join(res.chars) @@ -139,14 +141,20 @@ assert res == ('[\x02\x00\x00\x00(\x03\x00\x00\x00i\x05\x00\x00\x00' 's\x05\x00\x00\x00hellof\x04-0.5(\x03\x00\x00\x00' 'i\x07\x00\x00\x00s\x05\x00\x00\x00world' - 'f\x061e+100') + 'f\x061e+100' + '[\x01\x00\x00\x00(\x03\x00\x00\x00i\x05\x00\x00\x00' + 's\x05\x00\x00\x00helloi\x01\x00\x00\x00') else: assert res == ('[\x02\x00\x00\x00(\x03\x00\x00\x00' 'I\x05\x00\x00\x00\x00\x00\x00\x00' 's\x05\x00\x00\x00hellof\x04-0.5(\x03\x00\x00\x00' 'I\x07\x00\x00\x00\x00\x00\x00\x00' 's\x05\x00\x00\x00world' - 'f\x061e+100') + 'f\x061e+100' + '[\x01\x00\x00\x00(\x03\x00\x00\x00' + 'I\x05\x00\x00\x00\x00\x00\x00\x00' + 's\x05\x00\x00\x00hello' + 'I\x01\x00\x00\x00\x00\x00\x00\x00') def test_llinterp_unmarshal(): from rpython.rtyper.test.test_llinterp import interpret From pypy.commits at gmail.com Thu Sep 1 05:50:12 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 01 Sep 2016 02:50:12 -0700 (PDT) Subject: [pypy-commit] pypy default: Don't put an __init__ 
in a Test class, it makes py.test skip(!?!!) that class Message-ID: <57c7f9d4.94a51c0a.bea70.9ff3@mx.google.com> Author: Armin Rigo Branch: Changeset: r86813:8e7fe0c9ab70 Date: 2016-09-01 11:49 +0200 http://bitbucket.org/pypy/pypy/changeset/8e7fe0c9ab70/ Log: Don't put an __init__ in a Test class, it makes py.test skip(!?!!) that class diff --git a/rpython/rlib/test/test_runicode.py b/rpython/rlib/test/test_runicode.py --- a/rpython/rlib/test/test_runicode.py +++ b/rpython/rlib/test/test_runicode.py @@ -286,7 +286,7 @@ class TestUTF8Decoding(UnicodeTests): - def __init__(self): + def setup_method(self, meth): self.decoder = self.getdecoder('utf-8') def to_bytestring(self, bytes): From pypy.commits at gmail.com Thu Sep 1 05:55:15 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 01 Sep 2016 02:55:15 -0700 (PDT) Subject: [pypy-commit] pypy default: Rename the exception that we call TestException to some class name that Message-ID: <57c7fb03.822e1c0a.bb2f3.f2a6@mx.google.com> Author: Armin Rigo Branch: Changeset: r86814:955998701207 Date: 2016-09-01 11:02 +0100 http://bitbucket.org/pypy/pypy/changeset/955998701207/ Log: Rename the exception that we call TestException to some class name that doesn't start with Test, which confuses py.test diff --git a/rpython/translator/c/test/test_exception.py b/rpython/translator/c/test/test_exception.py --- a/rpython/translator/c/test/test_exception.py +++ b/rpython/translator/c/test/test_exception.py @@ -9,7 +9,7 @@ getcompiledopt = test_backendoptimized.TestTypedOptimizedTestCase().getcompiled -class TestException(Exception): +class InTestException(Exception): pass class MyException(Exception): @@ -18,7 +18,7 @@ def test_simple1(): def raise_(i): if i == 0: - raise TestException() + raise InTestException() elif i == 1: raise MyException() else: @@ -29,7 +29,7 @@ b = raise_(i) + 12 c = raise_(i) + 13 return a+b+c - except TestException: + except InTestException: return 7 except MyException: return 123 From pypy.commits at 
gmail.com Thu Sep 1 05:55:17 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 01 Sep 2016 02:55:17 -0700 (PDT) Subject: [pypy-commit] pypy default: Hack at our included py.test to fail, not quietly skip, when it sees a Message-ID: <57c7fb05.87adc20a.c9daa.4b81@mx.google.com> Author: Armin Rigo Branch: Changeset: r86815:0ace73a20a81 Date: 2016-09-01 11:03 +0100 http://bitbucket.org/pypy/pypy/changeset/0ace73a20a81/ Log: Hack at our included py.test to fail, not quietly skip, when it sees a TestXxx class with an __init__() method diff --git a/_pytest/python.py b/_pytest/python.py --- a/_pytest/python.py +++ b/_pytest/python.py @@ -498,7 +498,10 @@ """ Collector for test methods. """ def collect(self): if hasinit(self.obj): - pytest.skip("class %s.%s with __init__ won't get collected" % ( + # XXX used to be skip(), but silently skipping classes + # XXX just because they have been written long ago is + # XXX imho a very, very, very bad idea + pytest.fail("class %s.%s with __init__ won't get collected" % ( self.obj.__module__, self.obj.__name__, )) From pypy.commits at gmail.com Thu Sep 1 06:36:54 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 01 Sep 2016 03:36:54 -0700 (PDT) Subject: [pypy-commit] pypy default: Move the bit checking inside helpers, share it from the two places Message-ID: <57c804c6.c186c20a.99002.8175@mx.google.com> Author: Armin Rigo Branch: Changeset: r86817:ee3a2fbec01a Date: 2016-09-01 12:36 +0200 http://bitbucket.org/pypy/pypy/changeset/ee3a2fbec01a/ Log: Move the bit checking inside helpers, share it from the two places diff --git a/rpython/rlib/runicode.py b/rpython/rlib/runicode.py --- a/rpython/rlib/runicode.py +++ b/rpython/rlib/runicode.py @@ -137,6 +137,25 @@ result=result) return result.build(), pos +def _invalid_cont_byte(ordch): + return ordch>>6 != 0x2 # 0b10 + +_invalid_byte_2_of_2 = _invalid_cont_byte +_invalid_byte_3_of_3 = _invalid_cont_byte +_invalid_byte_3_of_4 = _invalid_cont_byte +_invalid_byte_4_of_4 = 
_invalid_cont_byte + +def _invalid_byte_2_of_3(ordch1, ordch2, allow_surrogates): + return (ordch2>>6 != 0x2 or # 0b10 + (ordch1 == 0xe0 and ordch2 < 0xa0) + # surrogates shouldn't be valid UTF-8! + or (not allow_surrogates and ordch1 == 0xed and ordch2 > 0x9f)) + +def _invalid_byte_2_of_4(ordch1, ordch2): + return (ordch2>>6 != 0x2 or # 0b10 + (ordch1 == 0xf0 and ordch2 < 0x90) or + (ordch1 == 0xf4 and ordch2 > 0x8f)) + @specialize.argtype(6) def str_decode_utf_8_impl(s, size, errors, final, errorhandler, allow_surrogates, result): @@ -173,10 +192,7 @@ ordch2 = ord(s[pos+1]) if n == 3: # 3-bytes seq with only a continuation byte - if (ordch2>>6 != 0x2 or # 0b10 - (ordch1 == 0xe0 and ordch2 < 0xa0) - or (not allow_surrogates and ordch1 == 0xed and ordch2 > 0x9f) - ): + if _invalid_byte_2_of_3(ordch1, ordch2, allow_surrogates): # second byte invalid, take the first and continue r, pos = errorhandler(errors, 'utf8', 'invalid continuation byte', @@ -192,16 +208,14 @@ continue elif n == 4: # 4-bytes seq with 1 or 2 continuation bytes - if (ordch2>>6 != 0x2 or # 0b10 - (ordch1 == 0xf0 and ordch2 < 0x90) or - (ordch1 == 0xf4 and ordch2 > 0x8f)): + if _invalid_byte_2_of_4(ordch1, ordch2): # second byte invalid, take the first and continue r, pos = errorhandler(errors, 'utf8', 'invalid continuation byte', s, pos, pos+1) result.append(r) continue - elif charsleft == 2 and ord(s[pos+2])>>6 != 0x2: # 0b10 + elif charsleft == 2 and _invalid_byte_3_of_4(ord(s[pos+2])): # third byte invalid, take the first two and continue r, pos = errorhandler(errors, 'utf8', 'invalid continuation byte', @@ -228,7 +242,7 @@ elif n == 2: ordch2 = ord(s[pos+1]) - if ordch2>>6 != 0x2: # 0b10 + if _invalid_byte_2_of_2(ordch2): r, pos = errorhandler(errors, 'utf8', 'invalid continuation byte', s, pos, pos+1) @@ -242,17 +256,13 @@ elif n == 3: ordch2 = ord(s[pos+1]) ordch3 = ord(s[pos+2]) - if (ordch2>>6 != 0x2 or # 0b10 - (ordch1 == 0xe0 and ordch2 < 0xa0) - # surrogates shouldn't be valid UTF-8! 
- or (not allow_surrogates and ordch1 == 0xed and ordch2 > 0x9f) - ): + if _invalid_byte_2_of_3(ordch1, ordch2, allow_surrogates): r, pos = errorhandler(errors, 'utf8', 'invalid continuation byte', s, pos, pos+1) result.append(r) continue - elif ordch3>>6 != 0x2: # 0b10 + elif _invalid_byte_3_of_3(ordch3): r, pos = errorhandler(errors, 'utf8', 'invalid continuation byte', s, pos, pos+2) @@ -268,21 +278,19 @@ ordch2 = ord(s[pos+1]) ordch3 = ord(s[pos+2]) ordch4 = ord(s[pos+3]) - if (ordch2>>6 != 0x2 or # 0b10 - (ordch1 == 0xf0 and ordch2 < 0x90) or - (ordch1 == 0xf4 and ordch2 > 0x8f)): + if _invalid_byte_2_of_4(ordch1, ordch2): r, pos = errorhandler(errors, 'utf8', 'invalid continuation byte', s, pos, pos+1) result.append(r) continue - elif ordch3>>6 != 0x2: # 0b10 + elif _invalid_byte_3_of_4(ordch3): r, pos = errorhandler(errors, 'utf8', 'invalid continuation byte', s, pos, pos+2) result.append(r) continue - elif ordch4>>6 != 0x2: # 0b10 + elif _invalid_byte_4_of_4(ordch4): r, pos = errorhandler(errors, 'utf8', 'invalid continuation byte', s, pos, pos+3) From pypy.commits at gmail.com Thu Sep 1 06:36:52 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 01 Sep 2016 03:36:52 -0700 (PDT) Subject: [pypy-commit] pypy default: Issue #2389: the custom error handler may return a 'pos' that is smaller Message-ID: <57c804c4.e2efc20a.1a16b.83ef@mx.google.com> Author: Armin Rigo Branch: Changeset: r86816:e9dd5882eed6 Date: 2016-09-01 12:23 +0200 http://bitbucket.org/pypy/pypy/changeset/e9dd5882eed6/ Log: Issue #2389: the custom error handler may return a 'pos' that is smaller than 'size', in which case we need to continue looping diff --git a/rpython/rlib/runicode.py b/rpython/rlib/runicode.py --- a/rpython/rlib/runicode.py +++ b/rpython/rlib/runicode.py @@ -157,22 +157,26 @@ if pos + n > size: if not final: break + # argh, this obscure block of code is mostly a copy of + # what follows :-( charsleft = size - pos - 1 # either 0, 1, 2 - # note: when we get the 
'unexpected end of data' we don't care - # about the pos anymore and we just ignore the value + # note: when we get the 'unexpected end of data' we need + # to care about the pos returned; it can be lower than size, + # in case we need to continue running this loop if not charsleft: # there's only the start byte and nothing else r, pos = errorhandler(errors, 'utf8', 'unexpected end of data', s, pos, pos+1) result.append(r) - break + continue ordch2 = ord(s[pos+1]) if n == 3: # 3-bytes seq with only a continuation byte if (ordch2>>6 != 0x2 or # 0b10 - (ordch1 == 0xe0 and ordch2 < 0xa0)): - # or (ordch1 == 0xed and ordch2 > 0x9f) + (ordch1 == 0xe0 and ordch2 < 0xa0) + or (not allow_surrogates and ordch1 == 0xed and ordch2 > 0x9f) + ): # second byte invalid, take the first and continue r, pos = errorhandler(errors, 'utf8', 'invalid continuation byte', @@ -185,7 +189,7 @@ 'unexpected end of data', s, pos, pos+2) result.append(r) - break + continue elif n == 4: # 4-bytes seq with 1 or 2 continuation bytes if (ordch2>>6 != 0x2 or # 0b10 @@ -210,7 +214,8 @@ 'unexpected end of data', s, pos, pos+charsleft+1) result.append(r) - break + continue + raise AssertionError("unreachable") if n == 0: r, pos = errorhandler(errors, 'utf8', diff --git a/rpython/rlib/test/test_runicode.py b/rpython/rlib/test/test_runicode.py --- a/rpython/rlib/test/test_runicode.py +++ b/rpython/rlib/test/test_runicode.py @@ -289,6 +289,12 @@ def setup_method(self, meth): self.decoder = self.getdecoder('utf-8') + def custom_replace(self, errors, encoding, msg, s, startingpos, endingpos): + assert errors == 'custom' + # returns FOO, but consumes only one character (not up to endingpos) + FOO = u'\u1234' + return FOO, startingpos + 1 + def to_bytestring(self, bytes): return ''.join(chr(int(c, 16)) for c in bytes.split()) @@ -309,6 +315,7 @@ E.g. <80> is a continuation byte and can appear only after a start byte. 
""" FFFD = u'\ufffd' + FOO = u'\u1234' for byte in '\x80\xA0\x9F\xBF\xC0\xC1\xF5\xFF': py.test.raises(UnicodeDecodeError, self.decoder, byte, 1, None, final=True) self.checkdecodeerror(byte, 'utf-8', 0, 1, addstuff=False, @@ -320,6 +327,11 @@ assert self.decoder(byte, 1, 'ignore', final=True) == (u'', 1) assert (self.decoder('aaaa' + byte + 'bbbb', 9, 'ignore', final=True) == (u'aaaabbbb', 9)) + assert self.decoder(byte, 1, 'custom', final=True, + errorhandler=self.custom_replace) == (FOO, 1) + assert (self.decoder('aaaa' + byte + 'bbbb', 9, 'custom', + final=True, errorhandler=self.custom_replace) == + (u'aaaa'+ FOO + u'bbbb', 9)) def test_unexpected_end_of_data(self): """ @@ -343,6 +355,7 @@ 'F4 80', 'F4 8F', 'F4 80 80', 'F4 80 BF', 'F4 8F 80', 'F4 8F BF' ] FFFD = u'\ufffd' + FOO = u'\u1234' for seq in sequences: seq = self.to_bytestring(seq) py.test.raises(UnicodeDecodeError, self.decoder, seq, len(seq), @@ -358,6 +371,12 @@ ) == (u'', len(seq)) assert (self.decoder('aaaa' + seq + 'bbbb', len(seq) + 8, 'ignore', final=True) == (u'aaaabbbb', len(seq) + 8)) + assert (self.decoder(seq, len(seq), 'custom', final=True, + errorhandler=self.custom_replace) == + (FOO * len(seq), len(seq))) + assert (self.decoder('aaaa' + seq + 'bbbb', len(seq) + 8, 'custom', + final=True, errorhandler=self.custom_replace) == + (u'aaaa'+ FOO * len(seq) + u'bbbb', len(seq) + 8)) def test_invalid_cb_for_2bytes_seq(self): """ From pypy.commits at gmail.com Thu Sep 1 11:11:09 2016 From: pypy.commits at gmail.com (mattip) Date: Thu, 01 Sep 2016 08:11:09 -0700 (PDT) Subject: [pypy-commit] pypy buffer-interface: implement view of __array_interface__, fixes issue #52, #53 on pypy/numpy Message-ID: <57c8450d.87adc20a.c9daa.da07@mx.google.com> Author: Matti Picus Branch: buffer-interface Changeset: r86818:4916eb438de5 Date: 2016-09-01 16:50 +0300 http://bitbucket.org/pypy/pypy/changeset/4916eb438de5/ Log: implement view of __array_interface__, fixes issue #52, #53 on pypy/numpy diff --git 
a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -82,13 +82,18 @@ raise oefmt(space.w_ValueError, "__array_interface__ could not decode dtype %R", w_dtype ) - if w_data is not None and (space.isinstance_w(w_data, space.w_tuple) or space.isinstance_w(w_data, space.w_list)): + if w_data is not None and (space.isinstance_w(w_data, space.w_tuple) or + space.isinstance_w(w_data, space.w_list)): data_w = space.listview(w_data) w_data = rffi.cast(RAW_STORAGE_PTR, space.int_w(data_w[0])) - read_only = True # XXX why not space.is_true(data_w[1]) + read_only = space.is_true(data_w[1]) offset = 0 + w_base = w_object + if read_only: + w_base = None return W_NDimArray.from_shape_and_storage(space, shape, w_data, - dtype, strides=strides, start=offset), read_only + dtype, w_base=w_base, strides=strides, + start=offset), read_only if w_data is None: w_data = w_object w_offset = space.finditem(w_interface, space.wrap('offset')) From pypy.commits at gmail.com Thu Sep 1 11:11:11 2016 From: pypy.commits at gmail.com (mattip) Date: Thu, 01 Sep 2016 08:11:11 -0700 (PDT) Subject: [pypy-commit] pypy buffer-interface: extend and fix the already-existing test for __array_interfac__ Message-ID: <57c8450f.262ec20a.f163a.f81a@mx.google.com> Author: Matti Picus Branch: buffer-interface Changeset: r86819:8c587f75370f Date: 2016-09-01 17:56 +0300 http://bitbucket.org/pypy/pypy/changeset/8c587f75370f/ Log: extend and fix the already-existing test for __array_interfac__ diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -43,7 +43,7 @@ raise oefmt(space.w_ValueError, "object __array__ method not producing an array") -def try_interface_method(space, w_object): +def try_interface_method(space, w_object, copy): try: w_interface = space.getattr(w_object, space.wrap("__array_interface__")) if 
w_interface is None: @@ -86,7 +86,7 @@ space.isinstance_w(w_data, space.w_list)): data_w = space.listview(w_data) w_data = rffi.cast(RAW_STORAGE_PTR, space.int_w(data_w[0])) - read_only = space.is_true(data_w[1]) + read_only = space.is_true(data_w[1]) or copy offset = 0 w_base = w_object if read_only: @@ -203,7 +203,7 @@ # use buffer interface w_object = _array_from_buffer_3118(space, w_object, dtype) if not isinstance(w_object, W_NDimArray): - w_array, _copy = try_interface_method(space, w_object) + w_array, _copy = try_interface_method(space, w_object, copy) if w_array is not None: w_object = w_array copy = _copy diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -3215,7 +3215,9 @@ raises(TypeError, array, Dummy({'version': 3, 'typestr': 'f8', 'shape': ('a', 3)})) a = array([1, 2, 3]) - b = array(Dummy(a.__array_interface__)) + d = Dummy(a.__array_interface__) + b = array(d) + assert b.base is None b[1] = 200 assert a[1] == 2 # upstream compatibility, is this a bug? 
interface_a = a.__array_interface__ @@ -3226,6 +3228,8 @@ interface_b.pop('data') interface_a.pop('data') assert interface_a == interface_b + b = array(d, copy=False) + assert b.base is d b = array(Dummy({'version':3, 'shape': (50,), 'typestr': 'u1', 'data': 'a'*100})) From pypy.commits at gmail.com Thu Sep 1 11:45:16 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 01 Sep 2016 08:45:16 -0700 (PDT) Subject: [pypy-commit] pypy redirect-assembler-jitlog: new tag tmp_callback to correctly make the connection between call_assembler <-> trace Message-ID: <57c84d0c.94071c0a.1c406.43e9@mx.google.com> Author: Richard Plangger Branch: redirect-assembler-jitlog Changeset: r86821:a2f692a70b2d Date: 2016-09-01 17:44 +0200 http://bitbucket.org/pypy/pypy/changeset/a2f692a70b2d/ Log: new tag tmp_callback to correctly make the connection between call_assembler <-> trace diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -7,6 +7,7 @@ from rpython.rlib import rstack from rpython.rlib.jit import JitDebugInfo, Counters, dont_look_inside from rpython.rlib.rjitlog import rjitlog as jl +from rpython.rlib.objectmodel import compute_unique_id from rpython.conftest import option from rpython.jit.metainterp.resoperation import ResOperation, rop,\ @@ -1156,6 +1157,9 @@ operations[1].setfailargs([]) operations = get_deep_immutable_oplist(operations) cpu.compile_loop(inputargs, operations, jitcell_token, log=False) + + jl.tmp_callback(looptoken) + if memory_manager is not None: # for tests memory_manager.keep_loop_alive(jitcell_token) return jitcell_token diff --git a/rpython/rlib/rjitlog/rjitlog.py b/rpython/rlib/rjitlog/rjitlog.py --- a/rpython/rlib/rjitlog/rjitlog.py +++ b/rpython/rlib/rjitlog/rjitlog.py @@ -212,7 +212,7 @@ return method return decor -JITLOG_VERSION = 3 +JITLOG_VERSION = 4 JITLOG_VERSION_16BIT_LE = struct.pack(" Author: Richard Plangger Branch: 
redirect-assembler-jitlog Changeset: r86820:a8a56628fefe Date: 2016-09-01 15:57 +0200 http://bitbucket.org/pypy/pypy/changeset/a8a56628fefe/ Log: merge default diff too long, truncating to 2000 out of 212851 lines diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -27,3 +27,6 @@ 40497617ae91caa1a394d8be6f9cd2de31cb0628 release-pypy3.3-v5.2 c09c19272c990a0611b17569a0085ad1ab00c8ff release-pypy2.7-v5.3 7e8df3df96417c16c2d55b41352ec82c9c69c978 release-pypy2.7-v5.3.1 +68bb3510d8212ae9efb687e12e58c09d29e74f87 release-pypy2.7-v5.4.0 +68bb3510d8212ae9efb687e12e58c09d29e74f87 release-pypy2.7-v5.4.0 +77392ad263504df011ccfcabf6a62e21d04086d0 release-pypy2.7-v5.4.0 diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -74,6 +74,7 @@ Seo Sanghyeon Ronny Pfannschmidt Justin Peel + Raffael Tfirst David Edelsohn Anders Hammarquist Jakub Gustak @@ -117,7 +118,6 @@ Wenzhu Man John Witulski Laurence Tratt - Raffael Tfirst Ivan Sichmann Freitas Greg Price Dario Bertini @@ -141,6 +141,7 @@ tav Taavi Burns Georg Brandl + Nicolas Truessel Bert Freudenberg Stian Andreassen Wanja Saatkamp @@ -211,6 +212,7 @@ Vaibhav Sood Alan McIntyre Alexander Sedov + p_zieschang at yahoo.de Attila Gobi Jasper.Schulz Christopher Pope @@ -221,6 +223,7 @@ Arjun Naik Valentina Mukhamedzhanova Stefano Parmesan + touilleMan Alexis Daboville Jens-Uwe Mager Carl Meyer @@ -229,12 +232,14 @@ Gabriel Lukas Vacek Kunal Grover + Aaron Gallagher Andrew Dalke Sylvain Thenault Jakub Stasiak Nathan Taylor Vladimir Kryachko Omer Katz + Mark Williams Jacek Generowicz Alejandro J. 
Cura Jacob Oscarson @@ -355,115 +360,12 @@ yasirs Michael Chermside Anna Ravencroft + pizi Andrey Churin Dan Crosta + Eli Stevens Tobias Diaz Julien Phalip Roman Podoliaka Dan Loewenherz - - Heinrich-Heine University, Germany - Open End AB (formerly AB Strakt), Sweden - merlinux GmbH, Germany - tismerysoft GmbH, Germany - Logilab Paris, France - DFKI GmbH, Germany - Impara, Germany - Change Maker, Sweden - University of California Berkeley, USA - Google Inc. - King's College London - -The PyPy Logo as used by http://speed.pypy.org and others was created -by Samuel Reis and is distributed on terms of Creative Commons Share Alike -License. - -License for 'lib-python/2.7' -============================ - -Except when otherwise stated (look for LICENSE files or copyright/license -information at the beginning of each file) the files in the 'lib-python/2.7' -directory are all copyrighted by the Python Software Foundation and licensed -under the terms that you can find here: https://docs.python.org/2/license.html - -License for 'pypy/module/unicodedata/' -====================================== - -The following files are from the website of The Unicode Consortium -at http://www.unicode.org/. For the terms of use of these files, see -http://www.unicode.org/terms_of_use.html . Or they are derived from -files from the above website, and the same terms of use apply. - - CompositionExclusions-*.txt - EastAsianWidth-*.txt - LineBreak-*.txt - UnicodeData-*.txt - UnihanNumeric-*.txt - -License for 'dotviewer/font/' -============================= - -Copyright (C) 2008 The Android Open Source Project - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -Detailed license information is contained in the NOTICE file in the -directory. - - -Licenses and Acknowledgements for Incorporated Software -======================================================= - -This section is an incomplete, but growing list of licenses and -acknowledgements for third-party software incorporated in the PyPy -distribution. - -License for 'Tcl/Tk' --------------------- - -This copy of PyPy contains library code that may, when used, result in -the Tcl/Tk library to be loaded. PyPy also includes code that may be -regarded as being a copy of some parts of the Tcl/Tk header files. -You may see a copy of the License for Tcl/Tk in the file -`lib_pypy/_tkinter/license.terms` included here. - -License for 'bzip2' -------------------- - -This copy of PyPy may be linked (dynamically or statically) with the -bzip2 library. You may see a copy of the License for bzip2/libbzip2 at - - http://www.bzip.org/1.0.5/bzip2-manual-1.0.5.html - -License for 'openssl' ---------------------- - -This copy of PyPy may be linked (dynamically or statically) with the -openssl library. You may see a copy of the License for OpenSSL at - - https://www.openssl.org/source/license.html - -License for 'gdbm' ------------------- - -The gdbm module includes code from gdbm.h, which is distributed under -the terms of the GPL license version 2 or any later version. Thus the -gdbm module, provided in the file lib_pypy/gdbm.py, is redistributed -under the terms of the GPL license as well. 
- -License for 'rpython/rlib/rvmprof/src' --------------------------------------- - -The code is based on gperftools. You may see a copy of the License for it at - - https://github.com/gperftools/gperftools/blob/master/COPYING + werat diff --git a/_pytest/python.py b/_pytest/python.py --- a/_pytest/python.py +++ b/_pytest/python.py @@ -498,7 +498,10 @@ """ Collector for test methods. """ def collect(self): if hasinit(self.obj): - pytest.skip("class %s.%s with __init__ won't get collected" % ( + # XXX used to be skip(), but silently skipping classes + # XXX just because they have been written long ago is + # XXX imho a very, very, very bad idea + pytest.fail("class %s.%s with __init__ won't get collected" % ( self.obj.__module__, self.obj.__name__, )) diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -122,22 +122,24 @@ """Dummy method to let some easy_install packages that have optional C speedup components. 
""" + def customize(executable, flags): + command = compiler.executables[executable] + flags + setattr(compiler, executable, command) + if compiler.compiler_type == "unix": compiler.compiler_so.extend(['-O2', '-fPIC', '-Wimplicit']) compiler.shared_lib_extension = get_config_var('SO') if "CPPFLAGS" in os.environ: cppflags = shlex.split(os.environ["CPPFLAGS"]) - compiler.compiler.extend(cppflags) - compiler.compiler_so.extend(cppflags) - compiler.linker_so.extend(cppflags) + for executable in ('compiler', 'compiler_so', 'linker_so'): + customize(executable, cppflags) if "CFLAGS" in os.environ: cflags = shlex.split(os.environ["CFLAGS"]) - compiler.compiler.extend(cflags) - compiler.compiler_so.extend(cflags) - compiler.linker_so.extend(cflags) + for executable in ('compiler', 'compiler_so', 'linker_so'): + customize(executable, cflags) if "LDFLAGS" in os.environ: ldflags = shlex.split(os.environ["LDFLAGS"]) - compiler.linker_so.extend(ldflags) + customize('linker_so', ldflags) from sysconfig_cpython import ( diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py --- a/lib_pypy/_ctypes/basics.py +++ b/lib_pypy/_ctypes/basics.py @@ -167,7 +167,7 @@ else: return self.value - def __buffer__(self): + def __buffer__(self, flags): return buffer(self._buffer) def _get_b_base(self): diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -342,7 +342,7 @@ thisarg = cast(thisvalue, POINTER(POINTER(c_void_p))) keepalives, newargs, argtypes, outargs, errcheckargs = ( self._convert_args(argtypes, args[1:], kwargs)) - newargs.insert(0, thisvalue.value) + newargs.insert(0, thisarg) argtypes.insert(0, c_void_p) else: thisarg = None diff --git a/lib_pypy/gdbm.py b/lib_pypy/gdbm.py --- a/lib_pypy/gdbm.py +++ b/lib_pypy/gdbm.py @@ -137,6 +137,8 @@ lib.gdbm_sync(self.__ll_dbm) def open(filename, flags='r', mode=0666): + if isinstance(filename, unicode): + filename = filename.encode() 
if flags[0] == 'r': iflags = lib.GDBM_READER elif flags[0] == 'w': diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -58,16 +58,16 @@ # General information about the project. project = u'PyPy' -copyright = u'2015, The PyPy Project' +copyright = u'2016, The PyPy Project' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. -version = '4.0' +version = '5.4' # The full version, including alpha/beta/rc tags. -release = '4.0.0' +release = '5.4.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -44,6 +44,7 @@ Seo Sanghyeon Ronny Pfannschmidt Justin Peel + Raffael Tfirst David Edelsohn Anders Hammarquist Jakub Gustak @@ -87,7 +88,6 @@ Wenzhu Man John Witulski Laurence Tratt - Raffael Tfirst Ivan Sichmann Freitas Greg Price Dario Bertini @@ -111,6 +111,7 @@ tav Taavi Burns Georg Brandl + Nicolas Truessel Bert Freudenberg Stian Andreassen Wanja Saatkamp @@ -181,6 +182,7 @@ Vaibhav Sood Alan McIntyre Alexander Sedov + p_zieschang at yahoo.de Attila Gobi Jasper.Schulz Christopher Pope @@ -191,6 +193,7 @@ Arjun Naik Valentina Mukhamedzhanova Stefano Parmesan + touilleMan Alexis Daboville Jens-Uwe Mager Carl Meyer @@ -199,12 +202,14 @@ Gabriel Lukas Vacek Kunal Grover + Aaron Gallagher Andrew Dalke Sylvain Thenault Jakub Stasiak Nathan Taylor Vladimir Kryachko Omer Katz + Mark Williams Jacek Generowicz Alejandro J. 
Cura Jacob Oscarson @@ -325,9 +330,12 @@ yasirs Michael Chermside Anna Ravencroft + pizi Andrey Churin Dan Crosta + Eli Stevens Tobias Diaz Julien Phalip Roman Podoliaka Dan Loewenherz + werat diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -6,6 +6,7 @@ .. toctree:: + release-pypy2.7-v5.4.0.rst release-pypy2.7-v5.3.1.rst release-pypy2.7-v5.3.0.rst release-5.1.1.rst diff --git a/pypy/doc/index-of-whatsnew.rst b/pypy/doc/index-of-whatsnew.rst --- a/pypy/doc/index-of-whatsnew.rst +++ b/pypy/doc/index-of-whatsnew.rst @@ -7,6 +7,7 @@ .. toctree:: whatsnew-head.rst + whatsnew-pypy2-5.4.0.rst whatsnew-pypy2-5.3.1.rst whatsnew-pypy2-5.3.0.rst whatsnew-5.1.0.rst diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -57,7 +57,7 @@ -------------- Our cpyext C-API compatiblity layer can now run upstream NumPy unmodified. -Release PyPy2.7-v5.3 still fails about 200 of the ~6000 test in the NumPy +Release PyPy2.7-v5.4 still fails about 60 of the ~6000 test in the NumPy test suite. We could use help analyzing the failures and fixing them either as patches to upstream NumPy, or as fixes to PyPy. diff --git a/pypy/doc/release-pypy2.7-v5.4.0.rst b/pypy/doc/release-pypy2.7-v5.4.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-pypy2.7-v5.4.0.rst @@ -0,0 +1,218 @@ +============ +PyPy2.7 v5.4 +============ + +We have released PyPy2.7 v5.4, a little under two months after PyPy2.7 v5.3. +This new PyPy2.7 release includes incremental improvements to our C-API +compatability layer (cpyext), enabling us to pass over 99% of the upstream +numpy `test suite`_. We updated built-in cffi_ support to version 1.8, +which now supports the "limited API" mode for c-extensions on +CPython >=3.2. 
+ +We improved tooling for the PyPy JIT_, and expanded VMProf +support to OpenBSD and Dragon Fly BSD + +As always, this release fixed many issues and bugs raised by the +growing community of PyPy users. We strongly recommend updating. + +You can download the PyPy2.7 v5.4 release here: + + http://pypy.org/download.html + +We would like to thank our donors for the continued support of the PyPy +project. + +We would also like to thank our contributors and +encourage new people to join the project. PyPy has many +layers and we need help with all of them: `PyPy`_ and `RPython`_ documentation +improvements, tweaking popular `modules`_ to run on pypy, or general `help`_ +with making RPython's JIT even better. + +.. _`test suite`: https://bitbucket.org/pypy/pypy/wiki/Adventures%20in%20cpyext%20compatibility +.. _cffi: https://cffi.readthedocs.org +.. _JIT: https://morepypy.blogspot.com.au/2016/08/pypy-tooling-upgrade-jitviewer-and.html +.. _`PyPy`: http://doc.pypy.org +.. _`RPython`: https://rpython.readthedocs.org +.. _`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly +.. _`help`: http://doc.pypy.org/en/latest/project-ideas.html + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison) +due to its integrated tracing JIT compiler. + +We also welcome developers of other `dynamic languages`_ to see what RPython +can do for them. + +This release supports: + + * **x86** machines on most common operating systems + (Linux 32/64 bits, Mac OS X 64 bits, Windows 32 bits, OpenBSD, FreeBSD) + + * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux, + + * big- and little-endian variants of **PPC64** running Linux, + + * **s390x** running Linux + +.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org +.. 
_`dynamic languages`: http://pypyjs.org + +Other Highlights (since 5.3 released in June 2016) +========================================================= + +* New features: + + * Add `sys.{get,set}dlopenflags` + + * Improve CPython compatibility of 'is' for small and empty strings + + * Support for rgc.FinalizerQueue in the Boehm garbage collector + + * (RPython) support spawnv() if it is called in C `_spawnv` on windows + + * Fill in more slots when creating a PyTypeObject from a W_TypeObject, + like `__hex__`, `__sub__`, `__pow__` + + * Copy CPython's logic more closely for `isinstance()` and + `issubclass()` as well as `type.__instancecheck__()` and + `type.__subclasscheck__()` + + * Expose the name of CDLL objects + + * Rewrite the win32 dependencies of `subprocess` to use cffi + instead of ctypes + + * Improve the `JIT logging`_ facitilities + + * (RPython) make int * string work + + * Allocate all RPython strings with one extra byte, normally + unused. This now allows `ffi.from_buffer(string)` in CFFI with + no copy + + * Adds a new commandline option `-X track-resources` that will + produce a `ResourceWarning` when the GC closes a file or socket. + The traceback for the place where the file or socket was allocated + is given as well, which aids finding places where `close()` is + missing + + * Add missing `PyObject_Realloc`, `PySequence_GetSlice` + + * `type.__dict__` now returns a `dict_proxy` object, like on CPython. + Previously it returned what looked like a regular dict object (but + it was already read-only) + + * (RPython) add `rposix.{get,set}_inheritable()`, needed by Python 3.5 + + * (RPython) add `rposix_scandir` portably, needed for Python 3.5 + + * Increased but incomplete support for memoryview attributes (format, + itemsize, ...) 
which also adds support for `PyMemoryView_FromObject` + +* Bug Fixes + + * Reject `mkdir()` in read-only sandbox filesystems + + * Add include guards to pymem.h to enable c++ compilation + + * Fix build breakage on OpenBSD and FreeBSD + + * Support OpenBSD, Dragon Fly BSD in VMProf + + * Fix for `bytearray('').replace('a', 'ab')` for empty strings + + * Sync internal state before calling `PyFile_AsFile()` + + * Allow writing to a char* from `PyString_AsString()` until it is + forced, also refactor `PyStringObject` to look like CPython's + and allow subclassing `PyString_Type` and `PyUnicode_Type` + + * Rpython rffi's socket(2) wrapper did not preserve errno + + * Refactor `PyTupleObject` to look like CPython's and allow + subclassing `PyTuple_Type` + + * Allow c-level assignment to a function pointer in a C-API + user-defined type after calling PyTypeReady by retrieving + a pointer to the function via offsets + rather than storing the function pointer itself + + * Use `madvise(MADV_FREE)`, or if that doesn't exist + `MADV_DONTNEED` on freed arenas to release memory back to the + OS for resource monitoring + + * Fix overflow detection in conversion of float to 64-bit integer + in timeout argument to various thread/threading primitives + + * Fix win32 outputting `\r\r\n` in some cases + + * Make `hash(-1)` return -2, as CPython does, and fix all the + ancilary places this matters + + * Fix `PyNumber_Check()` to behave more like CPython + + * (VMProf) Try hard to not miss any Python-level frame in the + captured stacks, even if there is metainterp or blackhole interp + involved. 
Also fix the stacklet (greenlet) support + + * Fix a critical JIT bug where `raw_malloc` -equivalent functions + lost the additional flags + + * Fix the mapdict cache for subclasses of builtin types that + provide a dict + + * Issues reported with our previous release were resolved_ after + reports from users on our issue tracker at + https://bitbucket.org/pypy/pypy/issues or on IRC at #pypy + +* Performance improvements: + + * Add a before_call()-like equivalent before a few operations like + `malloc_nursery`, to move values from registers into other registers + instead of to the stack. + + * More tightly pack the stack when calling with `release gil` + + * Support `int_floordiv()`, `int_mod()` in the JIT more efficiently + and add `rarithmetic.int_c_div()`, `rarithmetic.int_c_mod()` as + explicit interfaces. Clarify that `int_floordiv()` does python-style + rounding, unlike `llop.int_floordiv()`. + + * Use `ll_assert` (more often) in incminimark + + * (Testing) Simplify handling of interp-level tests and make it + more forward-compatible. Don't use interp-level RPython + machinery to test building app-level extensions in cpyext + + * Constant-fold `ffi.offsetof("structname", "fieldname")` in cffi + backend + + * Avoid a case in the JIT, where successive guard failures in + the same Python function end up as successive levels of + RPython functions, eventually exhausting the stack, while at + app-level the traceback is very short + + * Check for NULL returns from calls to the raw-malloc and raise, + rather than a guard + + * Improve `socket.recvfrom()` so that it copies less if possible + + * When generating C code, inline `goto` to blocks with only one + predecessor, generating less lines of code + + * When running the final backend-optimization phase before emitting + C code, constant-fold calls to we_are_jitted to return False. 
This + makes the generated C code a few percent smaller + + * Refactor the `uid_t/gid_t` handling in `rlib.rposix` and in + `interp_posix.py`, based on the clean-up of CPython 2.7.x + +.. _`JIT logging`: https://morepypy.blogspot.com/2016/08/pypy-tooling-upgrade-jitviewer-and.html +.. _resolved: http://doc.pypy.org/en/latest/whatsnew-5.4.0.html + +Please update, and continue to help us make PyPy better. + +Cheers diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -1,152 +1,9 @@ ========================== -What's new in PyPy2.7 5.3+ +What's new in PyPy2.7 5.4+ ========================== -.. this is a revision shortly after release-pypy2.7-v5.3 -.. startrev: 873218a739f1 +.. this is a revision shortly after release-pypy2.7-v5.4 +.. startrev: 522736f816dc -.. 418b05f95db5 -Improve CPython compatibility for ``is``. Now code like ``if x is ():`` -works the same way as it does on CPython. See http://pypy.readthedocs.io/en/latest/cpython_differences.html#object-identity-of-primitive-values-is-and-id . - -.. pull request #455 -Add sys.{get,set}dlopenflags, for cpyext extensions. - -.. branch: fix-gen-dfa - -Resolves an issue with the generator script to build the dfa for Python syntax. - -.. branch: z196-support - -Fixes a critical issue in the register allocator and extends support on s390x. -PyPy runs and translates on the s390x revisions z10 (released February 2008, experimental) -and z196 (released August 2010) in addition to zEC12 and z13. -To target e.g. z196 on a zEC12 machine supply CFLAGS="-march=z196" to your shell environment. - -.. branch: s390x-5.3-catchup - -Implement the backend related changes for s390x. - -.. branch: incminimark-ll_assert -.. branch: vmprof-openbsd - -.. branch: testing-cleanup - -Simplify handling of interp-level tests and make it more forward- -compatible. - -.. 
branch: pyfile-tell -Sync w_file with the c-level FILE* before returning FILE* in PyFile_AsFile - -.. branch: rw-PyString_AS_STRING -Allow rw access to the char* returned from PyString_AS_STRING, also refactor -PyStringObject to look like cpython's and allow subclassing PyString_Type and -PyUnicode_Type - -.. branch: save_socket_errno - -Bug fix: if ``socket.socket()`` failed, the ``socket.error`` did not show -the errno of the failing system call, but instead some random previous -errno. - -.. branch: PyTuple_Type-subclass - -Refactor PyTupleObject to look like cpython's and allow subclassing -PyTuple_Type - -.. branch: call-via-pyobj - -Use offsets from PyTypeObject to find actual c function to call rather than -fixed functions, allows function override after PyType_Ready is called - -.. branch: issue2335 - -Avoid exhausting the stack in the JIT due to successive guard -failures in the same Python function ending up as successive levels of -RPython functions, while at app-level the traceback is very short - -.. branch: use-madv-free - -Try harder to memory to the OS. See e.g. issue #2336. Note that it does -not show up as a reduction of the VIRT column in ``top``, and the RES -column might also not show the reduction, particularly on Linux >= 4.5 or -on OS/X: it uses MADV_FREE, which only marks the pages as returnable to -the OS if the memory is low. - -.. branch: cpyext-slotdefs2 - -Fill in more slots when creating a PyTypeObject from a W_TypeObject -More slots are still TBD, like tp_print and richcmp - -.. branch: json-surrogates - -Align json module decode with the cpython's impl, fixes issue 2345 - -.. branch: issue2343 - -Copy CPython's logic more closely for handling of ``__instancecheck__()`` -and ``__subclasscheck__()``. Fixes issue 2343. - -.. branch: msvcrt-cffi - -Rewrite the Win32 dependencies of 'subprocess' to use cffi instead -of ctypes. 
This avoids importing ctypes in many small programs and -scripts, which in turn avoids enabling threads (because ctypes -creates callbacks at import time, and callbacks need threads). - -.. branch: new-jit-log - -The new logging facility that integrates with and adds features to vmprof.com. - -.. branch: jitlog-32bit - -Resolve issues to use the new logging facility on a 32bit system - -.. branch: ep2016sprint - -Trying harder to make hash(-1) return -2, like it does on CPython - -.. branch: jitlog-exact-source-lines - -Log exact line positions in debug merge points. - -.. branch: null_byte_after_str - -Allocate all RPython strings with one extra byte, normally unused. -It is used to hold a final zero in case we need some ``char *`` -representation of the string, together with checks like ``not -can_move()`` or object pinning. Main new thing that this allows: -``ffi.from_buffer(string)`` in CFFI. Additionally, and most -importantly, CFFI calls that take directly a string as argument don't -copy the string any more---this is like CFFI on CPython. - -.. branch: resource_warning - -Add a new command line option -X track-resources which will produce -ResourceWarnings when the GC closes unclosed files and sockets. - -.. branch: cpyext-realloc - -Implement PyObject_Realloc - -.. branch: inline-blocks - -Improve a little bit the readability of the generated C code - -.. branch: improve-vmprof-testing - -Improved vmprof support: now tries hard to not miss any Python-level -frame in the captured stacks, even if there is the metainterp or -blackhole interp involved. Also fix the stacklet (greenlet) support. - -.. branch: py2-mappingproxy - -``type.__dict__`` now returns a ``dict_proxy`` object, like on CPython. -Previously it returned what looked like a regular dict object (but it -was already read-only). - - -.. branch: const-fold-we-are-jitted - -Reduce the size of the generated C code by constant-folding ``we_are_jitted`` -in non-jitcode. +.. 
branch: rpython-resync +Backport rpython changes made directly on the py3k and py3.5 branches. diff --git a/pypy/doc/whatsnew-pypy2-5.4.0.rst b/pypy/doc/whatsnew-pypy2-5.4.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/whatsnew-pypy2-5.4.0.rst @@ -0,0 +1,165 @@ +========================= +What's new in PyPy2.7 5.4 +========================= + +.. this is a revision shortly after release-pypy2.7-v5.3 +.. startrev: 873218a739f1 + +.. 418b05f95db5 +Improve CPython compatibility for ``is``. Now code like ``if x is ():`` +works the same way as it does on CPython. See http://pypy.readthedocs.io/en/latest/cpython_differences.html#object-identity-of-primitive-values-is-and-id . + +.. pull request #455 +Add sys.{get,set}dlopenflags, for cpyext extensions. + +.. branch: fix-gen-dfa + +Resolves an issue with the generator script to build the dfa for Python syntax. + +.. branch: z196-support + +Fixes a critical issue in the register allocator and extends support on s390x. +PyPy runs and translates on the s390x revisions z10 (released February 2008, experimental) +and z196 (released August 2010) in addition to zEC12 and z13. +To target e.g. z196 on a zEC12 machine supply CFLAGS="-march=z196" to your shell environment. + +.. branch: s390x-5.3-catchup + +Implement the backend related changes for s390x. + +.. branch: incminimark-ll_assert +.. branch: vmprof-openbsd + +.. branch: testing-cleanup + +Simplify handling of interp-level tests and make it more forward- +compatible. + +.. branch: pyfile-tell +Sync w_file with the c-level FILE* before returning FILE* in PyFile_AsFile + +.. branch: rw-PyString_AS_STRING +Allow rw access to the char* returned from PyString_AS_STRING, also refactor +PyStringObject to look like cpython's and allow subclassing PyString_Type and +PyUnicode_Type + +.. branch: save_socket_errno + +Bug fix: if ``socket.socket()`` failed, the ``socket.error`` did not show +the errno of the failing system call, but instead some random previous +errno. + +.. 
branch: PyTuple_Type-subclass + +Refactor PyTupleObject to look like cpython's and allow subclassing +PyTuple_Type + +.. branch: call-via-pyobj + +Use offsets from PyTypeObject to find actual c function to call rather than +fixed functions, allows function override after PyType_Ready is called + +.. branch: issue2335 + +Avoid exhausting the stack in the JIT due to successive guard +failures in the same Python function ending up as successive levels of +RPython functions, while at app-level the traceback is very short + +.. branch: use-madv-free + +Try harder to memory to the OS. See e.g. issue #2336. Note that it does +not show up as a reduction of the VIRT column in ``top``, and the RES +column might also not show the reduction, particularly on Linux >= 4.5 or +on OS/X: it uses MADV_FREE, which only marks the pages as returnable to +the OS if the memory is low. + +.. branch: cpyext-slotdefs2 + +Fill in more slots when creating a PyTypeObject from a W_TypeObject +More slots are still TBD, like tp_print and richcmp + +.. branch: json-surrogates + +Align json module decode with the cpython's impl, fixes issue 2345 + +.. branch: issue2343 + +Copy CPython's logic more closely for handling of ``__instancecheck__()`` +and ``__subclasscheck__()``. Fixes issue 2343. + +.. branch: msvcrt-cffi + +Rewrite the Win32 dependencies of 'subprocess' to use cffi instead +of ctypes. This avoids importing ctypes in many small programs and +scripts, which in turn avoids enabling threads (because ctypes +creates callbacks at import time, and callbacks need threads). + +.. branch: new-jit-log + +The new logging facility that integrates with and adds features to vmprof.com. + +.. branch: jitlog-32bit + +Resolve issues to use the new logging facility on a 32bit system + +.. branch: ep2016sprint + +Trying harder to make hash(-1) return -2, like it does on CPython + +.. branch: jitlog-exact-source-lines + +Log exact line positions in debug merge points. + +.. 
branch: null_byte_after_str + +Allocate all RPython strings with one extra byte, normally unused. +It is used to hold a final zero in case we need some ``char *`` +representation of the string, together with checks like ``not +can_move()`` or object pinning. Main new thing that this allows: +``ffi.from_buffer(string)`` in CFFI. Additionally, and most +importantly, CFFI calls that take directly a string as argument don't +copy the string any more---this is like CFFI on CPython. + +.. branch: resource_warning + +Add a new command line option -X track-resources which will produce +ResourceWarnings when the GC closes unclosed files and sockets. + +.. branch: cpyext-realloc + +Implement PyObject_Realloc + +.. branch: inline-blocks + +Improve a little bit the readability of the generated C code + +.. branch: improve-vmprof-testing + +Improved vmprof support: now tries hard to not miss any Python-level +frame in the captured stacks, even if there is the metainterp or +blackhole interp involved. Also fix the stacklet (greenlet) support. + +.. branch: py2-mappingproxy + +``type.__dict__`` now returns a ``dict_proxy`` object, like on CPython. +Previously it returned what looked like a regular dict object (but it +was already read-only). + + +.. branch: const-fold-we-are-jitted + +Reduce the size of the generated C code by constant-folding ``we_are_jitted`` +in non-jitcode. + +.. branch: memoryview-attributes + +Support for memoryview attributes (format, itemsize, ...). +Extends the cpyext emulation layer. + +.. branch: redirect-assembler-jitlog + +Log more information to properly rebuild the redirected traces in jitviewer. + +.. 
branch: cpyext-subclass + +Copy Py_TPFLAGS_CHECKTYPES, Py_TPFLAGS_HAVE_INPLACEOPS when inheriting diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -208,7 +208,8 @@ def buffer_w(self, space, flags): w_impl = space.lookup(self, '__buffer__') if w_impl is not None: - w_result = space.get_and_call_function(w_impl, self) + w_result = space.get_and_call_function(w_impl, self, + space.newint(flags)) if space.isinstance_w(w_result, space.w_buffer): return w_result.buffer_w(space, flags) raise BufferInterfaceNotFound @@ -216,7 +217,8 @@ def readbuf_w(self, space): w_impl = space.lookup(self, '__buffer__') if w_impl is not None: - w_result = space.get_and_call_function(w_impl, self) + w_result = space.get_and_call_function(w_impl, self, + space.newint(space.BUF_FULL_RO)) if space.isinstance_w(w_result, space.w_buffer): return w_result.readbuf_w(space) raise BufferInterfaceNotFound @@ -224,7 +226,8 @@ def writebuf_w(self, space): w_impl = space.lookup(self, '__buffer__') if w_impl is not None: - w_result = space.get_and_call_function(w_impl, self) + w_result = space.get_and_call_function(w_impl, self, + space.newint(space.BUF_FULL)) if space.isinstance_w(w_result, space.w_buffer): return w_result.writebuf_w(space) raise BufferInterfaceNotFound @@ -232,7 +235,8 @@ def charbuf_w(self, space): w_impl = space.lookup(self, '__buffer__') if w_impl is not None: - w_result = space.get_and_call_function(w_impl, self) + w_result = space.get_and_call_function(w_impl, self, + space.newint(space.BUF_FULL_RO)) if space.isinstance_w(w_result, space.w_buffer): return w_result.charbuf_w(space) raise BufferInterfaceNotFound diff --git a/pypy/module/__builtin__/descriptor.py b/pypy/module/__builtin__/descriptor.py --- a/pypy/module/__builtin__/descriptor.py +++ b/pypy/module/__builtin__/descriptor.py @@ -23,6 +23,14 @@ self.w_objtype = w_type self.w_self = w_obj_or_type + def 
descr_repr(self, space): + if self.w_objtype is not None: + objtype_name = "<%s object>" % self.w_objtype.getname(space) + else: + objtype_name = 'NULL' + return space.wrap(", %s>" % ( + self.w_starttype.getname(space), objtype_name)) + def get(self, space, w_obj, w_type=None): if self.w_self is None or space.is_w(w_obj, space.w_None): return self @@ -84,7 +92,10 @@ 'super', __new__ = generic_new_descr(W_Super), __init__ = interp2app(W_Super.descr_init), + __repr__ = interp2app(W_Super.descr_repr), __thisclass__ = interp_attrproperty_w("w_starttype", W_Super), + __self__ = interp_attrproperty_w("w_self", W_Super), + __self_class__ = interp_attrproperty_w("w_objtype", W_Super), __getattribute__ = interp2app(W_Super.getattribute), __get__ = interp2app(W_Super.get), __doc__ = """\ diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py --- a/pypy/module/__builtin__/interp_classobj.py +++ b/pypy/module/__builtin__/interp_classobj.py @@ -38,6 +38,8 @@ class W_ClassObject(W_Root): + _immutable_fields_ = ['bases_w?[*]', 'w_dict?'] + def __init__(self, space, w_name, bases, w_dict): self.name = space.str_w(w_name) make_sure_not_resized(bases) @@ -75,6 +77,7 @@ "__bases__ items must be classes") self.bases_w = bases_w + @jit.unroll_safe def is_subclass_of(self, other): assert isinstance(other, W_ClassObject) if self is other: @@ -313,7 +316,7 @@ # This method ignores the instance dict and the __getattr__. # Returns None if not found. 
assert isinstance(name, str) - w_value = self.w_class.lookup(space, name) + w_value = jit.promote(self.w_class).lookup(space, name) if w_value is None: return None w_descr_get = space.lookup(w_value, '__get__') diff --git a/pypy/module/__builtin__/test/test_descriptor.py b/pypy/module/__builtin__/test/test_descriptor.py --- a/pypy/module/__builtin__/test/test_descriptor.py +++ b/pypy/module/__builtin__/test/test_descriptor.py @@ -250,6 +250,24 @@ assert super(B, B()).__thisclass__ is B assert super(A, B()).__thisclass__ is A + def test_super_self_selfclass(self): + class A(object): + pass + class B(A): + pass + b = B() + assert super(A, b).__self__ is b + assert super(A).__self__ is None + assert super(A, b).__self_class__ is B + assert super(A).__self_class__ is None + + def test_super_repr(self): + class A(object): + def __repr__(self): + return super(A, self).__repr__() + '!' + assert repr(A()).endswith('>!') + assert repr(super(A, A())) == ", >" + def test_property_docstring(self): assert property.__doc__.startswith('property') diff --git a/pypy/module/_sre/__init__.py b/pypy/module/_sre/__init__.py --- a/pypy/module/_sre/__init__.py +++ b/pypy/module/_sre/__init__.py @@ -1,4 +1,4 @@ -from pypy.interpreter.mixedmodule import MixedModule +from pypy.interpreter.mixedmodule import MixedModule class Module(MixedModule): @@ -7,7 +7,7 @@ interpleveldefs = { 'CODESIZE': 'space.wrap(interp_sre.CODESIZE)', - 'MAGIC': 'space.wrap(interp_sre.MAGIC)', + 'MAGIC': 'space.newint(20031017)', 'MAXREPEAT': 'space.wrap(interp_sre.MAXREPEAT)', 'compile': 'interp_sre.W_SRE_Pattern', 'getlower': 'interp_sre.w_getlower', diff --git a/pypy/module/_sre/interp_sre.py b/pypy/module/_sre/interp_sre.py --- a/pypy/module/_sre/interp_sre.py +++ b/pypy/module/_sre/interp_sre.py @@ -14,7 +14,7 @@ # Constants and exposed functions from rpython.rlib.rsre import rsre_core -from rpython.rlib.rsre.rsre_char import MAGIC, CODESIZE, MAXREPEAT, getlower, set_unicode_db +from 
rpython.rlib.rsre.rsre_char import CODESIZE, MAXREPEAT, getlower, set_unicode_db @unwrap_spec(char_ord=int, flags=int) diff --git a/pypy/module/_winreg/interp_winreg.py b/pypy/module/_winreg/interp_winreg.py --- a/pypy/module/_winreg/interp_winreg.py +++ b/pypy/module/_winreg/interp_winreg.py @@ -358,9 +358,15 @@ elif typ == rwinreg.REG_SZ or typ == rwinreg.REG_EXPAND_SZ: if not buflen: - return space.wrap("") - s = rffi.charp2strn(rffi.cast(rffi.CCHARP, buf), buflen) - return space.wrap(s) + s = "" + else: + # may or may not have a trailing NULL in the buffer. + buf = rffi.cast(rffi.CCHARP, buf) + if buf[buflen - 1] == '\x00': + buflen -= 1 + s = rffi.charp2strn(buf, buflen) + w_s = space.wrap(s) + return space.call_method(w_s, 'decode', space.wrap('mbcs')) elif typ == rwinreg.REG_MULTI_SZ: if not buflen: @@ -460,7 +466,7 @@ return space.newtuple([ convert_from_regdata(space, databuf, length, retType[0]), - space.wrap(retType[0]), + space.wrap(intmask(retType[0])), ]) @unwrap_spec(subkey=str) @@ -612,7 +618,7 @@ space.wrap(rffi.charp2str(valuebuf)), convert_from_regdata(space, databuf, length, retType[0]), - space.wrap(retType[0]), + space.wrap(intmask(retType[0])), ]) @unwrap_spec(index=int) diff --git a/pypy/module/_winreg/test/test_winreg.py b/pypy/module/_winreg/test/test_winreg.py --- a/pypy/module/_winreg/test/test_winreg.py +++ b/pypy/module/_winreg/test/test_winreg.py @@ -151,6 +151,7 @@ def test_readValues(self): from _winreg import OpenKey, EnumValue, QueryValueEx, EnumKey + from _winreg import REG_SZ, REG_EXPAND_SZ key = OpenKey(self.root_key, self.test_key_name) sub_key = OpenKey(key, "sub_key") index = 0 @@ -164,7 +165,10 @@ assert index == len(self.test_data) for name, value, type in self.test_data: - assert QueryValueEx(sub_key, name) == (value, type) + result = QueryValueEx(sub_key, name) + assert result == (value, type) + if type == REG_SZ or type == REG_EXPAND_SZ: + assert isinstance(result[0], unicode) # not string assert EnumKey(key, 0) == 
"sub_key" raises(EnvironmentError, EnumKey, key, 1) diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -120,8 +120,8 @@ constant_names = """ Py_TPFLAGS_READY Py_TPFLAGS_READYING Py_TPFLAGS_HAVE_GETCHARBUFFER METH_COEXIST METH_STATIC METH_CLASS Py_TPFLAGS_BASETYPE -METH_NOARGS METH_VARARGS METH_KEYWORDS METH_O -Py_TPFLAGS_HEAPTYPE Py_TPFLAGS_HAVE_CLASS +METH_NOARGS METH_VARARGS METH_KEYWORDS METH_O Py_TPFLAGS_HAVE_INPLACEOPS +Py_TPFLAGS_HEAPTYPE Py_TPFLAGS_HAVE_CLASS Py_TPFLAGS_HAVE_NEWBUFFER Py_LT Py_LE Py_EQ Py_NE Py_GT Py_GE Py_TPFLAGS_CHECKTYPES """.split() for name in constant_names: @@ -649,6 +649,7 @@ #('smalltable', rffi.CFixedArray(Py_ssize_t, 2)), ('internal', rffi.VOIDP) )) +Py_bufferP = lltype.Ptr(Py_buffer) @specialize.memo() def is_PyObject(TYPE): @@ -976,8 +977,10 @@ py_type_ready(space, get_capsule_type()) INIT_FUNCTIONS.append(init_types) from pypy.module.posix.interp_posix import add_fork_hook - reinit_tls = rffi.llexternal('%sThread_ReInitTLS' % prefix, [], lltype.Void, - compilation_info=eci) + _reinit_tls = rffi.llexternal('%sThread_ReInitTLS' % prefix, [], + lltype.Void, compilation_info=eci) + def reinit_tls(space): + _reinit_tls() add_fork_hook('child', reinit_tls) def init_function(func): diff --git a/pypy/module/cpyext/buffer.py b/pypy/module/cpyext/buffer.py --- a/pypy/module/cpyext/buffer.py +++ b/pypy/module/cpyext/buffer.py @@ -1,13 +1,17 @@ from pypy.interpreter.error import oefmt from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( - cpython_api, CANNOT_FAIL, Py_buffer) + cpython_api, CANNOT_FAIL, Py_buffer, Py_TPFLAGS_HAVE_NEWBUFFER) from pypy.module.cpyext.pyobject import PyObject @cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) -def PyObject_CheckBuffer(space, w_obj): +def PyObject_CheckBuffer(space, pyobj): """Return 1 if obj supports the buffer interface otherwise 0.""" - return 0 # the bf_getbuffer 
field is never filled by cpyext + as_buffer = pyobj.c_ob_type.c_tp_as_buffer + flags = pyobj.c_ob_type.c_tp_flags + if (flags & Py_TPFLAGS_HAVE_NEWBUFFER and as_buffer.c_bf_getbuffer): + return 1 + return 0 @cpython_api([PyObject, lltype.Ptr(Py_buffer), rffi.INT_real], rffi.INT_real, error=-1) diff --git a/pypy/module/cpyext/import_.py b/pypy/module/cpyext/import_.py --- a/pypy/module/cpyext/import_.py +++ b/pypy/module/cpyext/import_.py @@ -123,5 +123,4 @@ pathname = code.co_filename w_mod = importing.add_module(space, w_name) space.setattr(w_mod, space.wrap('__file__'), space.wrap(pathname)) - importing.exec_code_module(space, w_mod, code) - return w_mod + return importing.exec_code_module(space, w_mod, code, w_name) diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -29,8 +29,8 @@ #define PY_VERSION "2.7.10" /* PyPy version as a string */ -#define PYPY_VERSION "5.3.2-alpha0" -#define PYPY_VERSION_NUM 0x05030200 +#define PYPY_VERSION "5.4.1-alpha0" +#define PYPY_VERSION_NUM 0x05040100 /* Defined to mean a PyPy where cpyext holds more regular references to PyObjects, e.g. 
staying alive as long as the internal PyPy object diff --git a/pypy/module/cpyext/memoryobject.py b/pypy/module/cpyext/memoryobject.py --- a/pypy/module/cpyext/memoryobject.py +++ b/pypy/module/cpyext/memoryobject.py @@ -12,7 +12,7 @@ @cpython_api([PyObject], PyObject) def PyMemoryView_GET_BASE(space, w_obj): # return the obj field of the Py_buffer created by PyMemoryView_GET_BUFFER - raise NotImplementedError + raise NotImplementedError('PyMemoryView_GET_BUFFER') @cpython_api([PyObject], lltype.Ptr(Py_buffer), error=CANNOT_FAIL) def PyMemoryView_GET_BUFFER(space, w_obj): diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -3,15 +3,16 @@ import re from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib.rarithmetic import widen from pypy.module.cpyext.api import ( cpython_api, generic_cpy_call, PyObject, Py_ssize_t, Py_TPFLAGS_CHECKTYPES, - mangle_name, pypy_decl) + mangle_name, pypy_decl, Py_buffer, Py_bufferP) from pypy.module.cpyext.typeobjectdefs import ( unaryfunc, wrapperfunc, ternaryfunc, PyTypeObjectPtr, binaryfunc, ternaryfunc, getattrfunc, getattrofunc, setattrofunc, lenfunc, ssizeargfunc, inquiry, ssizessizeargfunc, ssizeobjargproc, iternextfunc, initproc, richcmpfunc, cmpfunc, hashfunc, descrgetfunc, descrsetfunc, objobjproc, objobjargproc, - readbufferproc, ssizessizeobjargproc) + readbufferproc, getbufferproc, ssizessizeobjargproc) from pypy.module.cpyext.pyobject import from_ref, make_ref, Py_DecRef from pypy.module.cpyext.pyerrors import PyErr_Occurred from pypy.module.cpyext.state import State @@ -22,6 +23,9 @@ from rpython.rlib.objectmodel import specialize from rpython.tool.sourcetools import func_renamer from rpython.rtyper.annlowlevel import llhelper +from pypy.module.sys.version import CPYTHON_VERSION + +PY3 = CPYTHON_VERSION[0] == 3 # XXX: Also defined in object.h Py_LT = 0 @@ -298,11 +302,23 @@ # Similar to Py_buffer 
_immutable_ = True - def __init__(self, ptr, size, w_obj): + def __init__(self, ptr, size, w_obj, format='B', shape=None, + strides=None, ndim=1, itemsize=1, readonly=True): self.ptr = ptr self.size = size self.w_obj = w_obj # kept alive - self.readonly = True + self.format = format + if not shape: + self.shape = [size] + else: + self.shape = shape + if not strides: + self.strides = [1] + else: + self.strides = strides + self.ndim = ndim + self.itemsize = itemsize + self.readonly = readonly def getlength(self): return self.size @@ -313,6 +329,15 @@ def get_raw_address(self): return rffi.cast(rffi.CCHARP, self.ptr) + def getformat(self): + return self.format + + def getshape(self): + return self.shape + + def getitemsize(self): + return self.itemsize + def wrap_getreadbuffer(space, w_self, w_args, func): func_target = rffi.cast(readbufferproc, func) with lltype.scoped_alloc(rffi.VOIDPP.TO, 1) as ptr: @@ -322,6 +347,30 @@ space.fromcache(State).check_and_raise_exception(always=True) return space.newbuffer(CPyBuffer(ptr[0], size, w_self)) +def wrap_getbuffer(space, w_self, w_args, func): + func_target = rffi.cast(getbufferproc, func) + with lltype.scoped_alloc(Py_buffer) as pybuf: + _flags = 0 + if space.len_w(w_args) > 0: + _flags = space.int_w(space.listview(w_args)[0]) + flags = rffi.cast(rffi.INT_real,_flags) + size = generic_cpy_call(space, func_target, w_self, pybuf, flags) + if widen(size) < 0: + space.fromcache(State).check_and_raise_exception(always=True) + ptr = pybuf.c_buf + size = pybuf.c_len + ndim = widen(pybuf.c_ndim) + shape = [pybuf.c_shape[i] for i in range(ndim)] + strides = [pybuf.c_strides[i] for i in range(ndim)] + if pybuf.c_format: + format = rffi.charp2str(pybuf.c_format) + else: + format = 'B' + return space.newbuffer(CPyBuffer(ptr, size, w_self, format=format, + ndim=ndim, shape=shape, strides=strides, + itemsize=pybuf.c_itemsize, + readonly=widen(pybuf.c_readonly))) + def get_richcmp_func(OP_CONST): def inner(space, w_self, w_args, func): 
func_target = rffi.cast(richcmpfunc, func) @@ -486,7 +535,6 @@ def slot_tp_getattro(space, w_self, w_name): return space.call_function(getattr_fn, w_self, w_name) api_func = slot_tp_getattro.api_func - elif name == 'tp_call': call_fn = w_type.getdictvalue(space, '__call__') if call_fn is None: @@ -542,6 +590,21 @@ w_stararg=w_args, w_starstararg=w_kwds) return space.call_args(space.get(new_fn, w_self), args) api_func = slot_tp_new.api_func + elif name == 'tp_as_buffer.c_bf_getbuffer': + buff_fn = w_type.getdictvalue(space, '__buffer__') + if buff_fn is None: + return + @cpython_api([PyObject, Py_bufferP, rffi.INT_real], + rffi.INT_real, header=None, error=-1) + @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name)) + def buff_w(space, w_self, pybuf, flags): + # XXX this is wrong, needs a test + raise oefmt(space.w_NotImplemented, + "calling bf_getbuffer on a builtin type not supported yet") + #args = Arguments(space, [w_self], + # w_stararg=w_args, w_starstararg=w_kwds) + #return space.call_args(space.get(buff_fn, w_self), args) + api_func = buff_w.api_func else: # missing: tp_as_number.nb_nonzero, tp_as_number.nb_coerce # tp_as_sequence.c_sq_contains, tp_as_sequence.c_sq_length @@ -850,11 +913,19 @@ slotdefs = eval(slotdefs_str) # PyPy addition slotdefs += ( - TPSLOT("__buffer__", "tp_as_buffer.c_bf_getreadbuffer", None, "wrap_getreadbuffer", ""), + # XXX that might not be what we want! + TPSLOT("__buffer__", "tp_as_buffer.c_bf_getbuffer", None, "wrap_getbuffer", ""), ) +if not PY3: + slotdefs += ( + TPSLOT("__buffer__", "tp_as_buffer.c_bf_getreadbuffer", None, "wrap_getreadbuffer", ""), + ) + + # partial sort to solve some slot conflicts: # Number slots before Mapping slots before Sequence slots. 
+# also prefer the new buffer interface # These are the only conflicts between __name__ methods def slotdef_sort_key(slotdef): if slotdef.slot_name.startswith('tp_as_number'): @@ -863,6 +934,10 @@ return 2 if slotdef.slot_name.startswith('tp_as_sequence'): return 3 + if slotdef.slot_name == 'tp_as_buffer.c_bf_getbuffer': + return 100 + if slotdef.slot_name == 'tp_as_buffer.c_bf_getreadbuffer': + return 101 return 0 slotdefs = sorted(slotdefs, key=slotdef_sort_key) diff --git a/pypy/module/cpyext/test/buffer_test.c b/pypy/module/cpyext/test/buffer_test.c new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/test/buffer_test.c @@ -0,0 +1,248 @@ +#ifdef _MSC_VER +#define _CRT_SECURE_NO_WARNINGS 1 +#endif +#include +#include +#include + +/* + * Adapted from https://jakevdp.github.io/blog/2014/05/05/introduction-to-the-python-buffer-protocol, + * which is copyright Jake Vanderplas and released under the BSD license + */ + +/* Structure defines a 1-dimensional strided array */ +typedef struct{ + int* arr; + Py_ssize_t length; +} MyArray; + +/* initialize the array with integers 0...length */ +void initialize_MyArray(MyArray* a, long length){ + int i; + a->length = length; + a->arr = (int*)malloc(length * sizeof(int)); + for(i=0; iarr[i] = i; + } +} + +/* free the memory when finished */ +void deallocate_MyArray(MyArray* a){ + free(a->arr); + a->arr = NULL; +} + +/* tools to print the array */ +char* stringify(MyArray* a, int nmax){ + char* output = (char*) malloc(nmax * 20); + int k, pos = sprintf(&output[0], "["); + + for (k=0; k < a->length && k < nmax; k++){ + pos += sprintf(&output[pos], " %d", a->arr[k]); + } + if(a->length > nmax) + pos += sprintf(&output[pos], "..."); + sprintf(&output[pos], " ]"); + return output; +} + +void print_MyArray(MyArray* a, int nmax){ + char* s = stringify(a, nmax); + printf("%s", s); + free(s); +} + +/* This is where we define the PyMyArray object structure */ +typedef struct { + PyObject_HEAD + /* Type-specific fields go below. 
*/ + MyArray arr; +} PyMyArray; + + +/* This is the __init__ function, implemented in C */ +static int +PyMyArray_init(PyMyArray *self, PyObject *args, PyObject *kwds) +{ + int length = 0; + static char *kwlist[] = {"length", NULL}; + // init may have already been called + if (self->arr.arr != NULL) { + deallocate_MyArray(&self->arr); + } + + if (! PyArg_ParseTupleAndKeywords(args, kwds, "|i", kwlist, &length)) + return -1; + + if (length < 0) + length = 0; + + initialize_MyArray(&self->arr, length); + + return 0; +} + + +/* this function is called when the object is deallocated */ +static void +PyMyArray_dealloc(PyMyArray* self) +{ + deallocate_MyArray(&self->arr); + Py_TYPE(self)->tp_free((PyObject*)self); +} + + +/* This function returns the string representation of our object */ +static PyObject * +PyMyArray_str(PyMyArray * self) +{ + char* s = stringify(&self->arr, 10); + PyObject* ret = PyUnicode_FromString(s); + free(s); + return ret; +} + +/* Here is the buffer interface function */ +static int +PyMyArray_getbuffer(PyObject *obj, Py_buffer *view, int flags) +{ + PyMyArray* self = (PyMyArray*)obj; + fprintf(stdout, "in PyMyArray_getbuffer\n"); + if (view == NULL) { + fprintf(stdout, "view is NULL\n"); + PyErr_SetString(PyExc_ValueError, "NULL view in getbuffer"); + return -1; + } + if (flags == 0) { + fprintf(stdout, "flags is 0\n"); + PyErr_SetString(PyExc_ValueError, "flags == 0 in getbuffer"); + return -1; + } + + view->obj = (PyObject*)self; + view->buf = (void*)self->arr.arr; + view->len = self->arr.length * sizeof(int); + view->readonly = 0; + view->itemsize = sizeof(int); + view->format = "i"; // integer + view->ndim = 1; + view->shape = &self->arr.length; // length-1 sequence of dimensions + view->strides = &view->itemsize; // for the simple case we can do this + view->suboffsets = NULL; + view->internal = NULL; + + Py_INCREF(self); // need to increase the reference count + return 0; +} + +static PyBufferProcs PyMyArray_as_buffer = { +#if 
PY_MAJOR_VERSION < 3 + (readbufferproc)0, + (writebufferproc)0, + (segcountproc)0, + (charbufferproc)0, +#endif + (getbufferproc)PyMyArray_getbuffer, + (releasebufferproc)0, // we do not require any special release function +}; + + +/* Here is the type structure: we put the above functions in the appropriate place + in order to actually define the Python object type */ +static PyTypeObject PyMyArrayType = { + PyVarObject_HEAD_INIT(NULL, 0) + "pymyarray.PyMyArray", /* tp_name */ + sizeof(PyMyArray), /* tp_basicsize */ + 0, /* tp_itemsize */ + (destructor)PyMyArray_dealloc,/* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + 0, /* tp_reserved */ + (reprfunc)PyMyArray_str, /* tp_repr */ + 0, /* tp_as_number */ + 0, /* tp_as_sequence */ + 0, /* tp_as_mapping */ + 0, /* tp_hash */ + 0, /* tp_call */ + (reprfunc)PyMyArray_str, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + &PyMyArray_as_buffer, /* tp_as_buffer */ + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_NEWBUFFER, /* tp_flags */ + "PyMyArray object", /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + 0, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + (initproc)PyMyArray_init, /* tp_init */ +}; + +static PyMethodDef buffer_functions[] = { + {NULL, NULL} /* Sentinel */ +}; + +#if PY_MAJOR_VERSION >= 3 +static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, + "buffer_test", + "Module Doc", + -1, + buffer_functions, + NULL, + NULL, + NULL, + NULL, +}; +#define INITERROR return NULL + +/* Initialize this module. */ +#ifdef __GNUC__ +extern __attribute__((visibility("default"))) +#else +extern __declspec(dllexport) +#endif + +PyMODINIT_FUNC +PyInit_buffer_test(void) + +#else + +#define INITERROR return + +/* Initialize this module. 
*/ +#ifdef __GNUC__ +extern __attribute__((visibility("default"))) +#else +#endif + +PyMODINIT_FUNC +initbuffer_test(void) +#endif +{ +#if PY_MAJOR_VERSION >= 3 + PyObject *m= PyModule_Create(&moduledef); +#else + PyObject *m= Py_InitModule("buffer_test", buffer_functions); +#endif + if (m == NULL) + INITERROR; + PyMyArrayType.tp_new = PyType_GenericNew; + if (PyType_Ready(&PyMyArrayType) < 0) + INITERROR; + Py_INCREF(&PyMyArrayType); + PyModule_AddObject(m, "PyMyArray", (PyObject *)&PyMyArrayType); +#if PY_MAJOR_VERSION >=3 + return m; +#endif +} diff --git a/pypy/module/cpyext/test/test_arraymodule.py b/pypy/module/cpyext/test/test_arraymodule.py --- a/pypy/module/cpyext/test/test_arraymodule.py +++ b/pypy/module/cpyext/test/test_arraymodule.py @@ -87,4 +87,13 @@ module.switch_multiply() res = [1, 2, 3] * arr assert res == [2, 4, 6] + + def test_subclass(self): + module = self.import_module(name='array') + class Sub(module.array): + pass + + arr = Sub('i', [2]) + res = [1, 2, 3] * arr + assert res == [1, 2, 3, 1, 2, 3] diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -92,10 +92,20 @@ link_extra=link_extra, libraries=libraries) from pypy.module.imp.importing import get_so_extension - pydname = soname.new(purebasename=modname, ext=get_so_extension(space)) + ext = get_so_extension(space) + pydname = soname.new(purebasename=modname, ext=ext) soname.rename(pydname) return str(pydname) +def get_so_suffix(): + from imp import get_suffixes, C_EXTENSION + for suffix, mode, typ in get_suffixes(): + if typ == C_EXTENSION: + return suffix + else: + raise RuntimeError("This interpreter does not define a filename " + "suffix for C extensions!") + def compile_extension_module_applevel(space, modname, include_dirs=[], source_files=None, source_strings=None): """ @@ -126,13 +136,9 @@ source_strings=source_strings, compile_extra=compile_extra, 
link_extra=link_extra) - from imp import get_suffixes, C_EXTENSION - pydname = soname - for suffix, mode, typ in get_suffixes(): - if typ == C_EXTENSION: - pydname = soname.new(purebasename=modname, ext=suffix) - soname.rename(pydname) - break + ext = get_so_suffix() + pydname = soname.new(purebasename=modname, ext=ext) + soname.rename(pydname) return str(pydname) def freeze_refcnts(self): @@ -145,6 +151,24 @@ #state.print_refcounts() self.frozen_ll2callocations = set(ll2ctypes.ALLOCATED.values()) +class FakeSpace(object): + """Like TinyObjSpace, but different""" + def __init__(self, config): + from distutils.sysconfig import get_python_inc + self.config = config + self.include_dir = get_python_inc() + + def passthrough(self, arg): + return arg + listview = passthrough + str_w = passthrough + + def unwrap(self, args): + try: + return args.str_w(None) + except: + return args + class LeakCheckingTest(object): """Base class for all cpyext tests.""" spaceconfig = dict(usemodules=['cpyext', 'thread', '_rawffi', 'array', @@ -433,21 +457,8 @@ self.imported_module_names = [] if self.runappdirect: + fake = FakeSpace(self.space.config) def interp2app(func): - from distutils.sysconfig import get_python_inc - class FakeSpace(object): - def passthrough(self, arg): - return arg - listview = passthrough - str_w = passthrough - def unwrap(self, args): - try: - return args.str_w(None) - except: - return args - fake = FakeSpace() - fake.include_dir = get_python_inc() - fake.config = self.space.config def run(*args, **kwargs): for k in kwargs.keys(): if k not in func.unwrap_spec and not k.startswith('w_'): diff --git a/pypy/module/cpyext/test/test_memoryobject.py b/pypy/module/cpyext/test/test_memoryobject.py --- a/pypy/module/cpyext/test/test_memoryobject.py +++ b/pypy/module/cpyext/test/test_memoryobject.py @@ -1,17 +1,26 @@ -import pytest from pypy.module.cpyext.test.test_api import BaseApiTest +from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase + class 
TestMemoryViewObject(BaseApiTest): def test_fromobject(self, space, api): - if space.is_true(space.lt(space.sys.get('version_info'), - space.wrap((2, 7)))): - py.test.skip("unsupported before Python 2.7") - w_hello = space.newbytes("hello") + assert api.PyObject_CheckBuffer(w_hello) w_view = api.PyMemoryView_FromObject(w_hello) + w_char = space.call_method(w_view, '__getitem__', space.wrap(0)) + assert space.eq_w(w_char, space.wrap('h')) w_bytes = space.call_method(w_view, "tobytes") assert space.unwrap(w_bytes) == "hello" - @pytest.mark.skipif(True, reason='write a test for this') - def test_get_base_and_get_buffer(self, space, api): - assert False # XXX test PyMemoryView_GET_BASE, PyMemoryView_GET_BUFFER + +class AppTestBufferProtocol(AppTestCpythonExtensionBase): + def test_buffer_protocol(self): + import struct + module = self.import_module(name='buffer_test') + arr = module.PyMyArray(10) + y = memoryview(arr) + assert y.format == 'i' + assert y.shape == (10,) + s = y[3] + assert len(s) == struct.calcsize('i') + assert s == struct.pack('i', 3) diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -17,7 +17,9 @@ generic_cpy_call, Py_TPFLAGS_READY, Py_TPFLAGS_READYING, Py_TPFLAGS_HEAPTYPE, METH_VARARGS, METH_KEYWORDS, CANNOT_FAIL, Py_TPFLAGS_HAVE_GETCHARBUFFER, build_type_checkers, StaticObjectBuilder, - PyObjectFields, Py_TPFLAGS_BASETYPE, PyTypeObject, PyTypeObjectPtr) + PyObjectFields, Py_TPFLAGS_BASETYPE, PyTypeObject, PyTypeObjectPtr, + Py_TPFLAGS_HAVE_NEWBUFFER, Py_TPFLAGS_CHECKTYPES, + Py_TPFLAGS_HAVE_INPLACEOPS) from pypy.module.cpyext.methodobject import (W_PyCClassMethodObject, W_PyCWrapperObject, PyCFunction_NewEx, PyCFunction_typedef, PyMethodDef, W_PyCMethodObject, W_PyCFunctionObject) @@ -385,6 +387,8 @@ pto.c_tp_basicsize = base_pto.c_tp_basicsize if pto.c_tp_itemsize < base_pto.c_tp_itemsize: pto.c_tp_itemsize = base_pto.c_tp_itemsize + 
pto.c_tp_flags |= base_pto.c_tp_flags & Py_TPFLAGS_CHECKTYPES + pto.c_tp_flags |= base_pto.c_tp_flags & Py_TPFLAGS_HAVE_INPLACEOPS flags = rffi.cast(lltype.Signed, pto.c_tp_flags) base_object_pyo = make_ref(space, space.w_object) base_object_pto = rffi.cast(PyTypeObjectPtr, base_object_pyo) @@ -608,6 +612,7 @@ bf_getwritebuffer.api_func.get_wrapper(space)) pto.c_tp_as_buffer = c_buf pto.c_tp_flags |= Py_TPFLAGS_HAVE_GETCHARBUFFER + pto.c_tp_flags |= Py_TPFLAGS_HAVE_NEWBUFFER @cpython_api([PyObject], lltype.Void, header=None) def type_dealloc(space, obj): @@ -774,6 +779,8 @@ pto.c_tp_setattro = base.c_tp_setattro if not pto.c_tp_getattro: pto.c_tp_getattro = base.c_tp_getattro + if not pto.c_tp_as_buffer: + pto.c_tp_as_buffer = base.c_tp_as_buffer finally: Py_DecRef(space, base_pyo) @@ -810,8 +817,13 @@ # inheriting tp_as_* slots base = py_type.c_tp_base if base: - if not py_type.c_tp_as_number: py_type.c_tp_as_number = base.c_tp_as_number - if not py_type.c_tp_as_sequence: py_type.c_tp_as_sequence = base.c_tp_as_sequence + if not py_type.c_tp_as_number: + py_type.c_tp_as_number = base.c_tp_as_number + py_type.c_tp_flags |= base.c_tp_flags & Py_TPFLAGS_CHECKTYPES + py_type.c_tp_flags |= base.c_tp_flags & Py_TPFLAGS_HAVE_INPLACEOPS + if not py_type.c_tp_as_sequence: + py_type.c_tp_as_sequence = base.c_tp_as_sequence + py_type.c_tp_flags |= base.c_tp_flags & Py_TPFLAGS_HAVE_INPLACEOPS if not py_type.c_tp_as_mapping: py_type.c_tp_as_mapping = base.c_tp_as_mapping if not py_type.c_tp_as_buffer: py_type.c_tp_as_buffer = base.c_tp_as_buffer diff --git a/pypy/module/cpyext/typeobjectdefs.py b/pypy/module/cpyext/typeobjectdefs.py --- a/pypy/module/cpyext/typeobjectdefs.py +++ b/pypy/module/cpyext/typeobjectdefs.py @@ -5,6 +5,7 @@ Py_TPFLAGS_READYING, Py_TPFLAGS_READY, Py_TPFLAGS_HEAPTYPE) from pypy.module.cpyext.pyobject import PyObject, make_ref, from_ref from pypy.module.cpyext.modsupport import PyMethodDef +from pypy.module.cpyext.api import Py_bufferP P, FT, PyO = Ptr, 
FuncType, PyObject @@ -58,8 +59,7 @@ writebufferproc = P(FT([PyO, Py_ssize_t, rffi.VOIDPP], Py_ssize_t)) segcountproc = P(FT([PyO, Py_ssize_tP], Py_ssize_t)) charbufferproc = P(FT([PyO, Py_ssize_t, rffi.CCHARPP], Py_ssize_t)) -## We don't support new buffer interface for now -getbufferproc = rffi.VOIDP +getbufferproc = P(FT([PyO, Py_bufferP, rffi.INT_real], rffi.INT_real)) releasebufferproc = rffi.VOIDP diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -597,6 +597,11 @@ @jit.dont_look_inside def load_module(space, w_modulename, find_info, reuse=False): + """Like load_module() in CPython's import.c, this will normally + make a module object, store it in sys.modules, execute code in it, + and then fetch it again from sys.modules. But this logic is not + used if we're calling a PEP302 loader. + """ if find_info is None: return @@ -625,17 +630,15 @@ try: if find_info.modtype == PY_SOURCE: - load_source_module( + return load_source_module( space, w_modulename, w_mod, find_info.filename, find_info.stream.readall(), find_info.stream.try_to_find_file_descriptor()) - return w_mod elif find_info.modtype == PY_COMPILED: magic = _r_long(find_info.stream) timestamp = _r_long(find_info.stream) - load_compiled_module(space, w_modulename, w_mod, find_info.filename, + return load_compiled_module(space, w_modulename, w_mod, find_info.filename, magic, timestamp, find_info.stream.readall()) - return w_mod elif find_info.modtype == PKG_DIRECTORY: w_path = space.newlist([space.wrap(find_info.filename)]) space.setattr(w_mod, space.wrap('__path__'), w_path) @@ -644,14 +647,13 @@ if find_info is None: return w_mod try: - load_module(space, w_modulename, find_info, reuse=True) + w_mod = load_module(space, w_modulename, find_info, + reuse=True) finally: try: find_info.stream.close() except StreamErrors: pass - # fetch the module again, in case of "substitution" - w_mod = check_sys_modules(space, 
w_modulename) return w_mod elif find_info.modtype == C_EXTENSION and has_so_extension(space): load_c_extension(space, find_info.filename, space.str_w(w_modulename)) @@ -677,13 +679,6 @@ try: if find_info: w_mod = load_module(space, w_modulename, find_info) - try: - w_mod = space.getitem(space.sys.get("modules"), - w_modulename) - except OperationError as oe: - if not oe.match(space, space.w_KeyError): - raise - raise OperationError(space.w_ImportError, w_modulename) if w_parent is not None: space.setattr(w_parent, space.wrap(partname), w_mod) return w_mod @@ -875,20 +870,32 @@ pycode = ec.compiler.compile(source, pathname, 'exec', 0) return pycode -def exec_code_module(space, w_mod, code_w): +def exec_code_module(space, w_mod, code_w, w_modulename, check_afterwards=True): + """ + Execute a code object in the module's dict. Returns + 'sys.modules[modulename]', which must exist. + """ w_dict = space.getattr(w_mod, space.wrap('__dict__')) space.call_method(w_dict, 'setdefault', space.wrap('__builtins__'), space.wrap(space.builtin)) code_w.exec_code(space, w_dict, w_dict) + if check_afterwards: + w_mod = check_sys_modules(space, w_modulename) + if w_mod is None: + raise oefmt(space.w_ImportError, From pypy.commits at gmail.com Thu Sep 1 12:00:54 2016 From: pypy.commits at gmail.com (vext01) Date: Thu, 01 Sep 2016 09:00:54 -0700 (PDT) Subject: [pypy-commit] pypy asmmemmgr-for-code-only: Fix translation. Message-ID: <57c850b6.c41f1c0a.c732d.4489@mx.google.com> Author: Edd Barrett Branch: asmmemmgr-for-code-only Changeset: r86822:f777170ec79b Date: 2016-09-01 17:00 +0100 http://bitbucket.org/pypy/pypy/changeset/f777170ec79b/ Log: Fix translation. 
diff --git a/rpython/rlib/rmmap.py b/rpython/rlib/rmmap.py --- a/rpython/rlib/rmmap.py +++ b/rpython/rlib/rmmap.py @@ -746,20 +746,20 @@ def set_pages_executable(addr, size): assert lltype.typeOf(addr) == rffi.CCHARP assert isinstance(size, int) - #assert size >= 0 rv = mprotect(addr, size, PROT_EXEC | PROT_READ) if int(rv) < 0: from rpython.rlib import debug debug.fatalerror_notb("set_pages_executable failed") + set_pages_executable._annenforceargs_ = (None, int) def set_pages_writable(addr, size): assert lltype.typeOf(addr) == rffi.CCHARP assert isinstance(size, int) - #assert size >= 0 rv = mprotect(addr, size, PROT_WRITE | PROT_READ) if int(rv) < 0: from rpython.rlib import debug debug.fatalerror_notb("set_pages_writable failed") + set_pages_writable._annenforceargs_ = (None, int) def clear_large_memory_chunk_aligned(addr, map_size): addr = rffi.cast(PTR, addr) From pypy.commits at gmail.com Thu Sep 1 13:40:26 2016 From: pypy.commits at gmail.com (rlamy) Date: Thu, 01 Sep 2016 10:40:26 -0700 (PDT) Subject: [pypy-commit] pypy default: Improve error message when attempting to modify annotations in a fixed graph Message-ID: <57c8680a.262ec20a.f163a.308c@mx.google.com> Author: Ronan Lamy Branch: Changeset: r86823:30767c452330 Date: 2016-09-01 18:39 +0100 http://bitbucket.org/pypy/pypy/changeset/30767c452330/ Log: Improve error message when attempting to modify annotations in a fixed graph diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -164,8 +164,13 @@ # annotations that are passed in, and don't annotate the old # graph -- it's already low-level operations! 
for a, s_newarg in zip(block.inputargs, cells): - s_oldarg = self.binding(a) - assert annmodel.unionof(s_oldarg, s_newarg) == s_oldarg + s_oldarg = a.annotation + if not s_oldarg.contains(s_newarg): + raise annmodel.AnnotatorError( + "Late-stage annotation is not allowed to modify the " + "existing annotation for variable %s: %s" % + (a, s_oldarg)) + else: assert not self.frozen if block not in self.annotated: From pypy.commits at gmail.com Thu Sep 1 15:19:37 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 01 Sep 2016 12:19:37 -0700 (PDT) Subject: [pypy-commit] pypy redirect-assembler-jitlog: translation issue, assigning unique number to tmp_callback jit cell token Message-ID: <57c87f49.cb7f1c0a.fa27d.922f@mx.google.com> Author: Richard Plangger Branch: redirect-assembler-jitlog Changeset: r86824:cd97133d768d Date: 2016-09-01 21:18 +0200 http://bitbucket.org/pypy/pypy/changeset/cd97133d768d/ Log: translation issue, assigning unique number to tmp_callback jit cell token diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -540,9 +540,9 @@ looptoken._x86_ops_offset = ops_offset looptoken._ll_function_addr = rawstart + functionpos - if logger: - log = logger.log_trace(jl.MARK_TRACE_ASM, None, self.mc) - log.write(inputargs, operations, ops_offset=ops_offset) + if log and logger: + l = logger.log_trace(jl.MARK_TRACE_ASM, None, self.mc) + l.write(inputargs, operations, ops_offset=ops_offset) # legacy if logger.logger_ops: diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -1122,6 +1122,11 @@ version of the code may end up replacing it. 
""" jitcell_token = make_jitcell_token(jitdriver_sd) + # + logger = jitdriver_sd.metainterp_sd.jitlog + jitcell_token.number = logger.next_id() + jl.tmp_callback(jitcell_token) + # nb_red_args = jitdriver_sd.num_red_args assert len(redargtypes) == nb_red_args inputargs = [] @@ -1158,8 +1163,6 @@ operations = get_deep_immutable_oplist(operations) cpu.compile_loop(inputargs, operations, jitcell_token, log=False) - jl.tmp_callback(looptoken) - if memory_manager is not None: # for tests memory_manager.keep_loop_alive(jitcell_token) return jitcell_token diff --git a/rpython/rlib/rjitlog/rjitlog.py b/rpython/rlib/rjitlog/rjitlog.py --- a/rpython/rlib/rjitlog/rjitlog.py +++ b/rpython/rlib/rjitlog/rjitlog.py @@ -350,6 +350,10 @@ def finish(self): jitlog_teardown() + def next_id(self): + self.trace_id += 1 + return self.trace_id + def start_new_trace(self, metainterp_sd, faildescr=None, entry_bridge=False, jd_name=""): # even if the logger is not enabled, increment the trace id self.trace_id += 1 From pypy.commits at gmail.com Thu Sep 1 17:45:19 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 01 Sep 2016 14:45:19 -0700 (PDT) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <57c8a16f.8f8e1c0a.41a0.1e2d@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r784:8cea640f2073 Date: 2016-09-01 23:45 +0200 http://bitbucket.org/pypy/pypy.org/changeset/8cea640f2073/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -9,13 +9,13 @@ - $64931 of $105000 (61.8%) + $64959 of $105000 (61.9%)
@@ -23,7 +23,7 @@
  • Read proposal
  • From pypy.commits at gmail.com Thu Sep 1 18:03:29 2016 From: pypy.commits at gmail.com (rlamy) Date: Thu, 01 Sep 2016 15:03:29 -0700 (PDT) Subject: [pypy-commit] pypy default: Fix translation and add a warning in annmodel.unionof Message-ID: <57c8a5b1.87d11c0a.2592e.27bb@mx.google.com> Author: Ronan Lamy Branch: Changeset: r86825:82980a978280 Date: 2016-09-01 23:02 +0100 http://bitbucket.org/pypy/pypy/changeset/82980a978280/ Log: Fix translation and add a warning in annmodel.unionof diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -165,7 +165,9 @@ # graph -- it's already low-level operations! for a, s_newarg in zip(block.inputargs, cells): s_oldarg = a.annotation - if not s_oldarg.contains(s_newarg): + # XXX: Should use s_oldarg.contains(s_newarg) but that breaks + # PyPy translation + if annmodel.unionof(s_oldarg, s_newarg) != s_oldarg: raise annmodel.AnnotatorError( "Late-stage annotation is not allowed to modify the " "existing annotation for variable %s: %s" % diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -750,6 +750,7 @@ s1 = pair(s1, s2).union() else: # this is just a performance shortcut + # XXX: This is a lie! Grep for no_side_effects_in_union and weep. 
if s1 != s2: s1 = pair(s1, s2).union() return s1 From pypy.commits at gmail.com Thu Sep 1 19:03:28 2016 From: pypy.commits at gmail.com (sbauman) Date: Thu, 01 Sep 2016 16:03:28 -0700 (PDT) Subject: [pypy-commit] pypy force-virtual-state: Merge Message-ID: <57c8b3c0.c3f0c20a.54624.9728@mx.google.com> Author: Spenser Andrew Bauman Branch: force-virtual-state Changeset: r86826:5649730037a2 Date: 2016-09-01 15:17 -0400 http://bitbucket.org/pypy/pypy/changeset/5649730037a2/ Log: Merge diff too long, truncating to 2000 out of 213792 lines diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -27,3 +27,6 @@ 40497617ae91caa1a394d8be6f9cd2de31cb0628 release-pypy3.3-v5.2 c09c19272c990a0611b17569a0085ad1ab00c8ff release-pypy2.7-v5.3 7e8df3df96417c16c2d55b41352ec82c9c69c978 release-pypy2.7-v5.3.1 +68bb3510d8212ae9efb687e12e58c09d29e74f87 release-pypy2.7-v5.4.0 +68bb3510d8212ae9efb687e12e58c09d29e74f87 release-pypy2.7-v5.4.0 +77392ad263504df011ccfcabf6a62e21d04086d0 release-pypy2.7-v5.4.0 diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -74,6 +74,7 @@ Seo Sanghyeon Ronny Pfannschmidt Justin Peel + Raffael Tfirst David Edelsohn Anders Hammarquist Jakub Gustak @@ -117,7 +118,6 @@ Wenzhu Man John Witulski Laurence Tratt - Raffael Tfirst Ivan Sichmann Freitas Greg Price Dario Bertini @@ -141,6 +141,7 @@ tav Taavi Burns Georg Brandl + Nicolas Truessel Bert Freudenberg Stian Andreassen Wanja Saatkamp @@ -211,6 +212,7 @@ Vaibhav Sood Alan McIntyre Alexander Sedov + p_zieschang at yahoo.de Attila Gobi Jasper.Schulz Christopher Pope @@ -221,6 +223,7 @@ Arjun Naik Valentina Mukhamedzhanova Stefano Parmesan + touilleMan Alexis Daboville Jens-Uwe Mager Carl Meyer @@ -229,12 +232,14 @@ Gabriel Lukas Vacek Kunal Grover + Aaron Gallagher Andrew Dalke Sylvain Thenault Jakub Stasiak Nathan Taylor Vladimir Kryachko Omer Katz + Mark Williams Jacek Generowicz Alejandro J. 
Cura Jacob Oscarson @@ -355,115 +360,12 @@ yasirs Michael Chermside Anna Ravencroft + pizi Andrey Churin Dan Crosta + Eli Stevens Tobias Diaz Julien Phalip Roman Podoliaka Dan Loewenherz - - Heinrich-Heine University, Germany - Open End AB (formerly AB Strakt), Sweden - merlinux GmbH, Germany - tismerysoft GmbH, Germany - Logilab Paris, France - DFKI GmbH, Germany - Impara, Germany - Change Maker, Sweden - University of California Berkeley, USA - Google Inc. - King's College London - -The PyPy Logo as used by http://speed.pypy.org and others was created -by Samuel Reis and is distributed on terms of Creative Commons Share Alike -License. - -License for 'lib-python/2.7' -============================ - -Except when otherwise stated (look for LICENSE files or copyright/license -information at the beginning of each file) the files in the 'lib-python/2.7' -directory are all copyrighted by the Python Software Foundation and licensed -under the terms that you can find here: https://docs.python.org/2/license.html - -License for 'pypy/module/unicodedata/' -====================================== - -The following files are from the website of The Unicode Consortium -at http://www.unicode.org/. For the terms of use of these files, see -http://www.unicode.org/terms_of_use.html . Or they are derived from -files from the above website, and the same terms of use apply. - - CompositionExclusions-*.txt - EastAsianWidth-*.txt - LineBreak-*.txt - UnicodeData-*.txt - UnihanNumeric-*.txt - -License for 'dotviewer/font/' -============================= - -Copyright (C) 2008 The Android Open Source Project - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -Detailed license information is contained in the NOTICE file in the -directory. - - -Licenses and Acknowledgements for Incorporated Software -======================================================= - -This section is an incomplete, but growing list of licenses and -acknowledgements for third-party software incorporated in the PyPy -distribution. - -License for 'Tcl/Tk' --------------------- - -This copy of PyPy contains library code that may, when used, result in -the Tcl/Tk library to be loaded. PyPy also includes code that may be -regarded as being a copy of some parts of the Tcl/Tk header files. -You may see a copy of the License for Tcl/Tk in the file -`lib_pypy/_tkinter/license.terms` included here. - -License for 'bzip2' -------------------- - -This copy of PyPy may be linked (dynamically or statically) with the -bzip2 library. You may see a copy of the License for bzip2/libbzip2 at - - http://www.bzip.org/1.0.5/bzip2-manual-1.0.5.html - -License for 'openssl' ---------------------- - -This copy of PyPy may be linked (dynamically or statically) with the -openssl library. You may see a copy of the License for OpenSSL at - - https://www.openssl.org/source/license.html - -License for 'gdbm' ------------------- - -The gdbm module includes code from gdbm.h, which is distributed under -the terms of the GPL license version 2 or any later version. Thus the -gdbm module, provided in the file lib_pypy/gdbm.py, is redistributed -under the terms of the GPL license as well. 
- -License for 'rpython/rlib/rvmprof/src' --------------------------------------- - -The code is based on gperftools. You may see a copy of the License for it at - - https://github.com/gperftools/gperftools/blob/master/COPYING + werat diff --git a/_pytest/python.py b/_pytest/python.py --- a/_pytest/python.py +++ b/_pytest/python.py @@ -498,7 +498,10 @@ """ Collector for test methods. """ def collect(self): if hasinit(self.obj): - pytest.skip("class %s.%s with __init__ won't get collected" % ( + # XXX used to be skip(), but silently skipping classes + # XXX just because they have been written long ago is + # XXX imho a very, very, very bad idea + pytest.fail("class %s.%s with __init__ won't get collected" % ( self.obj.__module__, self.obj.__name__, )) diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -122,22 +122,24 @@ """Dummy method to let some easy_install packages that have optional C speedup components. 
""" + def customize(executable, flags): + command = compiler.executables[executable] + flags + setattr(compiler, executable, command) + if compiler.compiler_type == "unix": compiler.compiler_so.extend(['-O2', '-fPIC', '-Wimplicit']) compiler.shared_lib_extension = get_config_var('SO') if "CPPFLAGS" in os.environ: cppflags = shlex.split(os.environ["CPPFLAGS"]) - compiler.compiler.extend(cppflags) - compiler.compiler_so.extend(cppflags) - compiler.linker_so.extend(cppflags) + for executable in ('compiler', 'compiler_so', 'linker_so'): + customize(executable, cppflags) if "CFLAGS" in os.environ: cflags = shlex.split(os.environ["CFLAGS"]) - compiler.compiler.extend(cflags) - compiler.compiler_so.extend(cflags) - compiler.linker_so.extend(cflags) + for executable in ('compiler', 'compiler_so', 'linker_so'): + customize(executable, cflags) if "LDFLAGS" in os.environ: ldflags = shlex.split(os.environ["LDFLAGS"]) - compiler.linker_so.extend(ldflags) + customize('linker_so', ldflags) from sysconfig_cpython import ( diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py --- a/lib_pypy/_ctypes/basics.py +++ b/lib_pypy/_ctypes/basics.py @@ -167,7 +167,7 @@ else: return self.value - def __buffer__(self): + def __buffer__(self, flags): return buffer(self._buffer) def _get_b_base(self): diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -342,7 +342,7 @@ thisarg = cast(thisvalue, POINTER(POINTER(c_void_p))) keepalives, newargs, argtypes, outargs, errcheckargs = ( self._convert_args(argtypes, args[1:], kwargs)) - newargs.insert(0, thisvalue.value) + newargs.insert(0, thisarg) argtypes.insert(0, c_void_p) else: thisarg = None diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py --- a/lib_pypy/cffi/recompiler.py +++ b/lib_pypy/cffi/recompiler.py @@ -515,7 +515,7 @@ tovar, errcode) return # - elif isinstance(tp, (model.StructOrUnion, model.EnumType)): + elif 
isinstance(tp, model.StructOrUnionOrEnum): # a struct (not a struct pointer) as a function argument self._prnt(' if (_cffi_to_c((char *)&%s, _cffi_type(%d), %s) < 0)' % (tovar, self._gettypenum(tp), fromvar)) @@ -572,7 +572,7 @@ elif isinstance(tp, model.ArrayType): return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % ( var, self._gettypenum(model.PointerType(tp.item))) - elif isinstance(tp, model.StructType): + elif isinstance(tp, model.StructOrUnion): if tp.fldnames is None: raise TypeError("'%s' is used as %s, but is opaque" % ( tp._get_c_name(), context)) diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -308,7 +308,7 @@ elif isinstance(tp, model.ArrayType): return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % ( var, self._gettypenum(model.PointerType(tp.item))) - elif isinstance(tp, model.StructType): + elif isinstance(tp, model.StructOrUnion): if tp.fldnames is None: raise TypeError("'%s' is used as %s, but is opaque" % ( tp._get_c_name(), context)) diff --git a/lib_pypy/gdbm.py b/lib_pypy/gdbm.py --- a/lib_pypy/gdbm.py +++ b/lib_pypy/gdbm.py @@ -137,6 +137,8 @@ lib.gdbm_sync(self.__ll_dbm) def open(filename, flags='r', mode=0666): + if isinstance(filename, unicode): + filename = filename.encode() if flags[0] == 'r': iflags = lib.GDBM_READER elif flags[0] == 'w': diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -58,16 +58,16 @@ # General information about the project. project = u'PyPy' -copyright = u'2015, The PyPy Project' +copyright = u'2016, The PyPy Project' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. -version = '4.0' +version = '5.4' # The full version, including alpha/beta/rc tags. 
-release = '4.0.0' +release = '5.4.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -44,6 +44,7 @@ Seo Sanghyeon Ronny Pfannschmidt Justin Peel + Raffael Tfirst David Edelsohn Anders Hammarquist Jakub Gustak @@ -87,7 +88,6 @@ Wenzhu Man John Witulski Laurence Tratt - Raffael Tfirst Ivan Sichmann Freitas Greg Price Dario Bertini @@ -111,6 +111,7 @@ tav Taavi Burns Georg Brandl + Nicolas Truessel Bert Freudenberg Stian Andreassen Wanja Saatkamp @@ -181,6 +182,7 @@ Vaibhav Sood Alan McIntyre Alexander Sedov + p_zieschang at yahoo.de Attila Gobi Jasper.Schulz Christopher Pope @@ -191,6 +193,7 @@ Arjun Naik Valentina Mukhamedzhanova Stefano Parmesan + touilleMan Alexis Daboville Jens-Uwe Mager Carl Meyer @@ -199,12 +202,14 @@ Gabriel Lukas Vacek Kunal Grover + Aaron Gallagher Andrew Dalke Sylvain Thenault Jakub Stasiak Nathan Taylor Vladimir Kryachko Omer Katz + Mark Williams Jacek Generowicz Alejandro J. Cura Jacob Oscarson @@ -325,9 +330,12 @@ yasirs Michael Chermside Anna Ravencroft + pizi Andrey Churin Dan Crosta + Eli Stevens Tobias Diaz Julien Phalip Roman Podoliaka Dan Loewenherz + werat diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -6,6 +6,7 @@ .. toctree:: + release-pypy2.7-v5.4.0.rst release-pypy2.7-v5.3.1.rst release-pypy2.7-v5.3.0.rst release-5.1.1.rst diff --git a/pypy/doc/index-of-whatsnew.rst b/pypy/doc/index-of-whatsnew.rst --- a/pypy/doc/index-of-whatsnew.rst +++ b/pypy/doc/index-of-whatsnew.rst @@ -7,6 +7,7 @@ .. 
toctree:: whatsnew-head.rst + whatsnew-pypy2-5.4.0.rst whatsnew-pypy2-5.3.1.rst whatsnew-pypy2-5.3.0.rst whatsnew-5.1.0.rst diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -57,7 +57,7 @@ -------------- Our cpyext C-API compatiblity layer can now run upstream NumPy unmodified. -Release PyPy2.7-v5.3 still fails about 200 of the ~6000 test in the NumPy +Release PyPy2.7-v5.4 still fails about 60 of the ~6000 test in the NumPy test suite. We could use help analyzing the failures and fixing them either as patches to upstream NumPy, or as fixes to PyPy. diff --git a/pypy/doc/release-pypy2.7-v5.4.0.rst b/pypy/doc/release-pypy2.7-v5.4.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-pypy2.7-v5.4.0.rst @@ -0,0 +1,218 @@ +============ +PyPy2.7 v5.4 +============ + +We have released PyPy2.7 v5.4, a little under two months after PyPy2.7 v5.3. +This new PyPy2.7 release includes incremental improvements to our C-API +compatability layer (cpyext), enabling us to pass over 99% of the upstream +numpy `test suite`_. We updated built-in cffi_ support to version 1.8, +which now supports the "limited API" mode for c-extensions on +CPython >=3.2. + +We improved tooling for the PyPy JIT_, and expanded VMProf +support to OpenBSD and Dragon Fly BSD + +As always, this release fixed many issues and bugs raised by the +growing community of PyPy users. We strongly recommend updating. + +You can download the PyPy2.7 v5.4 release here: + + http://pypy.org/download.html + +We would like to thank our donors for the continued support of the PyPy +project. + +We would also like to thank our contributors and +encourage new people to join the project. PyPy has many +layers and we need help with all of them: `PyPy`_ and `RPython`_ documentation +improvements, tweaking popular `modules`_ to run on pypy, or general `help`_ +with making RPython's JIT even better. + +.. 
_`test suite`: https://bitbucket.org/pypy/pypy/wiki/Adventures%20in%20cpyext%20compatibility +.. _cffi: https://cffi.readthedocs.org +.. _JIT: https://morepypy.blogspot.com.au/2016/08/pypy-tooling-upgrade-jitviewer-and.html +.. _`PyPy`: http://doc.pypy.org +.. _`RPython`: https://rpython.readthedocs.org +.. _`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly +.. _`help`: http://doc.pypy.org/en/latest/project-ideas.html + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison) +due to its integrated tracing JIT compiler. + +We also welcome developers of other `dynamic languages`_ to see what RPython +can do for them. + +This release supports: + + * **x86** machines on most common operating systems + (Linux 32/64 bits, Mac OS X 64 bits, Windows 32 bits, OpenBSD, FreeBSD) + + * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux, + + * big- and little-endian variants of **PPC64** running Linux, + + * **s390x** running Linux + +.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org +.. 
_`dynamic languages`: http://pypyjs.org + +Other Highlights (since 5.3 released in June 2016) +========================================================= + +* New features: + + * Add `sys.{get,set}dlopenflags` + + * Improve CPython compatibility of 'is' for small and empty strings + + * Support for rgc.FinalizerQueue in the Boehm garbage collector + + * (RPython) support spawnv() if it is called in C `_spawnv` on windows + + * Fill in more slots when creating a PyTypeObject from a W_TypeObject, + like `__hex__`, `__sub__`, `__pow__` + + * Copy CPython's logic more closely for `isinstance()` and + `issubclass()` as well as `type.__instancecheck__()` and + `type.__subclasscheck__()` + + * Expose the name of CDLL objects + + * Rewrite the win32 dependencies of `subprocess` to use cffi + instead of ctypes + + * Improve the `JIT logging`_ facitilities + + * (RPython) make int * string work + + * Allocate all RPython strings with one extra byte, normally + unused. This now allows `ffi.from_buffer(string)` in CFFI with + no copy + + * Adds a new commandline option `-X track-resources` that will + produce a `ResourceWarning` when the GC closes a file or socket. + The traceback for the place where the file or socket was allocated + is given as well, which aids finding places where `close()` is + missing + + * Add missing `PyObject_Realloc`, `PySequence_GetSlice` + + * `type.__dict__` now returns a `dict_proxy` object, like on CPython. + Previously it returned what looked like a regular dict object (but + it was already read-only) + + * (RPython) add `rposix.{get,set}_inheritable()`, needed by Python 3.5 + + * (RPython) add `rposix_scandir` portably, needed for Python 3.5 + + * Increased but incomplete support for memoryview attributes (format, + itemsize, ...) 
which also adds support for `PyMemoryView_FromObject` + +* Bug Fixes + + * Reject `mkdir()` in read-only sandbox filesystems + + * Add include guards to pymem.h to enable c++ compilation + + * Fix build breakage on OpenBSD and FreeBSD + + * Support OpenBSD, Dragon Fly BSD in VMProf + + * Fix for `bytearray('').replace('a', 'ab')` for empty strings + + * Sync internal state before calling `PyFile_AsFile()` + + * Allow writing to a char* from `PyString_AsString()` until it is + forced, also refactor `PyStringObject` to look like CPython's + and allow subclassing `PyString_Type` and `PyUnicode_Type` + + * Rpython rffi's socket(2) wrapper did not preserve errno + + * Refactor `PyTupleObject` to look like CPython's and allow + subclassing `PyTuple_Type` + + * Allow c-level assignment to a function pointer in a C-API + user-defined type after calling PyTypeReady by retrieving + a pointer to the function via offsets + rather than storing the function pointer itself + + * Use `madvise(MADV_FREE)`, or if that doesn't exist + `MADV_DONTNEED` on freed arenas to release memory back to the + OS for resource monitoring + + * Fix overflow detection in conversion of float to 64-bit integer + in timeout argument to various thread/threading primitives + + * Fix win32 outputting `\r\r\n` in some cases + + * Make `hash(-1)` return -2, as CPython does, and fix all the + ancilary places this matters + + * Fix `PyNumber_Check()` to behave more like CPython + + * (VMProf) Try hard to not miss any Python-level frame in the + captured stacks, even if there is metainterp or blackhole interp + involved. 
Also fix the stacklet (greenlet) support + + * Fix a critical JIT bug where `raw_malloc` -equivalent functions + lost the additional flags + + * Fix the mapdict cache for subclasses of builtin types that + provide a dict + + * Issues reported with our previous release were resolved_ after + reports from users on our issue tracker at + https://bitbucket.org/pypy/pypy/issues or on IRC at #pypy + +* Performance improvements: + + * Add a before_call()-like equivalent before a few operations like + `malloc_nursery`, to move values from registers into other registers + instead of to the stack. + + * More tightly pack the stack when calling with `release gil` + + * Support `int_floordiv()`, `int_mod()` in the JIT more efficiently + and add `rarithmetic.int_c_div()`, `rarithmetic.int_c_mod()` as + explicit interfaces. Clarify that `int_floordiv()` does python-style + rounding, unlike `llop.int_floordiv()`. + + * Use `ll_assert` (more often) in incminimark + + * (Testing) Simplify handling of interp-level tests and make it + more forward-compatible. Don't use interp-level RPython + machinery to test building app-level extensions in cpyext + + * Constant-fold `ffi.offsetof("structname", "fieldname")` in cffi + backend + + * Avoid a case in the JIT, where successive guard failures in + the same Python function end up as successive levels of + RPython functions, eventually exhausting the stack, while at + app-level the traceback is very short + + * Check for NULL returns from calls to the raw-malloc and raise, + rather than a guard + + * Improve `socket.recvfrom()` so that it copies less if possible + + * When generating C code, inline `goto` to blocks with only one + predecessor, generating less lines of code + + * When running the final backend-optimization phase before emitting + C code, constant-fold calls to we_are_jitted to return False. 
This + makes the generated C code a few percent smaller + + * Refactor the `uid_t/gid_t` handling in `rlib.rposix` and in + `interp_posix.py`, based on the clean-up of CPython 2.7.x + +.. _`JIT logging`: https://morepypy.blogspot.com/2016/08/pypy-tooling-upgrade-jitviewer-and.html +.. _resolved: http://doc.pypy.org/en/latest/whatsnew-5.4.0.html + +Please update, and continue to help us make PyPy better. + +Cheers diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -1,146 +1,9 @@ ========================== -What's new in PyPy2.7 5.3+ +What's new in PyPy2.7 5.4+ ========================== -.. this is a revision shortly after release-pypy2.7-v5.3 -.. startrev: 873218a739f1 +.. this is a revision shortly after release-pypy2.7-v5.4 +.. startrev: 522736f816dc -.. 418b05f95db5 -Improve CPython compatibility for ``is``. Now code like ``if x is ():`` -works the same way as it does on CPython. See http://pypy.readthedocs.io/en/latest/cpython_differences.html#object-identity-of-primitive-values-is-and-id . - -.. pull request #455 -Add sys.{get,set}dlopenflags, for cpyext extensions. - -.. branch: fix-gen-dfa - -Resolves an issue with the generator script to build the dfa for Python syntax. - -.. branch: z196-support - -Fixes a critical issue in the register allocator and extends support on s390x. -PyPy runs and translates on the s390x revisions z10 (released February 2008, experimental) -and z196 (released August 2010) in addition to zEC12 and z13. -To target e.g. z196 on a zEC12 machine supply CFLAGS="-march=z196" to your shell environment. - -.. branch: s390x-5.3-catchup - -Implement the backend related changes for s390x. - -.. branch: incminimark-ll_assert -.. branch: vmprof-openbsd - -.. branch: testing-cleanup - -Simplify handling of interp-level tests and make it more forward- -compatible. - -.. 
branch: pyfile-tell -Sync w_file with the c-level FILE* before returning FILE* in PyFile_AsFile - -.. branch: rw-PyString_AS_STRING -Allow rw access to the char* returned from PyString_AS_STRING, also refactor -PyStringObject to look like cpython's and allow subclassing PyString_Type and -PyUnicode_Type - -.. branch: save_socket_errno - -Bug fix: if ``socket.socket()`` failed, the ``socket.error`` did not show -the errno of the failing system call, but instead some random previous -errno. - -.. branch: PyTuple_Type-subclass - -Refactor PyTupleObject to look like cpython's and allow subclassing -PyTuple_Type - -.. branch: call-via-pyobj - -Use offsets from PyTypeObject to find actual c function to call rather than -fixed functions, allows function override after PyType_Ready is called - -.. branch: issue2335 - -Avoid exhausting the stack in the JIT due to successive guard -failures in the same Python function ending up as successive levels of -RPython functions, while at app-level the traceback is very short - -.. branch: use-madv-free - -Try harder to memory to the OS. See e.g. issue #2336. Note that it does -not show up as a reduction of the VIRT column in ``top``, and the RES -column might also not show the reduction, particularly on Linux >= 4.5 or -on OS/X: it uses MADV_FREE, which only marks the pages as returnable to -the OS if the memory is low. - -.. branch: cpyext-slotdefs2 - -Fill in more slots when creating a PyTypeObject from a W_TypeObject -More slots are still TBD, like tp_print and richcmp - -.. branch: json-surrogates - -Align json module decode with the cpython's impl, fixes issue 2345 - -.. branch: issue2343 - -Copy CPython's logic more closely for handling of ``__instancecheck__()`` -and ``__subclasscheck__()``. Fixes issue 2343. - -.. branch: msvcrt-cffi - -Rewrite the Win32 dependencies of 'subprocess' to use cffi instead -of ctypes. 
This avoids importing ctypes in many small programs and -scripts, which in turn avoids enabling threads (because ctypes -creates callbacks at import time, and callbacks need threads). - -.. branch: new-jit-log - -The new logging facility that integrates with and adds features to vmprof.com. - -.. branch: jitlog-32bit - -Resolve issues to use the new logging facility on a 32bit system - -.. branch: ep2016sprint - -Trying harder to make hash(-1) return -2, like it does on CPython - -.. branch: jitlog-exact-source-lines - -Log exact line positions in debug merge points. - -.. branch: null_byte_after_str - -Allocate all RPython strings with one extra byte, normally unused. -It is used to hold a final zero in case we need some ``char *`` -representation of the string, together with checks like ``not -can_move()`` or object pinning. Main new thing that this allows: -``ffi.from_buffer(string)`` in CFFI. Additionally, and most -importantly, CFFI calls that take directly a string as argument don't -copy the string any more---this is like CFFI on CPython. - -.. branch: resource_warning - -Add a new command line option -X track-resources which will produce -ResourceWarnings when the GC closes unclosed files and sockets. - -.. branch: cpyext-realloc - -Implement PyObject_Realloc - -.. branch: inline-blocks - -Improve a little bit the readability of the generated C code - -.. branch: improve-vmprof-testing - -Improved vmprof support: now tries hard to not miss any Python-level -frame in the captured stacks, even if there is the metainterp or -blackhole interp involved. Also fix the stacklet (greenlet) support. - -.. branch: py2-mappingproxy - -``type.__dict__`` now returns a ``dict_proxy`` object, like on CPython. -Previously it returned what looked like a regular dict object (but it -was already read-only). +.. branch: rpython-resync +Backport rpython changes made directly on the py3k and py3.5 branches. 
diff --git a/pypy/doc/whatsnew-pypy2-5.4.0.rst b/pypy/doc/whatsnew-pypy2-5.4.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/whatsnew-pypy2-5.4.0.rst @@ -0,0 +1,165 @@ +========================= +What's new in PyPy2.7 5.4 +========================= + +.. this is a revision shortly after release-pypy2.7-v5.3 +.. startrev: 873218a739f1 + +.. 418b05f95db5 +Improve CPython compatibility for ``is``. Now code like ``if x is ():`` +works the same way as it does on CPython. See http://pypy.readthedocs.io/en/latest/cpython_differences.html#object-identity-of-primitive-values-is-and-id . + +.. pull request #455 +Add sys.{get,set}dlopenflags, for cpyext extensions. + +.. branch: fix-gen-dfa + +Resolves an issue with the generator script to build the dfa for Python syntax. + +.. branch: z196-support + +Fixes a critical issue in the register allocator and extends support on s390x. +PyPy runs and translates on the s390x revisions z10 (released February 2008, experimental) +and z196 (released August 2010) in addition to zEC12 and z13. +To target e.g. z196 on a zEC12 machine supply CFLAGS="-march=z196" to your shell environment. + +.. branch: s390x-5.3-catchup + +Implement the backend related changes for s390x. + +.. branch: incminimark-ll_assert +.. branch: vmprof-openbsd + +.. branch: testing-cleanup + +Simplify handling of interp-level tests and make it more forward- +compatible. + +.. branch: pyfile-tell +Sync w_file with the c-level FILE* before returning FILE* in PyFile_AsFile + +.. branch: rw-PyString_AS_STRING +Allow rw access to the char* returned from PyString_AS_STRING, also refactor +PyStringObject to look like cpython's and allow subclassing PyString_Type and +PyUnicode_Type + +.. branch: save_socket_errno + +Bug fix: if ``socket.socket()`` failed, the ``socket.error`` did not show +the errno of the failing system call, but instead some random previous +errno. + +.. 
branch: PyTuple_Type-subclass + +Refactor PyTupleObject to look like cpython's and allow subclassing +PyTuple_Type + +.. branch: call-via-pyobj + +Use offsets from PyTypeObject to find actual c function to call rather than +fixed functions, allows function override after PyType_Ready is called + +.. branch: issue2335 + +Avoid exhausting the stack in the JIT due to successive guard +failures in the same Python function ending up as successive levels of +RPython functions, while at app-level the traceback is very short + +.. branch: use-madv-free + +Try harder to memory to the OS. See e.g. issue #2336. Note that it does +not show up as a reduction of the VIRT column in ``top``, and the RES +column might also not show the reduction, particularly on Linux >= 4.5 or +on OS/X: it uses MADV_FREE, which only marks the pages as returnable to +the OS if the memory is low. + +.. branch: cpyext-slotdefs2 + +Fill in more slots when creating a PyTypeObject from a W_TypeObject +More slots are still TBD, like tp_print and richcmp + +.. branch: json-surrogates + +Align json module decode with the cpython's impl, fixes issue 2345 + +.. branch: issue2343 + +Copy CPython's logic more closely for handling of ``__instancecheck__()`` +and ``__subclasscheck__()``. Fixes issue 2343. + +.. branch: msvcrt-cffi + +Rewrite the Win32 dependencies of 'subprocess' to use cffi instead +of ctypes. This avoids importing ctypes in many small programs and +scripts, which in turn avoids enabling threads (because ctypes +creates callbacks at import time, and callbacks need threads). + +.. branch: new-jit-log + +The new logging facility that integrates with and adds features to vmprof.com. + +.. branch: jitlog-32bit + +Resolve issues to use the new logging facility on a 32bit system + +.. branch: ep2016sprint + +Trying harder to make hash(-1) return -2, like it does on CPython + +.. branch: jitlog-exact-source-lines + +Log exact line positions in debug merge points. + +.. 
branch: null_byte_after_str + +Allocate all RPython strings with one extra byte, normally unused. +It is used to hold a final zero in case we need some ``char *`` +representation of the string, together with checks like ``not +can_move()`` or object pinning. Main new thing that this allows: +``ffi.from_buffer(string)`` in CFFI. Additionally, and most +importantly, CFFI calls that take directly a string as argument don't +copy the string any more---this is like CFFI on CPython. + +.. branch: resource_warning + +Add a new command line option -X track-resources which will produce +ResourceWarnings when the GC closes unclosed files and sockets. + +.. branch: cpyext-realloc + +Implement PyObject_Realloc + +.. branch: inline-blocks + +Improve a little bit the readability of the generated C code + +.. branch: improve-vmprof-testing + +Improved vmprof support: now tries hard to not miss any Python-level +frame in the captured stacks, even if there is the metainterp or +blackhole interp involved. Also fix the stacklet (greenlet) support. + +.. branch: py2-mappingproxy + +``type.__dict__`` now returns a ``dict_proxy`` object, like on CPython. +Previously it returned what looked like a regular dict object (but it +was already read-only). + + +.. branch: const-fold-we-are-jitted + +Reduce the size of the generated C code by constant-folding ``we_are_jitted`` +in non-jitcode. + +.. branch: memoryview-attributes + +Support for memoryview attributes (format, itemsize, ...). +Extends the cpyext emulation layer. + +.. branch: redirect-assembler-jitlog + +Log more information to properly rebuild the redirected traces in jitviewer. + +.. 
branch: cpyext-subclass + +Copy Py_TPFLAGS_CHECKTYPES, Py_TPFLAGS_HAVE_INPLACEOPS when inheriting diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -208,7 +208,8 @@ def buffer_w(self, space, flags): w_impl = space.lookup(self, '__buffer__') if w_impl is not None: - w_result = space.get_and_call_function(w_impl, self) + w_result = space.get_and_call_function(w_impl, self, + space.newint(flags)) if space.isinstance_w(w_result, space.w_buffer): return w_result.buffer_w(space, flags) raise BufferInterfaceNotFound @@ -216,7 +217,8 @@ def readbuf_w(self, space): w_impl = space.lookup(self, '__buffer__') if w_impl is not None: - w_result = space.get_and_call_function(w_impl, self) + w_result = space.get_and_call_function(w_impl, self, + space.newint(space.BUF_FULL_RO)) if space.isinstance_w(w_result, space.w_buffer): return w_result.readbuf_w(space) raise BufferInterfaceNotFound @@ -224,7 +226,8 @@ def writebuf_w(self, space): w_impl = space.lookup(self, '__buffer__') if w_impl is not None: - w_result = space.get_and_call_function(w_impl, self) + w_result = space.get_and_call_function(w_impl, self, + space.newint(space.BUF_FULL)) if space.isinstance_w(w_result, space.w_buffer): return w_result.writebuf_w(space) raise BufferInterfaceNotFound @@ -232,7 +235,8 @@ def charbuf_w(self, space): w_impl = space.lookup(self, '__buffer__') if w_impl is not None: - w_result = space.get_and_call_function(w_impl, self) + w_result = space.get_and_call_function(w_impl, self, + space.newint(space.BUF_FULL_RO)) if space.isinstance_w(w_result, space.w_buffer): return w_result.charbuf_w(space) raise BufferInterfaceNotFound diff --git a/pypy/module/__builtin__/descriptor.py b/pypy/module/__builtin__/descriptor.py --- a/pypy/module/__builtin__/descriptor.py +++ b/pypy/module/__builtin__/descriptor.py @@ -23,6 +23,14 @@ self.w_objtype = w_type self.w_self = w_obj_or_type + def 
descr_repr(self, space): + if self.w_objtype is not None: + objtype_name = "<%s object>" % self.w_objtype.getname(space) + else: + objtype_name = 'NULL' + return space.wrap(", %s>" % ( + self.w_starttype.getname(space), objtype_name)) + def get(self, space, w_obj, w_type=None): if self.w_self is None or space.is_w(w_obj, space.w_None): return self @@ -84,7 +92,10 @@ 'super', __new__ = generic_new_descr(W_Super), __init__ = interp2app(W_Super.descr_init), + __repr__ = interp2app(W_Super.descr_repr), __thisclass__ = interp_attrproperty_w("w_starttype", W_Super), + __self__ = interp_attrproperty_w("w_self", W_Super), + __self_class__ = interp_attrproperty_w("w_objtype", W_Super), __getattribute__ = interp2app(W_Super.getattribute), __get__ = interp2app(W_Super.get), __doc__ = """\ diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py --- a/pypy/module/__builtin__/interp_classobj.py +++ b/pypy/module/__builtin__/interp_classobj.py @@ -38,6 +38,8 @@ class W_ClassObject(W_Root): + _immutable_fields_ = ['bases_w?[*]', 'w_dict?'] + def __init__(self, space, w_name, bases, w_dict): self.name = space.str_w(w_name) make_sure_not_resized(bases) @@ -75,6 +77,7 @@ "__bases__ items must be classes") self.bases_w = bases_w + @jit.unroll_safe def is_subclass_of(self, other): assert isinstance(other, W_ClassObject) if self is other: @@ -313,7 +316,7 @@ # This method ignores the instance dict and the __getattr__. # Returns None if not found. 
assert isinstance(name, str) - w_value = self.w_class.lookup(space, name) + w_value = jit.promote(self.w_class).lookup(space, name) if w_value is None: return None w_descr_get = space.lookup(w_value, '__get__') diff --git a/pypy/module/__builtin__/test/test_descriptor.py b/pypy/module/__builtin__/test/test_descriptor.py --- a/pypy/module/__builtin__/test/test_descriptor.py +++ b/pypy/module/__builtin__/test/test_descriptor.py @@ -250,6 +250,24 @@ assert super(B, B()).__thisclass__ is B assert super(A, B()).__thisclass__ is A + def test_super_self_selfclass(self): + class A(object): + pass + class B(A): + pass + b = B() + assert super(A, b).__self__ is b + assert super(A).__self__ is None + assert super(A, b).__self_class__ is B + assert super(A).__self_class__ is None + + def test_super_repr(self): + class A(object): + def __repr__(self): + return super(A, self).__repr__() + '!' + assert repr(A()).endswith('>!') + assert repr(super(A, A())) == ", >" + def test_property_docstring(self): assert property.__doc__.startswith('property') diff --git a/pypy/module/_sre/__init__.py b/pypy/module/_sre/__init__.py --- a/pypy/module/_sre/__init__.py +++ b/pypy/module/_sre/__init__.py @@ -1,4 +1,4 @@ -from pypy.interpreter.mixedmodule import MixedModule +from pypy.interpreter.mixedmodule import MixedModule class Module(MixedModule): @@ -7,7 +7,7 @@ interpleveldefs = { 'CODESIZE': 'space.wrap(interp_sre.CODESIZE)', - 'MAGIC': 'space.wrap(interp_sre.MAGIC)', + 'MAGIC': 'space.newint(20031017)', 'MAXREPEAT': 'space.wrap(interp_sre.MAXREPEAT)', 'compile': 'interp_sre.W_SRE_Pattern', 'getlower': 'interp_sre.w_getlower', diff --git a/pypy/module/_sre/interp_sre.py b/pypy/module/_sre/interp_sre.py --- a/pypy/module/_sre/interp_sre.py +++ b/pypy/module/_sre/interp_sre.py @@ -14,7 +14,7 @@ # Constants and exposed functions from rpython.rlib.rsre import rsre_core -from rpython.rlib.rsre.rsre_char import MAGIC, CODESIZE, MAXREPEAT, getlower, set_unicode_db +from 
rpython.rlib.rsre.rsre_char import CODESIZE, MAXREPEAT, getlower, set_unicode_db @unwrap_spec(char_ord=int, flags=int) diff --git a/pypy/module/_winreg/interp_winreg.py b/pypy/module/_winreg/interp_winreg.py --- a/pypy/module/_winreg/interp_winreg.py +++ b/pypy/module/_winreg/interp_winreg.py @@ -358,9 +358,15 @@ elif typ == rwinreg.REG_SZ or typ == rwinreg.REG_EXPAND_SZ: if not buflen: - return space.wrap("") - s = rffi.charp2strn(rffi.cast(rffi.CCHARP, buf), buflen) - return space.wrap(s) + s = "" + else: + # may or may not have a trailing NULL in the buffer. + buf = rffi.cast(rffi.CCHARP, buf) + if buf[buflen - 1] == '\x00': + buflen -= 1 + s = rffi.charp2strn(buf, buflen) + w_s = space.wrap(s) + return space.call_method(w_s, 'decode', space.wrap('mbcs')) elif typ == rwinreg.REG_MULTI_SZ: if not buflen: @@ -460,7 +466,7 @@ return space.newtuple([ convert_from_regdata(space, databuf, length, retType[0]), - space.wrap(retType[0]), + space.wrap(intmask(retType[0])), ]) @unwrap_spec(subkey=str) @@ -612,7 +618,7 @@ space.wrap(rffi.charp2str(valuebuf)), convert_from_regdata(space, databuf, length, retType[0]), - space.wrap(retType[0]), + space.wrap(intmask(retType[0])), ]) @unwrap_spec(index=int) diff --git a/pypy/module/_winreg/test/test_winreg.py b/pypy/module/_winreg/test/test_winreg.py --- a/pypy/module/_winreg/test/test_winreg.py +++ b/pypy/module/_winreg/test/test_winreg.py @@ -151,6 +151,7 @@ def test_readValues(self): from _winreg import OpenKey, EnumValue, QueryValueEx, EnumKey + from _winreg import REG_SZ, REG_EXPAND_SZ key = OpenKey(self.root_key, self.test_key_name) sub_key = OpenKey(key, "sub_key") index = 0 @@ -164,7 +165,10 @@ assert index == len(self.test_data) for name, value, type in self.test_data: - assert QueryValueEx(sub_key, name) == (value, type) + result = QueryValueEx(sub_key, name) + assert result == (value, type) + if type == REG_SZ or type == REG_EXPAND_SZ: + assert isinstance(result[0], unicode) # not string assert EnumKey(key, 0) == 
"sub_key" raises(EnvironmentError, EnumKey, key, 1) diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -120,8 +120,8 @@ constant_names = """ Py_TPFLAGS_READY Py_TPFLAGS_READYING Py_TPFLAGS_HAVE_GETCHARBUFFER METH_COEXIST METH_STATIC METH_CLASS Py_TPFLAGS_BASETYPE -METH_NOARGS METH_VARARGS METH_KEYWORDS METH_O -Py_TPFLAGS_HEAPTYPE Py_TPFLAGS_HAVE_CLASS +METH_NOARGS METH_VARARGS METH_KEYWORDS METH_O Py_TPFLAGS_HAVE_INPLACEOPS +Py_TPFLAGS_HEAPTYPE Py_TPFLAGS_HAVE_CLASS Py_TPFLAGS_HAVE_NEWBUFFER Py_LT Py_LE Py_EQ Py_NE Py_GT Py_GE Py_TPFLAGS_CHECKTYPES """.split() for name in constant_names: @@ -649,6 +649,7 @@ #('smalltable', rffi.CFixedArray(Py_ssize_t, 2)), ('internal', rffi.VOIDP) )) +Py_bufferP = lltype.Ptr(Py_buffer) @specialize.memo() def is_PyObject(TYPE): @@ -976,8 +977,10 @@ py_type_ready(space, get_capsule_type()) INIT_FUNCTIONS.append(init_types) from pypy.module.posix.interp_posix import add_fork_hook - reinit_tls = rffi.llexternal('%sThread_ReInitTLS' % prefix, [], lltype.Void, - compilation_info=eci) + _reinit_tls = rffi.llexternal('%sThread_ReInitTLS' % prefix, [], + lltype.Void, compilation_info=eci) + def reinit_tls(space): + _reinit_tls() add_fork_hook('child', reinit_tls) def init_function(func): diff --git a/pypy/module/cpyext/buffer.py b/pypy/module/cpyext/buffer.py --- a/pypy/module/cpyext/buffer.py +++ b/pypy/module/cpyext/buffer.py @@ -1,13 +1,17 @@ from pypy.interpreter.error import oefmt from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( - cpython_api, CANNOT_FAIL, Py_buffer) + cpython_api, CANNOT_FAIL, Py_buffer, Py_TPFLAGS_HAVE_NEWBUFFER) from pypy.module.cpyext.pyobject import PyObject @cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) -def PyObject_CheckBuffer(space, w_obj): +def PyObject_CheckBuffer(space, pyobj): """Return 1 if obj supports the buffer interface otherwise 0.""" - return 0 # the bf_getbuffer 
field is never filled by cpyext + as_buffer = pyobj.c_ob_type.c_tp_as_buffer + flags = pyobj.c_ob_type.c_tp_flags + if (flags & Py_TPFLAGS_HAVE_NEWBUFFER and as_buffer.c_bf_getbuffer): + return 1 + return 0 @cpython_api([PyObject, lltype.Ptr(Py_buffer), rffi.INT_real], rffi.INT_real, error=-1) diff --git a/pypy/module/cpyext/import_.py b/pypy/module/cpyext/import_.py --- a/pypy/module/cpyext/import_.py +++ b/pypy/module/cpyext/import_.py @@ -123,5 +123,4 @@ pathname = code.co_filename w_mod = importing.add_module(space, w_name) space.setattr(w_mod, space.wrap('__file__'), space.wrap(pathname)) - importing.exec_code_module(space, w_mod, code) - return w_mod + return importing.exec_code_module(space, w_mod, code, w_name) diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -29,8 +29,8 @@ #define PY_VERSION "2.7.10" /* PyPy version as a string */ -#define PYPY_VERSION "5.3.2-alpha0" -#define PYPY_VERSION_NUM 0x05030200 +#define PYPY_VERSION "5.4.1-alpha0" +#define PYPY_VERSION_NUM 0x05040100 /* Defined to mean a PyPy where cpyext holds more regular references to PyObjects, e.g. 
staying alive as long as the internal PyPy object diff --git a/pypy/module/cpyext/memoryobject.py b/pypy/module/cpyext/memoryobject.py --- a/pypy/module/cpyext/memoryobject.py +++ b/pypy/module/cpyext/memoryobject.py @@ -12,7 +12,7 @@ @cpython_api([PyObject], PyObject) def PyMemoryView_GET_BASE(space, w_obj): # return the obj field of the Py_buffer created by PyMemoryView_GET_BUFFER - raise NotImplementedError + raise NotImplementedError('PyMemoryView_GET_BUFFER') @cpython_api([PyObject], lltype.Ptr(Py_buffer), error=CANNOT_FAIL) def PyMemoryView_GET_BUFFER(space, w_obj): diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -3,15 +3,16 @@ import re from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib.rarithmetic import widen from pypy.module.cpyext.api import ( cpython_api, generic_cpy_call, PyObject, Py_ssize_t, Py_TPFLAGS_CHECKTYPES, - mangle_name, pypy_decl) + mangle_name, pypy_decl, Py_buffer, Py_bufferP) from pypy.module.cpyext.typeobjectdefs import ( unaryfunc, wrapperfunc, ternaryfunc, PyTypeObjectPtr, binaryfunc, ternaryfunc, getattrfunc, getattrofunc, setattrofunc, lenfunc, ssizeargfunc, inquiry, ssizessizeargfunc, ssizeobjargproc, iternextfunc, initproc, richcmpfunc, cmpfunc, hashfunc, descrgetfunc, descrsetfunc, objobjproc, objobjargproc, - readbufferproc, ssizessizeobjargproc) + readbufferproc, getbufferproc, ssizessizeobjargproc) from pypy.module.cpyext.pyobject import from_ref, make_ref, Py_DecRef from pypy.module.cpyext.pyerrors import PyErr_Occurred from pypy.module.cpyext.state import State @@ -22,6 +23,9 @@ from rpython.rlib.objectmodel import specialize from rpython.tool.sourcetools import func_renamer from rpython.rtyper.annlowlevel import llhelper +from pypy.module.sys.version import CPYTHON_VERSION + +PY3 = CPYTHON_VERSION[0] == 3 # XXX: Also defined in object.h Py_LT = 0 @@ -298,11 +302,23 @@ # Similar to Py_buffer 
_immutable_ = True - def __init__(self, ptr, size, w_obj): + def __init__(self, ptr, size, w_obj, format='B', shape=None, + strides=None, ndim=1, itemsize=1, readonly=True): self.ptr = ptr self.size = size self.w_obj = w_obj # kept alive - self.readonly = True + self.format = format + if not shape: + self.shape = [size] + else: + self.shape = shape + if not strides: + self.strides = [1] + else: + self.strides = strides + self.ndim = ndim + self.itemsize = itemsize + self.readonly = readonly def getlength(self): return self.size @@ -313,6 +329,15 @@ def get_raw_address(self): return rffi.cast(rffi.CCHARP, self.ptr) + def getformat(self): + return self.format + + def getshape(self): + return self.shape + + def getitemsize(self): + return self.itemsize + def wrap_getreadbuffer(space, w_self, w_args, func): func_target = rffi.cast(readbufferproc, func) with lltype.scoped_alloc(rffi.VOIDPP.TO, 1) as ptr: @@ -322,6 +347,30 @@ space.fromcache(State).check_and_raise_exception(always=True) return space.newbuffer(CPyBuffer(ptr[0], size, w_self)) +def wrap_getbuffer(space, w_self, w_args, func): + func_target = rffi.cast(getbufferproc, func) + with lltype.scoped_alloc(Py_buffer) as pybuf: + _flags = 0 + if space.len_w(w_args) > 0: + _flags = space.int_w(space.listview(w_args)[0]) + flags = rffi.cast(rffi.INT_real,_flags) + size = generic_cpy_call(space, func_target, w_self, pybuf, flags) + if widen(size) < 0: + space.fromcache(State).check_and_raise_exception(always=True) + ptr = pybuf.c_buf + size = pybuf.c_len + ndim = widen(pybuf.c_ndim) + shape = [pybuf.c_shape[i] for i in range(ndim)] + strides = [pybuf.c_strides[i] for i in range(ndim)] + if pybuf.c_format: + format = rffi.charp2str(pybuf.c_format) + else: + format = 'B' + return space.newbuffer(CPyBuffer(ptr, size, w_self, format=format, + ndim=ndim, shape=shape, strides=strides, + itemsize=pybuf.c_itemsize, + readonly=widen(pybuf.c_readonly))) + def get_richcmp_func(OP_CONST): def inner(space, w_self, w_args, func): 
func_target = rffi.cast(richcmpfunc, func) @@ -486,7 +535,6 @@ def slot_tp_getattro(space, w_self, w_name): return space.call_function(getattr_fn, w_self, w_name) api_func = slot_tp_getattro.api_func - elif name == 'tp_call': call_fn = w_type.getdictvalue(space, '__call__') if call_fn is None: @@ -542,6 +590,21 @@ w_stararg=w_args, w_starstararg=w_kwds) return space.call_args(space.get(new_fn, w_self), args) api_func = slot_tp_new.api_func + elif name == 'tp_as_buffer.c_bf_getbuffer': + buff_fn = w_type.getdictvalue(space, '__buffer__') + if buff_fn is None: + return + @cpython_api([PyObject, Py_bufferP, rffi.INT_real], + rffi.INT_real, header=None, error=-1) + @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name)) + def buff_w(space, w_self, pybuf, flags): + # XXX this is wrong, needs a test + raise oefmt(space.w_NotImplemented, + "calling bf_getbuffer on a builtin type not supported yet") + #args = Arguments(space, [w_self], + # w_stararg=w_args, w_starstararg=w_kwds) + #return space.call_args(space.get(buff_fn, w_self), args) + api_func = buff_w.api_func else: # missing: tp_as_number.nb_nonzero, tp_as_number.nb_coerce # tp_as_sequence.c_sq_contains, tp_as_sequence.c_sq_length @@ -850,11 +913,19 @@ slotdefs = eval(slotdefs_str) # PyPy addition slotdefs += ( - TPSLOT("__buffer__", "tp_as_buffer.c_bf_getreadbuffer", None, "wrap_getreadbuffer", ""), + # XXX that might not be what we want! + TPSLOT("__buffer__", "tp_as_buffer.c_bf_getbuffer", None, "wrap_getbuffer", ""), ) +if not PY3: + slotdefs += ( + TPSLOT("__buffer__", "tp_as_buffer.c_bf_getreadbuffer", None, "wrap_getreadbuffer", ""), + ) + + # partial sort to solve some slot conflicts: # Number slots before Mapping slots before Sequence slots. 
+# also prefer the new buffer interface # These are the only conflicts between __name__ methods def slotdef_sort_key(slotdef): if slotdef.slot_name.startswith('tp_as_number'): @@ -863,6 +934,10 @@ return 2 if slotdef.slot_name.startswith('tp_as_sequence'): return 3 + if slotdef.slot_name == 'tp_as_buffer.c_bf_getbuffer': + return 100 + if slotdef.slot_name == 'tp_as_buffer.c_bf_getreadbuffer': + return 101 return 0 slotdefs = sorted(slotdefs, key=slotdef_sort_key) diff --git a/pypy/module/cpyext/test/buffer_test.c b/pypy/module/cpyext/test/buffer_test.c new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/test/buffer_test.c @@ -0,0 +1,248 @@ +#ifdef _MSC_VER +#define _CRT_SECURE_NO_WARNINGS 1 +#endif +#include <Python.h> +#include <stdio.h> +#include <stdlib.h> + +/* + * Adapted from https://jakevdp.github.io/blog/2014/05/05/introduction-to-the-python-buffer-protocol, + * which is copyright Jake Vanderplas and released under the BSD license + */ + +/* Structure defines a 1-dimensional strided array */ +typedef struct{ + int* arr; + Py_ssize_t length; +} MyArray; + +/* initialize the array with integers 0...length */ +void initialize_MyArray(MyArray* a, long length){ + int i; + a->length = length; + a->arr = (int*)malloc(length * sizeof(int)); + for(i=0; i<length; i++){ + a->arr[i] = i; + } +} + +/* free the memory when finished */ +void deallocate_MyArray(MyArray* a){ + free(a->arr); + a->arr = NULL; +} + +/* tools to print the array */ +char* stringify(MyArray* a, int nmax){ + char* output = (char*) malloc(nmax * 20); + int k, pos = sprintf(&output[0], "["); + + for (k=0; k < a->length && k < nmax; k++){ + pos += sprintf(&output[pos], " %d", a->arr[k]); + } + if(a->length > nmax) + pos += sprintf(&output[pos], "..."); + sprintf(&output[pos], " ]"); + return output; +} + +void print_MyArray(MyArray* a, int nmax){ + char* s = stringify(a, nmax); + printf("%s", s); + free(s); +} + +/* This is where we define the PyMyArray object structure */ +typedef struct { + PyObject_HEAD + /* Type-specific fields go below. 
*/ + MyArray arr; +} PyMyArray; + + +/* This is the __init__ function, implemented in C */ +static int +PyMyArray_init(PyMyArray *self, PyObject *args, PyObject *kwds) +{ + int length = 0; + static char *kwlist[] = {"length", NULL}; + // init may have already been called + if (self->arr.arr != NULL) { + deallocate_MyArray(&self->arr); + } + + if (! PyArg_ParseTupleAndKeywords(args, kwds, "|i", kwlist, &length)) + return -1; + + if (length < 0) + length = 0; + + initialize_MyArray(&self->arr, length); + + return 0; +} + + +/* this function is called when the object is deallocated */ +static void +PyMyArray_dealloc(PyMyArray* self) +{ + deallocate_MyArray(&self->arr); + Py_TYPE(self)->tp_free((PyObject*)self); +} + + +/* This function returns the string representation of our object */ +static PyObject * +PyMyArray_str(PyMyArray * self) +{ + char* s = stringify(&self->arr, 10); + PyObject* ret = PyUnicode_FromString(s); + free(s); + return ret; +} + +/* Here is the buffer interface function */ +static int +PyMyArray_getbuffer(PyObject *obj, Py_buffer *view, int flags) +{ + PyMyArray* self = (PyMyArray*)obj; + fprintf(stdout, "in PyMyArray_getbuffer\n"); + if (view == NULL) { + fprintf(stdout, "view is NULL\n"); + PyErr_SetString(PyExc_ValueError, "NULL view in getbuffer"); + return -1; + } + if (flags == 0) { + fprintf(stdout, "flags is 0\n"); + PyErr_SetString(PyExc_ValueError, "flags == 0 in getbuffer"); + return -1; + } + + view->obj = (PyObject*)self; + view->buf = (void*)self->arr.arr; + view->len = self->arr.length * sizeof(int); + view->readonly = 0; + view->itemsize = sizeof(int); + view->format = "i"; // integer + view->ndim = 1; + view->shape = &self->arr.length; // length-1 sequence of dimensions + view->strides = &view->itemsize; // for the simple case we can do this + view->suboffsets = NULL; + view->internal = NULL; + + Py_INCREF(self); // need to increase the reference count + return 0; +} + +static PyBufferProcs PyMyArray_as_buffer = { +#if 
PY_MAJOR_VERSION < 3 + (readbufferproc)0, + (writebufferproc)0, + (segcountproc)0, + (charbufferproc)0, +#endif + (getbufferproc)PyMyArray_getbuffer, + (releasebufferproc)0, // we do not require any special release function +}; + + +/* Here is the type structure: we put the above functions in the appropriate place + in order to actually define the Python object type */ +static PyTypeObject PyMyArrayType = { + PyVarObject_HEAD_INIT(NULL, 0) + "pymyarray.PyMyArray", /* tp_name */ + sizeof(PyMyArray), /* tp_basicsize */ + 0, /* tp_itemsize */ + (destructor)PyMyArray_dealloc,/* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + 0, /* tp_reserved */ + (reprfunc)PyMyArray_str, /* tp_repr */ + 0, /* tp_as_number */ + 0, /* tp_as_sequence */ + 0, /* tp_as_mapping */ + 0, /* tp_hash */ + 0, /* tp_call */ + (reprfunc)PyMyArray_str, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + &PyMyArray_as_buffer, /* tp_as_buffer */ + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_NEWBUFFER, /* tp_flags */ + "PyMyArray object", /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + 0, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + (initproc)PyMyArray_init, /* tp_init */ +}; + +static PyMethodDef buffer_functions[] = { + {NULL, NULL} /* Sentinel */ +}; + +#if PY_MAJOR_VERSION >= 3 +static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, + "buffer_test", + "Module Doc", + -1, + buffer_functions, + NULL, + NULL, + NULL, + NULL, +}; +#define INITERROR return NULL + +/* Initialize this module. */ +#ifdef __GNUC__ +extern __attribute__((visibility("default"))) +#else +extern __declspec(dllexport) +#endif + +PyMODINIT_FUNC +PyInit_buffer_test(void) + +#else + +#define INITERROR return + +/* Initialize this module. 
*/ +#ifdef __GNUC__ +extern __attribute__((visibility("default"))) +#else +#endif + +PyMODINIT_FUNC +initbuffer_test(void) +#endif +{ +#if PY_MAJOR_VERSION >= 3 + PyObject *m= PyModule_Create(&moduledef); +#else + PyObject *m= Py_InitModule("buffer_test", buffer_functions); +#endif + if (m == NULL) + INITERROR; + PyMyArrayType.tp_new = PyType_GenericNew; + if (PyType_Ready(&PyMyArrayType) < 0) + INITERROR; + Py_INCREF(&PyMyArrayType); + PyModule_AddObject(m, "PyMyArray", (PyObject *)&PyMyArrayType); +#if PY_MAJOR_VERSION >=3 + return m; +#endif +} diff --git a/pypy/module/cpyext/test/test_arraymodule.py b/pypy/module/cpyext/test/test_arraymodule.py --- a/pypy/module/cpyext/test/test_arraymodule.py +++ b/pypy/module/cpyext/test/test_arraymodule.py @@ -87,4 +87,13 @@ module.switch_multiply() res = [1, 2, 3] * arr assert res == [2, 4, 6] + + def test_subclass(self): + module = self.import_module(name='array') + class Sub(module.array): + pass + + arr = Sub('i', [2]) + res = [1, 2, 3] * arr + assert res == [1, 2, 3, 1, 2, 3] diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -92,10 +92,20 @@ link_extra=link_extra, libraries=libraries) from pypy.module.imp.importing import get_so_extension - pydname = soname.new(purebasename=modname, ext=get_so_extension(space)) + ext = get_so_extension(space) + pydname = soname.new(purebasename=modname, ext=ext) soname.rename(pydname) return str(pydname) +def get_so_suffix(): + from imp import get_suffixes, C_EXTENSION + for suffix, mode, typ in get_suffixes(): + if typ == C_EXTENSION: + return suffix + else: + raise RuntimeError("This interpreter does not define a filename " + "suffix for C extensions!") + def compile_extension_module_applevel(space, modname, include_dirs=[], source_files=None, source_strings=None): """ @@ -126,13 +136,9 @@ source_strings=source_strings, compile_extra=compile_extra, 
link_extra=link_extra) - from imp import get_suffixes, C_EXTENSION - pydname = soname - for suffix, mode, typ in get_suffixes(): - if typ == C_EXTENSION: - pydname = soname.new(purebasename=modname, ext=suffix) - soname.rename(pydname) - break + ext = get_so_suffix() + pydname = soname.new(purebasename=modname, ext=ext) + soname.rename(pydname) return str(pydname) def freeze_refcnts(self): @@ -145,6 +151,24 @@ #state.print_refcounts() self.frozen_ll2callocations = set(ll2ctypes.ALLOCATED.values()) +class FakeSpace(object): + """Like TinyObjSpace, but different""" + def __init__(self, config): + from distutils.sysconfig import get_python_inc + self.config = config + self.include_dir = get_python_inc() + + def passthrough(self, arg): + return arg + listview = passthrough + str_w = passthrough + + def unwrap(self, args): + try: + return args.str_w(None) + except: + return args + class LeakCheckingTest(object): """Base class for all cpyext tests.""" spaceconfig = dict(usemodules=['cpyext', 'thread', '_rawffi', 'array', @@ -433,21 +457,8 @@ self.imported_module_names = [] if self.runappdirect: + fake = FakeSpace(self.space.config) def interp2app(func): - from distutils.sysconfig import get_python_inc - class FakeSpace(object): - def passthrough(self, arg): - return arg - listview = passthrough - str_w = passthrough - def unwrap(self, args): - try: - return args.str_w(None) - except: - return args - fake = FakeSpace() - fake.include_dir = get_python_inc() - fake.config = self.space.config def run(*args, **kwargs): for k in kwargs.keys(): if k not in func.unwrap_spec and not k.startswith('w_'): diff --git a/pypy/module/cpyext/test/test_memoryobject.py b/pypy/module/cpyext/test/test_memoryobject.py --- a/pypy/module/cpyext/test/test_memoryobject.py +++ b/pypy/module/cpyext/test/test_memoryobject.py @@ -1,17 +1,26 @@ -import pytest from pypy.module.cpyext.test.test_api import BaseApiTest +from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase + class 
TestMemoryViewObject(BaseApiTest): def test_fromobject(self, space, api): - if space.is_true(space.lt(space.sys.get('version_info'), - space.wrap((2, 7)))): - py.test.skip("unsupported before Python 2.7") - w_hello = space.newbytes("hello") + assert api.PyObject_CheckBuffer(w_hello) w_view = api.PyMemoryView_FromObject(w_hello) + w_char = space.call_method(w_view, '__getitem__', space.wrap(0)) + assert space.eq_w(w_char, space.wrap('h')) w_bytes = space.call_method(w_view, "tobytes") assert space.unwrap(w_bytes) == "hello" - @pytest.mark.skipif(True, reason='write a test for this') - def test_get_base_and_get_buffer(self, space, api): - assert False # XXX test PyMemoryView_GET_BASE, PyMemoryView_GET_BUFFER + +class AppTestBufferProtocol(AppTestCpythonExtensionBase): + def test_buffer_protocol(self): + import struct + module = self.import_module(name='buffer_test') + arr = module.PyMyArray(10) + y = memoryview(arr) + assert y.format == 'i' + assert y.shape == (10,) + s = y[3] + assert len(s) == struct.calcsize('i') + assert s == struct.pack('i', 3) diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -17,7 +17,9 @@ generic_cpy_call, Py_TPFLAGS_READY, Py_TPFLAGS_READYING, Py_TPFLAGS_HEAPTYPE, METH_VARARGS, METH_KEYWORDS, CANNOT_FAIL, Py_TPFLAGS_HAVE_GETCHARBUFFER, build_type_checkers, StaticObjectBuilder, - PyObjectFields, Py_TPFLAGS_BASETYPE, PyTypeObject, PyTypeObjectPtr) + PyObjectFields, Py_TPFLAGS_BASETYPE, PyTypeObject, PyTypeObjectPtr, + Py_TPFLAGS_HAVE_NEWBUFFER, Py_TPFLAGS_CHECKTYPES, + Py_TPFLAGS_HAVE_INPLACEOPS) from pypy.module.cpyext.methodobject import (W_PyCClassMethodObject, W_PyCWrapperObject, PyCFunction_NewEx, PyCFunction_typedef, PyMethodDef, W_PyCMethodObject, W_PyCFunctionObject) @@ -385,6 +387,8 @@ pto.c_tp_basicsize = base_pto.c_tp_basicsize if pto.c_tp_itemsize < base_pto.c_tp_itemsize: pto.c_tp_itemsize = base_pto.c_tp_itemsize + 
pto.c_tp_flags |= base_pto.c_tp_flags & Py_TPFLAGS_CHECKTYPES + pto.c_tp_flags |= base_pto.c_tp_flags & Py_TPFLAGS_HAVE_INPLACEOPS flags = rffi.cast(lltype.Signed, pto.c_tp_flags) base_object_pyo = make_ref(space, space.w_object) base_object_pto = rffi.cast(PyTypeObjectPtr, base_object_pyo) @@ -608,6 +612,7 @@ bf_getwritebuffer.api_func.get_wrapper(space)) pto.c_tp_as_buffer = c_buf pto.c_tp_flags |= Py_TPFLAGS_HAVE_GETCHARBUFFER + pto.c_tp_flags |= Py_TPFLAGS_HAVE_NEWBUFFER @cpython_api([PyObject], lltype.Void, header=None) def type_dealloc(space, obj): @@ -774,6 +779,8 @@ pto.c_tp_setattro = base.c_tp_setattro if not pto.c_tp_getattro: pto.c_tp_getattro = base.c_tp_getattro + if not pto.c_tp_as_buffer: + pto.c_tp_as_buffer = base.c_tp_as_buffer finally: Py_DecRef(space, base_pyo) @@ -810,8 +817,13 @@ # inheriting tp_as_* slots base = py_type.c_tp_base if base: - if not py_type.c_tp_as_number: py_type.c_tp_as_number = base.c_tp_as_number - if not py_type.c_tp_as_sequence: py_type.c_tp_as_sequence = base.c_tp_as_sequence + if not py_type.c_tp_as_number: + py_type.c_tp_as_number = base.c_tp_as_number + py_type.c_tp_flags |= base.c_tp_flags & Py_TPFLAGS_CHECKTYPES + py_type.c_tp_flags |= base.c_tp_flags & Py_TPFLAGS_HAVE_INPLACEOPS + if not py_type.c_tp_as_sequence: + py_type.c_tp_as_sequence = base.c_tp_as_sequence + py_type.c_tp_flags |= base.c_tp_flags & Py_TPFLAGS_HAVE_INPLACEOPS if not py_type.c_tp_as_mapping: py_type.c_tp_as_mapping = base.c_tp_as_mapping if not py_type.c_tp_as_buffer: py_type.c_tp_as_buffer = base.c_tp_as_buffer diff --git a/pypy/module/cpyext/typeobjectdefs.py b/pypy/module/cpyext/typeobjectdefs.py --- a/pypy/module/cpyext/typeobjectdefs.py +++ b/pypy/module/cpyext/typeobjectdefs.py @@ -5,6 +5,7 @@ Py_TPFLAGS_READYING, Py_TPFLAGS_READY, Py_TPFLAGS_HEAPTYPE) from pypy.module.cpyext.pyobject import PyObject, make_ref, from_ref from pypy.module.cpyext.modsupport import PyMethodDef +from pypy.module.cpyext.api import Py_bufferP P, FT, PyO = Ptr, 
FuncType, PyObject @@ -58,8 +59,7 @@ writebufferproc = P(FT([PyO, Py_ssize_t, rffi.VOIDPP], Py_ssize_t)) segcountproc = P(FT([PyO, Py_ssize_tP], Py_ssize_t)) charbufferproc = P(FT([PyO, Py_ssize_t, rffi.CCHARPP], Py_ssize_t)) -## We don't support new buffer interface for now -getbufferproc = rffi.VOIDP +getbufferproc = P(FT([PyO, Py_bufferP, rffi.INT_real], rffi.INT_real)) releasebufferproc = rffi.VOIDP diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -597,6 +597,11 @@ @jit.dont_look_inside def load_module(space, w_modulename, find_info, reuse=False): + """Like load_module() in CPython's import.c, this will normally + make a module object, store it in sys.modules, execute code in it, + and then fetch it again from sys.modules. But this logic is not + used if we're calling a PEP302 loader. + """ if find_info is None: return @@ -625,17 +630,15 @@ try: if find_info.modtype == PY_SOURCE: - load_source_module( + return load_source_module( space, w_modulename, w_mod, find_info.filename, find_info.stream.readall(), find_info.stream.try_to_find_file_descriptor()) - return w_mod elif find_info.modtype == PY_COMPILED: magic = _r_long(find_info.stream) timestamp = _r_long(find_info.stream) - load_compiled_module(space, w_modulename, w_mod, find_info.filename, + return load_compiled_module(space, w_modulename, w_mod, find_info.filename, magic, timestamp, find_info.stream.readall()) - return w_mod elif find_info.modtype == PKG_DIRECTORY: w_path = space.newlist([space.wrap(find_info.filename)]) space.setattr(w_mod, space.wrap('__path__'), w_path) @@ -644,14 +647,13 @@ if find_info is None: return w_mod try: - load_module(space, w_modulename, find_info, reuse=True) + w_mod = load_module(space, w_modulename, find_info, + reuse=True) finally: try: find_info.stream.close() except StreamErrors: pass - # fetch the module again, in case of "substitution" - w_mod = check_sys_modules(space, 
w_modulename) return w_mod elif find_info.modtype == C_EXTENSION and has_so_extension(space): load_c_extension(space, find_info.filename, space.str_w(w_modulename)) @@ -677,13 +679,6 @@ try: if find_info: w_mod = load_module(space, w_modulename, find_info) - try: - w_mod = space.getitem(space.sys.get("modules"), - w_modulename) From pypy.commits at gmail.com Thu Sep 1 23:36:35 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 01 Sep 2016 20:36:35 -0700 (PDT) Subject: [pypy-commit] pypy release-5.x: haaaack on top of previous hacks Message-ID: <57c8f3c3.262ec20a.f163a.c930@mx.google.com> Author: Armin Rigo Branch: release-5.x Changeset: r86827:12bd393e0411 Date: 2016-09-01 20:35 -0700 http://bitbucket.org/pypy/pypy/changeset/12bd393e0411/ Log: haaaack on top of previous hacks diff --git a/rpython/rlib/rdynload.py b/rpython/rlib/rdynload.py --- a/rpython/rlib/rdynload.py +++ b/rpython/rlib/rdynload.py @@ -98,8 +98,15 @@ try: ctypes.CDLL(name) except OSError as e: + # common case: ctypes fails too, with the real dlerror() + # message in str(e). Return that error message. return str(e) else: + # uncommon case: may happen if 'name' is a linker script + # (which the C-level dlopen() can't handle) and we are + # directly running on pypy (whose implementation of ctypes + # or cffi will resolve linker scripts). In that case, + # unsure what we can do. return ("opening %r with ctypes.CDLL() works, " "but not with c_dlopen()??" 
% (name,)) @@ -160,6 +167,13 @@ mode = _dlopen_default_mode() elif (mode & (RTLD_LAZY | RTLD_NOW)) == 0: mode |= RTLD_NOW + # + # haaaack for 'pypy py.test -A' if libm.so is a linker script + # (see reason in _dlerror_on_dlopen_untranslated()) + if not we_are_translated() and platform.name == "linux": + if rffi.charp2str(name) == 'libm.so': + name = rffi.str2charp('libm.so.6', track_allocation=False) + # res = c_dlopen(name, rffi.cast(rffi.INT, mode)) if not res: if not we_are_translated(): From pypy.commits at gmail.com Thu Sep 1 23:36:37 2016 From: pypy.commits at gmail.com (stefanor) Date: Thu, 01 Sep 2016 20:36:37 -0700 (PDT) Subject: [pypy-commit] pypy release-5.x: name can be None Message-ID: <57c8f3c5.85c11c0a.ed820.5fbe@mx.google.com> Author: Stefano Rivera Branch: release-5.x Changeset: r86828:3806b361cac3 Date: 2016-09-01 20:35 -0700 http://bitbucket.org/pypy/pypy/changeset/3806b361cac3/ Log: name can be None diff --git a/rpython/rlib/rdynload.py b/rpython/rlib/rdynload.py --- a/rpython/rlib/rdynload.py +++ b/rpython/rlib/rdynload.py @@ -171,7 +171,7 @@ # haaaack for 'pypy py.test -A' if libm.so is a linker script # (see reason in _dlerror_on_dlopen_untranslated()) if not we_are_translated() and platform.name == "linux": - if rffi.charp2str(name) == 'libm.so': + if name and rffi.charp2str(name) == 'libm.so': name = rffi.str2charp('libm.so.6', track_allocation=False) # res = c_dlopen(name, rffi.cast(rffi.INT, mode)) From pypy.commits at gmail.com Thu Sep 1 23:36:39 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 01 Sep 2016 20:36:39 -0700 (PDT) Subject: [pypy-commit] pypy release-5.x: Add a failing (skipped) whitebox test and a test about ctypes that fails Message-ID: <57c8f3c7.a717c20a.dd7b5.d15f@mx.google.com> Author: Armin Rigo Branch: release-5.x Changeset: r86829:2cab10de65ad Date: 2016-09-01 20:35 -0700 http://bitbucket.org/pypy/pypy/changeset/2cab10de65ad/ Log: Add a failing (skipped) whitebox test and a test about ctypes that fails on -A 
(xfailed) diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_numbers.py b/pypy/module/test_lib_pypy/ctypes_tests/test_numbers.py --- a/pypy/module/test_lib_pypy/ctypes_tests/test_numbers.py +++ b/pypy/module/test_lib_pypy/ctypes_tests/test_numbers.py @@ -195,6 +195,29 @@ _fields_ = [('t', enum)] assert isinstance(S().t, enum) + def test_no_missing_shape_to_ffi_type(self): + # whitebox test + import sys + if '__pypy__' not in sys.builtin_module_names: + skip("only for pypy's ctypes") + skip("re-enable after adding 'g' to _shape_to_ffi_type.typemap, " + "which I think needs fighting all the way up from " + "rpython.rlib.libffi") + from _ctypes.basics import _shape_to_ffi_type + from _rawffi import Array + for i in range(1, 256): + try: + Array(chr(i)) + except ValueError: + pass + else: + assert chr(i) in _shape_to_ffi_type.typemap + + @py.test.mark.xfail + def test_pointer_to_long_double(self): + import ctypes + ctypes.POINTER(ctypes.c_longdouble) + ## def test_perf(self): ## check_perf() diff --git a/rpython/rlib/libffi.py b/rpython/rlib/libffi.py --- a/rpython/rlib/libffi.py +++ b/rpython/rlib/libffi.py @@ -47,6 +47,8 @@ cls.ulonglong = clibffi.cast_type_to_ffitype(rffi.ULONGLONG) cls.signed = clibffi.cast_type_to_ffitype(rffi.SIGNED) cls.wchar_t = clibffi.cast_type_to_ffitype(lltype.UniChar) + # XXX long double support: clibffi.ffi_type_longdouble, but then + # XXX fix the whole rest of this file to add a case for long double del cls._import @staticmethod From pypy.commits at gmail.com Thu Sep 1 23:36:42 2016 From: pypy.commits at gmail.com (stefanor) Date: Thu, 01 Sep 2016 20:36:42 -0700 (PDT) Subject: [pypy-commit] pypy release-5.x: Avoid blowing up with results that are ~0 but <0 due to floating point imprecision Message-ID: <57c8f3ca.82cbc20a.81584.c590@mx.google.com> Author: Stefano Rivera Branch: release-5.x Changeset: r86831:5169ca3e696d Date: 2016-09-01 20:35 -0700 http://bitbucket.org/pypy/pypy/changeset/5169ca3e696d/ Log: Avoid blowing up with 
results that are ~0 but <0 due to floating point imprecision diff --git a/rpython/translator/backendopt/inline.py b/rpython/translator/backendopt/inline.py --- a/rpython/translator/backendopt/inline.py +++ b/rpython/translator/backendopt/inline.py @@ -532,8 +532,7 @@ return sys.maxint else: res = Solution[blockmap[graph.startblock]] - assert res >= 0 - return res + return max(res, 0.0) def static_instruction_count(graph): count = 0 From pypy.commits at gmail.com Thu Sep 1 23:36:41 2016 From: pypy.commits at gmail.com (stefanor) Date: Thu, 01 Sep 2016 20:36:41 -0700 (PDT) Subject: [pypy-commit] pypy release-5.x: Bump recursionlimit, for translating with cpython Message-ID: <57c8f3c9.05d71c0a.db689.689c@mx.google.com> Author: Stefano Rivera Branch: release-5.x Changeset: r86830:a5db0f4359ab Date: 2016-09-01 20:35 -0700 http://bitbucket.org/pypy/pypy/changeset/a5db0f4359ab/ Log: Bump recursionlimit, for translating with cpython diff --git a/rpython/translator/goal/translate.py b/rpython/translator/goal/translate.py --- a/rpython/translator/goal/translate.py +++ b/rpython/translator/goal/translate.py @@ -213,6 +213,7 @@ log.WARNING(warning) def main(): + sys.setrecursionlimit(2000) # PyPy can't translate within cpython's 1k limit targetspec_dic, translateconfig, config, args = parse_options_and_load_target() from rpython.translator import translator from rpython.translator import driver From pypy.commits at gmail.com Fri Sep 2 01:11:55 2016 From: pypy.commits at gmail.com (mattip) Date: Thu, 01 Sep 2016 22:11:55 -0700 (PDT) Subject: [pypy-commit] pypy release-5.x: pfff another hack on top of this pile of hacks Message-ID: <57c90a1b.81091c0a.d9c91.7c58@mx.google.com> Author: Matti Picus Branch: release-5.x Changeset: r86832:3e411b32904e Date: 2016-09-02 08:10 +0300 http://bitbucket.org/pypy/pypy/changeset/3e411b32904e/ Log: pfff another hack on top of this pile of hacks (grafted from 76e37b5f30ae446c64388d1d7cd2d2f01999621e) diff --git a/rpython/rlib/rdynload.py 
b/rpython/rlib/rdynload.py --- a/rpython/rlib/rdynload.py +++ b/rpython/rlib/rdynload.py @@ -170,11 +170,15 @@ # # haaaack for 'pypy py.test -A' if libm.so is a linker script # (see reason in _dlerror_on_dlopen_untranslated()) + must_free = False if not we_are_translated() and platform.name == "linux": if name and rffi.charp2str(name) == 'libm.so': - name = rffi.str2charp('libm.so.6', track_allocation=False) + name = rffi.str2charp('libm.so.6') + must_free = True # res = c_dlopen(name, rffi.cast(rffi.INT, mode)) + if must_free: + rffi.free_charp(name) if not res: if not we_are_translated(): err = _dlerror_on_dlopen_untranslated(name) From pypy.commits at gmail.com Fri Sep 2 01:44:30 2016 From: pypy.commits at gmail.com (mattip) Date: Thu, 01 Sep 2016 22:44:30 -0700 (PDT) Subject: [pypy-commit] pypy default: fix b127faf95f86 which updated contributors but removed the rest of LICENSE Message-ID: <57c911be.94a51c0a.94102.7fea@mx.google.com> Author: Matti Picus Branch: Changeset: r86833:d6a8fc8b6b27 Date: 2016-09-02 08:42 +0300 http://bitbucket.org/pypy/pypy/changeset/d6a8fc8b6b27/ Log: fix b127faf95f86 which updated contributors but removed the rest of LICENSE diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -369,3 +369,109 @@ Roman Podoliaka Dan Loewenherz werat + + Heinrich-Heine University, Germany + Open End AB (formerly AB Strakt), Sweden + merlinux GmbH, Germany + tismerysoft GmbH, Germany + Logilab Paris, France + DFKI GmbH, Germany + Impara, Germany + Change Maker, Sweden + University of California Berkeley, USA + Google Inc. + King's College London + +The PyPy Logo as used by http://speed.pypy.org and others was created +by Samuel Reis and is distributed on terms of Creative Commons Share Alike +License. 
+ +License for 'lib-python/2.7' +============================ + +Except when otherwise stated (look for LICENSE files or copyright/license +information at the beginning of each file) the files in the 'lib-python/2.7' +directory are all copyrighted by the Python Software Foundation and licensed +under the terms that you can find here: https://docs.python.org/2/license.html + +License for 'pypy/module/unicodedata/' +====================================== + +The following files are from the website of The Unicode Consortium +at http://www.unicode.org/. For the terms of use of these files, see +http://www.unicode.org/terms_of_use.html . Or they are derived from +files from the above website, and the same terms of use apply. + + CompositionExclusions-*.txt + EastAsianWidth-*.txt + LineBreak-*.txt + UnicodeData-*.txt + UnihanNumeric-*.txt + +License for 'dotviewer/font/' +============================= + +Copyright (C) 2008 The Android Open Source Project + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +Detailed license information is contained in the NOTICE file in the +directory. + + +Licenses and Acknowledgements for Incorporated Software +======================================================= + +This section is an incomplete, but growing list of licenses and +acknowledgements for third-party software incorporated in the PyPy +distribution. 
+ +License for 'Tcl/Tk' +-------------------- + +This copy of PyPy contains library code that may, when used, result in +the Tcl/Tk library to be loaded. PyPy also includes code that may be +regarded as being a copy of some parts of the Tcl/Tk header files. +You may see a copy of the License for Tcl/Tk in the file +`lib_pypy/_tkinter/license.terms` included here. + +License for 'bzip2' +------------------- + +This copy of PyPy may be linked (dynamically or statically) with the +bzip2 library. You may see a copy of the License for bzip2/libbzip2 at + + http://www.bzip.org/1.0.5/bzip2-manual-1.0.5.html + +License for 'openssl' +--------------------- + +This copy of PyPy may be linked (dynamically or statically) with the +openssl library. You may see a copy of the License for OpenSSL at + + https://www.openssl.org/source/license.html + +License for 'gdbm' +------------------ + +The gdbm module includes code from gdbm.h, which is distributed under +the terms of the GPL license version 2 or any later version. Thus the +gdbm module, provided in the file lib_pypy/gdbm.py, is redistributed +under the terms of the GPL license as well. + +License for 'rpython/rlib/rvmprof/src' +-------------------------------------- + +The code is based on gperftools. 
You may see a copy of the License for it at + + https://github.com/gperftools/gperftools/blob/master/COPYING From pypy.commits at gmail.com Fri Sep 2 01:44:32 2016 From: pypy.commits at gmail.com (mattip) Date: Thu, 01 Sep 2016 22:44:32 -0700 (PDT) Subject: [pypy-commit] pypy release-5.x: fix b127faf95f86 which updated contributors but removed the rest of LICENSE Message-ID: <57c911c0.82cbc20a.81584.e2e9@mx.google.com> Author: Matti Picus Branch: release-5.x Changeset: r86834:d68da509772b Date: 2016-09-02 08:43 +0300 http://bitbucket.org/pypy/pypy/changeset/d68da509772b/ Log: fix b127faf95f86 which updated contributors but removed the rest of LICENSE (grafted from d6a8fc8b6b278362d6a4c7602135354b91ac2e9a) diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -369,3 +369,109 @@ Roman Podoliaka Dan Loewenherz werat + + Heinrich-Heine University, Germany + Open End AB (formerly AB Strakt), Sweden + merlinux GmbH, Germany + tismerysoft GmbH, Germany + Logilab Paris, France + DFKI GmbH, Germany + Impara, Germany + Change Maker, Sweden + University of California Berkeley, USA + Google Inc. + King's College London + +The PyPy Logo as used by http://speed.pypy.org and others was created +by Samuel Reis and is distributed on terms of Creative Commons Share Alike +License. + +License for 'lib-python/2.7' +============================ + +Except when otherwise stated (look for LICENSE files or copyright/license +information at the beginning of each file) the files in the 'lib-python/2.7' +directory are all copyrighted by the Python Software Foundation and licensed +under the terms that you can find here: https://docs.python.org/2/license.html + +License for 'pypy/module/unicodedata/' +====================================== + +The following files are from the website of The Unicode Consortium +at http://www.unicode.org/. For the terms of use of these files, see +http://www.unicode.org/terms_of_use.html . 
Or they are derived from +files from the above website, and the same terms of use apply. + + CompositionExclusions-*.txt + EastAsianWidth-*.txt + LineBreak-*.txt + UnicodeData-*.txt + UnihanNumeric-*.txt + +License for 'dotviewer/font/' +============================= + +Copyright (C) 2008 The Android Open Source Project + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +Detailed license information is contained in the NOTICE file in the +directory. + + +Licenses and Acknowledgements for Incorporated Software +======================================================= + +This section is an incomplete, but growing list of licenses and +acknowledgements for third-party software incorporated in the PyPy +distribution. + +License for 'Tcl/Tk' +-------------------- + +This copy of PyPy contains library code that may, when used, result in +the Tcl/Tk library to be loaded. PyPy also includes code that may be +regarded as being a copy of some parts of the Tcl/Tk header files. +You may see a copy of the License for Tcl/Tk in the file +`lib_pypy/_tkinter/license.terms` included here. + +License for 'bzip2' +------------------- + +This copy of PyPy may be linked (dynamically or statically) with the +bzip2 library. You may see a copy of the License for bzip2/libbzip2 at + + http://www.bzip.org/1.0.5/bzip2-manual-1.0.5.html + +License for 'openssl' +--------------------- + +This copy of PyPy may be linked (dynamically or statically) with the +openssl library. 
You may see a copy of the License for OpenSSL at + + https://www.openssl.org/source/license.html + +License for 'gdbm' +------------------ + +The gdbm module includes code from gdbm.h, which is distributed under +the terms of the GPL license version 2 or any later version. Thus the +gdbm module, provided in the file lib_pypy/gdbm.py, is redistributed +under the terms of the GPL license as well. + +License for 'rpython/rlib/rvmprof/src' +-------------------------------------- + +The code is based on gperftools. You may see a copy of the License for it at + + https://github.com/gperftools/gperftools/blob/master/COPYING From pypy.commits at gmail.com Fri Sep 2 02:49:19 2016 From: pypy.commits at gmail.com (mattip) Date: Thu, 01 Sep 2016 23:49:19 -0700 (PDT) Subject: [pypy-commit] pypy buffer-interface: merge default into branch Message-ID: <57c920ef.4152c20a.7f40d.0401@mx.google.com> Author: Matti Picus Branch: buffer-interface Changeset: r86836:802bf4e38a0d Date: 2016-09-02 09:48 +0300 http://bitbucket.org/pypy/pypy/changeset/802bf4e38a0d/ Log: merge default into branch diff too long, truncating to 2000 out of 210666 lines diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -27,3 +27,6 @@ 40497617ae91caa1a394d8be6f9cd2de31cb0628 release-pypy3.3-v5.2 c09c19272c990a0611b17569a0085ad1ab00c8ff release-pypy2.7-v5.3 7e8df3df96417c16c2d55b41352ec82c9c69c978 release-pypy2.7-v5.3.1 +68bb3510d8212ae9efb687e12e58c09d29e74f87 release-pypy2.7-v5.4.0 +68bb3510d8212ae9efb687e12e58c09d29e74f87 release-pypy2.7-v5.4.0 +77392ad263504df011ccfcabf6a62e21d04086d0 release-pypy2.7-v5.4.0 diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -74,6 +74,7 @@ Seo Sanghyeon Ronny Pfannschmidt Justin Peel + Raffael Tfirst David Edelsohn Anders Hammarquist Jakub Gustak @@ -117,7 +118,6 @@ Wenzhu Man John Witulski Laurence Tratt - Raffael Tfirst Ivan Sichmann Freitas Greg Price Dario Bertini @@ -141,6 +141,7 @@ tav Taavi Burns Georg Brandl + Nicolas Truessel Bert Freudenberg Stian 
Andreassen Wanja Saatkamp @@ -211,6 +212,7 @@ Vaibhav Sood Alan McIntyre Alexander Sedov + p_zieschang at yahoo.de Attila Gobi Jasper.Schulz Christopher Pope @@ -221,6 +223,7 @@ Arjun Naik Valentina Mukhamedzhanova Stefano Parmesan + touilleMan Alexis Daboville Jens-Uwe Mager Carl Meyer @@ -229,12 +232,14 @@ Gabriel Lukas Vacek Kunal Grover + Aaron Gallagher Andrew Dalke Sylvain Thenault Jakub Stasiak Nathan Taylor Vladimir Kryachko Omer Katz + Mark Williams Jacek Generowicz Alejandro J. Cura Jacob Oscarson @@ -355,12 +360,15 @@ yasirs Michael Chermside Anna Ravencroft + pizi Andrey Churin Dan Crosta + Eli Stevens Tobias Diaz Julien Phalip Roman Podoliaka Dan Loewenherz + werat Heinrich-Heine University, Germany Open End AB (formerly AB Strakt), Sweden diff --git a/_pytest/python.py b/_pytest/python.py --- a/_pytest/python.py +++ b/_pytest/python.py @@ -498,7 +498,10 @@ """ Collector for test methods. """ def collect(self): if hasinit(self.obj): - pytest.skip("class %s.%s with __init__ won't get collected" % ( + # XXX used to be skip(), but silently skipping classes + # XXX just because they have been written long ago is + # XXX imho a very, very, very bad idea + pytest.fail("class %s.%s with __init__ won't get collected" % ( self.obj.__module__, self.obj.__name__, )) diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -122,22 +122,24 @@ """Dummy method to let some easy_install packages that have optional C speedup components. 
""" + def customize(executable, flags): + command = compiler.executables[executable] + flags + setattr(compiler, executable, command) + if compiler.compiler_type == "unix": compiler.compiler_so.extend(['-O2', '-fPIC', '-Wimplicit']) compiler.shared_lib_extension = get_config_var('SO') if "CPPFLAGS" in os.environ: cppflags = shlex.split(os.environ["CPPFLAGS"]) - compiler.compiler.extend(cppflags) - compiler.compiler_so.extend(cppflags) - compiler.linker_so.extend(cppflags) + for executable in ('compiler', 'compiler_so', 'linker_so'): + customize(executable, cppflags) if "CFLAGS" in os.environ: cflags = shlex.split(os.environ["CFLAGS"]) - compiler.compiler.extend(cflags) - compiler.compiler_so.extend(cflags) - compiler.linker_so.extend(cflags) + for executable in ('compiler', 'compiler_so', 'linker_so'): + customize(executable, cflags) if "LDFLAGS" in os.environ: ldflags = shlex.split(os.environ["LDFLAGS"]) - compiler.linker_so.extend(ldflags) + customize('linker_so', ldflags) from sysconfig_cpython import ( diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -342,7 +342,7 @@ thisarg = cast(thisvalue, POINTER(POINTER(c_void_p))) keepalives, newargs, argtypes, outargs, errcheckargs = ( self._convert_args(argtypes, args[1:], kwargs)) - newargs.insert(0, thisvalue.value) + newargs.insert(0, thisarg) argtypes.insert(0, c_void_p) else: thisarg = None diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -44,6 +44,7 @@ Seo Sanghyeon Ronny Pfannschmidt Justin Peel + Raffael Tfirst David Edelsohn Anders Hammarquist Jakub Gustak @@ -87,7 +88,6 @@ Wenzhu Man John Witulski Laurence Tratt - Raffael Tfirst Ivan Sichmann Freitas Greg Price Dario Bertini @@ -111,6 +111,7 @@ tav Taavi Burns Georg Brandl + Nicolas Truessel Bert Freudenberg Stian Andreassen Wanja Saatkamp @@ -181,6 +182,7 @@ Vaibhav Sood Alan McIntyre 
Alexander Sedov + p_zieschang at yahoo.de Attila Gobi Jasper.Schulz Christopher Pope @@ -191,6 +193,7 @@ Arjun Naik Valentina Mukhamedzhanova Stefano Parmesan + touilleMan Alexis Daboville Jens-Uwe Mager Carl Meyer @@ -199,12 +202,14 @@ Gabriel Lukas Vacek Kunal Grover + Aaron Gallagher Andrew Dalke Sylvain Thenault Jakub Stasiak Nathan Taylor Vladimir Kryachko Omer Katz + Mark Williams Jacek Generowicz Alejandro J. Cura Jacob Oscarson @@ -325,9 +330,12 @@ yasirs Michael Chermside Anna Ravencroft + pizi Andrey Churin Dan Crosta + Eli Stevens Tobias Diaz Julien Phalip Roman Podoliaka Dan Loewenherz + werat diff --git a/pypy/doc/release-pypy2.7-v5.4.0.rst b/pypy/doc/release-pypy2.7-v5.4.0.rst --- a/pypy/doc/release-pypy2.7-v5.4.0.rst +++ b/pypy/doc/release-pypy2.7-v5.4.0.rst @@ -3,7 +3,8 @@ ============ We have released PyPy2.7 v5.4, a little under two months after PyPy2.7 v5.3. -This new PyPy2.7 release includes further improvements to our C-API compatability layer (cpyext), enabling us to pass over 99% of the upstream +This new PyPy2.7 release includes incremental improvements to our C-API +compatability layer (cpyext), enabling us to pass over 99% of the upstream numpy `test suite`_. We updated built-in cffi_ support to version 1.8, which now supports the "limited API" mode for c-extensions on CPython >=3.2. @@ -12,9 +13,7 @@ support to OpenBSD and Dragon Fly BSD As always, this release fixed many issues and bugs raised by the -growing community of PyPy users. - -XXXXX MORE ??? +growing community of PyPy users. We strongly recommend updating. You can download the PyPy2.7 v5.4 release here: @@ -110,8 +109,8 @@ * (RPython) add `rposix_scandir` portably, needed for Python 3.5 - * Support for memoryview attributes (format, itemsize, ...) which also - adds support for `PyMemoryView_FromObject` + * Increased but incomplete support for memoryview attributes (format, + itemsize, ...) 
which also adds support for `PyMemoryView_FromObject` * Bug Fixes @@ -153,10 +152,6 @@ * Make `hash(-1)` return -2, as CPython does, and fix all the ancilary places this matters - * Issues reported with our previous release were resolved_ after - reports from users on our issue tracker at - https://bitbucket.org/pypy/pypy/issues or on IRC at #pypy - * Fix `PyNumber_Check()` to behave more like CPython * (VMProf) Try hard to not miss any Python-level frame in the @@ -169,6 +164,10 @@ * Fix the mapdict cache for subclasses of builtin types that provide a dict + * Issues reported with our previous release were resolved_ after + reports from users on our issue tracker at + https://bitbucket.org/pypy/pypy/issues or on IRC at #pypy + * Performance improvements: * Add a before_call()-like equivalent before a few operations like diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -5,4 +5,5 @@ .. this is a revision shortly after release-pypy2.7-v5.4 .. startrev: 522736f816dc - +.. branch: rpython-resync +Backport rpython changes made directly on the py3k and py3.5 branches. 
diff --git a/pypy/module/__builtin__/descriptor.py b/pypy/module/__builtin__/descriptor.py --- a/pypy/module/__builtin__/descriptor.py +++ b/pypy/module/__builtin__/descriptor.py @@ -23,6 +23,14 @@ self.w_objtype = w_type self.w_self = w_obj_or_type + def descr_repr(self, space): + if self.w_objtype is not None: + objtype_name = "<%s object>" % self.w_objtype.getname(space) + else: + objtype_name = 'NULL' + return space.wrap(", %s>" % ( + self.w_starttype.getname(space), objtype_name)) + def get(self, space, w_obj, w_type=None): if self.w_self is None or space.is_w(w_obj, space.w_None): return self @@ -84,7 +92,10 @@ 'super', __new__ = generic_new_descr(W_Super), __init__ = interp2app(W_Super.descr_init), + __repr__ = interp2app(W_Super.descr_repr), __thisclass__ = interp_attrproperty_w("w_starttype", W_Super), + __self__ = interp_attrproperty_w("w_self", W_Super), + __self_class__ = interp_attrproperty_w("w_objtype", W_Super), __getattribute__ = interp2app(W_Super.getattribute), __get__ = interp2app(W_Super.get), __doc__ = """\ diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py --- a/pypy/module/__builtin__/interp_classobj.py +++ b/pypy/module/__builtin__/interp_classobj.py @@ -38,6 +38,8 @@ class W_ClassObject(W_Root): + _immutable_fields_ = ['bases_w?[*]', 'w_dict?'] + def __init__(self, space, w_name, bases, w_dict): self.name = space.str_w(w_name) make_sure_not_resized(bases) @@ -75,6 +77,7 @@ "__bases__ items must be classes") self.bases_w = bases_w + @jit.unroll_safe def is_subclass_of(self, other): assert isinstance(other, W_ClassObject) if self is other: @@ -313,7 +316,7 @@ # This method ignores the instance dict and the __getattr__. # Returns None if not found. 
assert isinstance(name, str) - w_value = self.w_class.lookup(space, name) + w_value = jit.promote(self.w_class).lookup(space, name) if w_value is None: return None w_descr_get = space.lookup(w_value, '__get__') diff --git a/pypy/module/__builtin__/test/test_descriptor.py b/pypy/module/__builtin__/test/test_descriptor.py --- a/pypy/module/__builtin__/test/test_descriptor.py +++ b/pypy/module/__builtin__/test/test_descriptor.py @@ -250,6 +250,24 @@ assert super(B, B()).__thisclass__ is B assert super(A, B()).__thisclass__ is A + def test_super_self_selfclass(self): + class A(object): + pass + class B(A): + pass + b = B() + assert super(A, b).__self__ is b + assert super(A).__self__ is None + assert super(A, b).__self_class__ is B + assert super(A).__self_class__ is None + + def test_super_repr(self): + class A(object): + def __repr__(self): + return super(A, self).__repr__() + '!' + assert repr(A()).endswith('>!') + assert repr(super(A, A())) == ", >" + def test_property_docstring(self): assert property.__doc__.startswith('property') diff --git a/pypy/module/_sre/__init__.py b/pypy/module/_sre/__init__.py --- a/pypy/module/_sre/__init__.py +++ b/pypy/module/_sre/__init__.py @@ -1,4 +1,4 @@ -from pypy.interpreter.mixedmodule import MixedModule +from pypy.interpreter.mixedmodule import MixedModule class Module(MixedModule): @@ -7,7 +7,7 @@ interpleveldefs = { 'CODESIZE': 'space.wrap(interp_sre.CODESIZE)', - 'MAGIC': 'space.wrap(interp_sre.MAGIC)', + 'MAGIC': 'space.newint(20031017)', 'MAXREPEAT': 'space.wrap(interp_sre.MAXREPEAT)', 'compile': 'interp_sre.W_SRE_Pattern', 'getlower': 'interp_sre.w_getlower', diff --git a/pypy/module/_sre/interp_sre.py b/pypy/module/_sre/interp_sre.py --- a/pypy/module/_sre/interp_sre.py +++ b/pypy/module/_sre/interp_sre.py @@ -14,7 +14,7 @@ # Constants and exposed functions from rpython.rlib.rsre import rsre_core -from rpython.rlib.rsre.rsre_char import MAGIC, CODESIZE, MAXREPEAT, getlower, set_unicode_db +from 
rpython.rlib.rsre.rsre_char import CODESIZE, MAXREPEAT, getlower, set_unicode_db @unwrap_spec(char_ord=int, flags=int) diff --git a/pypy/module/cpyext/import_.py b/pypy/module/cpyext/import_.py --- a/pypy/module/cpyext/import_.py +++ b/pypy/module/cpyext/import_.py @@ -123,5 +123,4 @@ pathname = code.co_filename w_mod = importing.add_module(space, w_name) space.setattr(w_mod, space.wrap('__file__'), space.wrap(pathname)) - importing.exec_code_module(space, w_mod, code) - return w_mod + return importing.exec_code_module(space, w_mod, code, w_name) diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -92,10 +92,20 @@ link_extra=link_extra, libraries=libraries) from pypy.module.imp.importing import get_so_extension - pydname = soname.new(purebasename=modname, ext=get_so_extension(space)) + ext = get_so_extension(space) + pydname = soname.new(purebasename=modname, ext=ext) soname.rename(pydname) return str(pydname) +def get_so_suffix(): + from imp import get_suffixes, C_EXTENSION + for suffix, mode, typ in get_suffixes(): + if typ == C_EXTENSION: + return suffix + else: + raise RuntimeError("This interpreter does not define a filename " + "suffix for C extensions!") + def compile_extension_module_applevel(space, modname, include_dirs=[], source_files=None, source_strings=None): """ @@ -126,13 +136,9 @@ source_strings=source_strings, compile_extra=compile_extra, link_extra=link_extra) - from imp import get_suffixes, C_EXTENSION - pydname = soname - for suffix, mode, typ in get_suffixes(): - if typ == C_EXTENSION: - pydname = soname.new(purebasename=modname, ext=suffix) - soname.rename(pydname) - break + ext = get_so_suffix() + pydname = soname.new(purebasename=modname, ext=ext) + soname.rename(pydname) return str(pydname) def freeze_refcnts(self): @@ -145,6 +151,24 @@ #state.print_refcounts() self.frozen_ll2callocations = 
set(ll2ctypes.ALLOCATED.values()) +class FakeSpace(object): + """Like TinyObjSpace, but different""" + def __init__(self, config): + from distutils.sysconfig import get_python_inc + self.config = config + self.include_dir = get_python_inc() + + def passthrough(self, arg): + return arg + listview = passthrough + str_w = passthrough + + def unwrap(self, args): + try: + return args.str_w(None) + except: + return args + class LeakCheckingTest(object): """Base class for all cpyext tests.""" spaceconfig = dict(usemodules=['cpyext', 'thread', '_rawffi', 'array', @@ -433,21 +457,8 @@ self.imported_module_names = [] if self.runappdirect: + fake = FakeSpace(self.space.config) def interp2app(func): - from distutils.sysconfig import get_python_inc - class FakeSpace(object): - def passthrough(self, arg): - return arg - listview = passthrough - str_w = passthrough - def unwrap(self, args): - try: - return args.str_w(None) - except: - return args - fake = FakeSpace() - fake.include_dir = get_python_inc() - fake.config = self.space.config def run(*args, **kwargs): for k in kwargs.keys(): if k not in func.unwrap_spec and not k.startswith('w_'): diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -597,6 +597,11 @@ @jit.dont_look_inside def load_module(space, w_modulename, find_info, reuse=False): + """Like load_module() in CPython's import.c, this will normally + make a module object, store it in sys.modules, execute code in it, + and then fetch it again from sys.modules. But this logic is not + used if we're calling a PEP302 loader. 
+ """ if find_info is None: return @@ -625,17 +630,15 @@ try: if find_info.modtype == PY_SOURCE: - load_source_module( + return load_source_module( space, w_modulename, w_mod, find_info.filename, find_info.stream.readall(), find_info.stream.try_to_find_file_descriptor()) - return w_mod elif find_info.modtype == PY_COMPILED: magic = _r_long(find_info.stream) timestamp = _r_long(find_info.stream) - load_compiled_module(space, w_modulename, w_mod, find_info.filename, + return load_compiled_module(space, w_modulename, w_mod, find_info.filename, magic, timestamp, find_info.stream.readall()) - return w_mod elif find_info.modtype == PKG_DIRECTORY: w_path = space.newlist([space.wrap(find_info.filename)]) space.setattr(w_mod, space.wrap('__path__'), w_path) @@ -644,14 +647,13 @@ if find_info is None: return w_mod try: - load_module(space, w_modulename, find_info, reuse=True) + w_mod = load_module(space, w_modulename, find_info, + reuse=True) finally: try: find_info.stream.close() except StreamErrors: pass - # fetch the module again, in case of "substitution" - w_mod = check_sys_modules(space, w_modulename) return w_mod elif find_info.modtype == C_EXTENSION and has_so_extension(space): load_c_extension(space, find_info.filename, space.str_w(w_modulename)) @@ -677,13 +679,6 @@ try: if find_info: w_mod = load_module(space, w_modulename, find_info) - try: - w_mod = space.getitem(space.sys.get("modules"), - w_modulename) - except OperationError as oe: - if not oe.match(space, space.w_KeyError): - raise - raise OperationError(space.w_ImportError, w_modulename) if w_parent is not None: space.setattr(w_parent, space.wrap(partname), w_mod) return w_mod @@ -875,20 +870,32 @@ pycode = ec.compiler.compile(source, pathname, 'exec', 0) return pycode -def exec_code_module(space, w_mod, code_w): +def exec_code_module(space, w_mod, code_w, w_modulename, check_afterwards=True): + """ + Execute a code object in the module's dict. Returns + 'sys.modules[modulename]', which must exist. 
+ """ w_dict = space.getattr(w_mod, space.wrap('__dict__')) space.call_method(w_dict, 'setdefault', space.wrap('__builtins__'), space.wrap(space.builtin)) code_w.exec_code(space, w_dict, w_dict) + if check_afterwards: + w_mod = check_sys_modules(space, w_modulename) + if w_mod is None: + raise oefmt(space.w_ImportError, + "Loaded module %R not found in sys.modules", + w_modulename) + return w_mod + @jit.dont_look_inside def load_source_module(space, w_modulename, w_mod, pathname, source, fd, - write_pyc=True): + write_pyc=True, check_afterwards=True): """ - Load a source module from a given file and return its module - object. + Load a source module from a given file. Returns the result + of sys.modules[modulename], which must exist. """ w = space.wrap @@ -927,9 +934,8 @@ code_w.remove_docstrings(space) update_code_filenames(space, code_w, pathname) - exec_code_module(space, w_mod, code_w) - - return w_mod + return exec_code_module(space, w_mod, code_w, w_modulename, + check_afterwards=check_afterwards) def update_code_filenames(space, code_w, pathname, oldname=None): assert isinstance(code_w, PyCode) @@ -1012,10 +1018,10 @@ @jit.dont_look_inside def load_compiled_module(space, w_modulename, w_mod, cpathname, magic, - timestamp, source): + timestamp, source, check_afterwards=True): """ - Load a module from a compiled file, execute it, and return its - module object. + Load a module from a compiled file and execute it. Returns + 'sys.modules[modulename]', which must exist. 
""" log_pyverbose(space, 1, "import %s # compiled from %s\n" % (space.str_w(w_modulename), cpathname)) @@ -1032,9 +1038,8 @@ if optimize >= 2: code_w.remove_docstrings(space) - exec_code_module(space, w_mod, code_w) - - return w_mod + return exec_code_module(space, w_mod, code_w, w_modulename, + check_afterwards=check_afterwards) def open_exclusive(space, cpathname, mode): try: diff --git a/pypy/module/imp/interp_imp.py b/pypy/module/imp/interp_imp.py --- a/pypy/module/imp/interp_imp.py +++ b/pypy/module/imp/interp_imp.py @@ -98,33 +98,35 @@ w_mod = space.wrap(Module(space, w_modulename)) importing._prepare_module(space, w_mod, filename, None) - importing.load_source_module( + w_mod = importing.load_source_module( space, w_modulename, w_mod, filename, stream.readall(), stream.try_to_find_file_descriptor()) if space.is_none(w_file): stream.close() return w_mod - at unwrap_spec(filename='str0') -def _run_compiled_module(space, w_modulename, filename, w_file, w_module): + at unwrap_spec(filename='str0', check_afterwards=int) +def _run_compiled_module(space, w_modulename, filename, w_file, w_module, + check_afterwards=False): # the function 'imp._run_compiled_module' is a pypy-only extension stream = get_file(space, w_file, filename, 'rb') magic = importing._r_long(stream) timestamp = importing._r_long(stream) - importing.load_compiled_module( + w_mod = importing.load_compiled_module( space, w_modulename, w_module, filename, magic, timestamp, - stream.readall()) + stream.readall(), check_afterwards=check_afterwards) if space.is_none(w_file): stream.close() + return w_mod @unwrap_spec(filename='str0') def load_compiled(space, w_modulename, filename, w_file=None): w_mod = space.wrap(Module(space, w_modulename)) importing._prepare_module(space, w_mod, filename, None) - _run_compiled_module(space, w_modulename, filename, w_file, w_mod) - return w_mod + return _run_compiled_module(space, w_modulename, filename, w_file, w_mod, + check_afterwards=True) 
@unwrap_spec(filename=str) def load_dynamic(space, w_modulename, filename, w_file=None): diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -118,7 +118,7 @@ filename = str(p.join("x.py")) stream = streamio.open_file_as_stream(filename, "r") try: - importing.load_source_module( + _load_source_module( space, w_modname, w(importing.Module(space, w_modname)), filename, stream.readall(), stream.try_to_find_file_descriptor()) @@ -139,6 +139,15 @@ return str(root) +def _load_source_module(space, w_modname, w_mod, *args, **kwds): + kwds.setdefault('check_afterwards', False) + return importing.load_source_module(space, w_modname, w_mod, *args, **kwds) + +def _load_compiled_module(space, w_modname, w_mod, *args, **kwds): + kwds.setdefault('check_afterwards', False) + return importing.load_compiled_module(space, w_modname, w_mod, + *args, **kwds) + def _setup(space): dn = setup_directory_structure(space) @@ -887,8 +896,7 @@ w_mod = space.wrap(Module(space, w_modulename)) magic = importing._r_long(stream) timestamp = importing._r_long(stream) - w_ret = importing.load_compiled_module(space, - w_modulename, + w_ret = _load_compiled_module(space, w_modulename, w_mod, cpathname, magic, @@ -946,7 +954,7 @@ pathname = _testfilesource() stream = streamio.open_file_as_stream(pathname, "r") try: - w_ret = importing.load_source_module( + w_ret = _load_source_module( space, w_modulename, w_mod, pathname, stream.readall(), stream.try_to_find_file_descriptor()) @@ -968,7 +976,7 @@ pathname = _testfilesource() stream = streamio.open_file_as_stream(pathname, "r") try: - w_ret = importing.load_source_module( + w_ret = _load_source_module( space, w_modulename, w_mod, pathname, stream.readall(), stream.try_to_find_file_descriptor(), @@ -987,7 +995,7 @@ try: space.setattr(space.sys, space.wrap('dont_write_bytecode'), space.w_True) - w_ret = importing.load_source_module( + 
w_ret = _load_source_module( space, w_modulename, w_mod, pathname, stream.readall(), stream.try_to_find_file_descriptor()) @@ -1006,7 +1014,7 @@ pathname = _testfilesource(source="") stream = streamio.open_file_as_stream(pathname, "r") try: - w_ret = importing.load_source_module( + w_ret = _load_source_module( space, w_modulename, w_mod, pathname, stream.readall(), stream.try_to_find_file_descriptor()) @@ -1026,7 +1034,7 @@ pathname = _testfilesource(source="a = unknown_name") stream = streamio.open_file_as_stream(pathname, "r") try: - w_ret = importing.load_source_module( + w_ret = _load_source_module( space, w_modulename, w_mod, pathname, stream.readall(), stream.try_to_find_file_descriptor()) @@ -1114,7 +1122,7 @@ magic = importing._r_long(stream) timestamp = importing._r_long(stream) space2.raises_w(space2.w_ImportError, - importing.load_compiled_module, + _load_compiled_module, space2, w_modulename, w_mod, @@ -1326,10 +1334,7 @@ # use an import hook that doesn't update sys.modules, then the # import succeeds; but at the same time, you can have the same # result without an import hook (see test_del_from_sys_modules) - # and then the import fails. This looks like even more mess - # to replicate, so we ignore it until someone really hits this - # case... - skip("looks like an inconsistency in CPython") + # and then the import fails. Mess mess mess. 
class ImportHook(object): def find_module(self, fullname, path=None): diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py @@ -187,6 +187,43 @@ """) + def test_oldstyle_methcall(self): + def main(): + def g(): pass + class A: + def f(self): + return self.x + 1 + class I(A): + pass + class J(I): + pass + + + class B(J): + def __init__(self, x): + self.x = x + + i = 0 + b = B(1) + while i < 1000: + g() + v = b.f() # ID: meth + i += v + return i + + log = self.run(main, [], threshold=80) + loop, = log.loops_by_filename(self.filepath, is_entry_bridge=True) + assert loop.match_by_id('meth', + ''' + guard_nonnull_class(p18, ..., descr=...) + p52 = getfield_gc_r(p18, descr=...) # read map + guard_value(p52, ConstPtr(ptr53), descr=...) + p54 = getfield_gc_r(p18, descr=...) # read class + guard_value(p54, ConstPtr(ptr55), descr=...) 
+ p56 = force_token() # done + ''') + + def test_oldstyle_newstyle_mix(self): def main(): class A: diff --git a/pypy/module/signal/interp_signal.py b/pypy/module/signal/interp_signal.py --- a/pypy/module/signal/interp_signal.py +++ b/pypy/module/signal/interp_signal.py @@ -64,7 +64,7 @@ AsyncAction.__init__(self, space) self.pending_signal = -1 self.fire_in_another_thread = False - # + @rgc.no_collect def _after_thread_switch(): if self.fire_in_another_thread: @@ -251,7 +251,7 @@ except OSError as e: if e.errno == errno.EBADF: raise oefmt(space.w_ValueError, "invalid fd") - old_fd = pypysig_set_wakeup_fd(fd) + old_fd = pypysig_set_wakeup_fd(fd, True) return space.wrap(intmask(old_fd)) diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_numbers.py b/pypy/module/test_lib_pypy/ctypes_tests/test_numbers.py --- a/pypy/module/test_lib_pypy/ctypes_tests/test_numbers.py +++ b/pypy/module/test_lib_pypy/ctypes_tests/test_numbers.py @@ -195,6 +195,29 @@ _fields_ = [('t', enum)] assert isinstance(S().t, enum) + def test_no_missing_shape_to_ffi_type(self): + # whitebox test + import sys + if '__pypy__' not in sys.builtin_module_names: + skip("only for pypy's ctypes") + skip("re-enable after adding 'g' to _shape_to_ffi_type.typemap, " + "which I think needs fighting all the way up from " + "rpython.rlib.libffi") + from _ctypes.basics import _shape_to_ffi_type + from _rawffi import Array + for i in range(1, 256): + try: + Array(chr(i)) + except ValueError: + pass + else: + assert chr(i) in _shape_to_ffi_type.typemap + + @py.test.mark.xfail + def test_pointer_to_long_double(self): + import ctypes + ctypes.POINTER(ctypes.c_longdouble) + ## def test_perf(self): ## check_perf() diff --git a/pypy/module/zipimport/interp_zipimport.py b/pypy/module/zipimport/interp_zipimport.py --- a/pypy/module/zipimport/interp_zipimport.py +++ b/pypy/module/zipimport/interp_zipimport.py @@ -152,8 +152,7 @@ importing._prepare_module(space, w_mod, real_name, pkgpath) co_filename = 
self.make_co_filename(filename) code_w = importing.parse_source_module(space, co_filename, buf) - importing.exec_code_module(space, w_mod, code_w) - return w_mod + return importing.exec_code_module(space, w_mod, code_w, w(modname)) def _parse_mtime(self, space, filename): w = space.wrap @@ -205,10 +204,10 @@ real_name = self.filename + os.path.sep + self.corr_zname(filename) space.setattr(w_mod, w('__loader__'), space.wrap(self)) importing._prepare_module(space, w_mod, real_name, pkgpath) - result = importing.load_compiled_module(space, w(modname), w_mod, + w_result = importing.load_compiled_module(space, w(modname), w_mod, filename, magic, timestamp, buf) - return result + return w_result def have_modulefile(self, space, filename): if ZIPSEP != os.path.sep: diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py --- a/pypy/objspace/descroperation.py +++ b/pypy/objspace/descroperation.py @@ -58,6 +58,20 @@ return w_iter tuple_iter._annspecialcase_ = 'specialize:memo' +def str_getitem(space): + "Utility that returns the app-level descriptor str.__getitem__." + w_src, w_iter = space.lookup_in_type_where(space.w_str, + '__getitem__') + return w_iter +str_getitem._annspecialcase_ = 'specialize:memo' + +def unicode_getitem(space): + "Utility that returns the app-level descriptor unicode.__getitem__." 
+ w_src, w_iter = space.lookup_in_type_where(space.w_unicode, + '__getitem__') + return w_iter +unicode_getitem._annspecialcase_ = 'specialize:memo' + def raiseattrerror(space, w_obj, name, w_descr=None): if w_descr is None: raise oefmt(space.w_AttributeError, diff --git a/pypy/objspace/std/newformat.py b/pypy/objspace/std/newformat.py --- a/pypy/objspace/std/newformat.py +++ b/pypy/objspace/std/newformat.py @@ -8,6 +8,7 @@ from rpython.rlib.objectmodel import specialize from rpython.rlib.rfloat import copysign, formatd from rpython.rlib.rarithmetic import r_uint, intmask +from pypy.interpreter.signature import Signature @specialize.argtype(1) @@ -40,19 +41,32 @@ ANS_MANUAL = 3 -def make_template_formatting_class(): +format_signature = Signature([], 'args', 'kwargs') + + +def make_template_formatting_class(for_unicode): class TemplateFormatter(object): + is_unicode = for_unicode parser_list_w = None - def __init__(self, space, is_unicode, template): + def __init__(self, space, template): self.space = space - self.is_unicode = is_unicode - self.empty = u"" if is_unicode else "" + self.empty = u"" if self.is_unicode else "" self.template = template def build(self, args): - self.args, self.kwargs = args.unpack() + if self.is_unicode: + # for unicode, use the slower parse_obj() to get self.w_kwargs + # as a wrapped dictionary that may contain full-range unicode + # keys. See test_non_latin1_key + space = self.space + w_args, w_kwds = args.parse_obj(None, 'format', + format_signature) + self.args = space.listview(w_args) + self.w_kwargs = w_kwds + else: + self.args, self.kwargs = args.unpack() self.auto_numbering = 0 self.auto_numbering_state = ANS_INIT return self._build_string(0, len(self.template), 2) @@ -197,17 +211,13 @@ if index == -1: kwarg = name[:i] if self.is_unicode: + w_arg = space.getitem(self.w_kwargs, space.wrap(kwarg)) + else: try: - arg_key = kwarg.encode("latin-1") - except UnicodeEncodeError: - # Not going to be found in a dict of strings. 
- raise OperationError(space.w_KeyError, space.wrap(kwarg)) - else: - arg_key = kwarg - try: - w_arg = self.kwargs[arg_key] - except KeyError: - raise OperationError(space.w_KeyError, space.wrap(arg_key)) + w_arg = self.kwargs[kwarg] + except KeyError: + raise OperationError(space.w_KeyError, + space.wrap(kwarg)) else: try: w_arg = self.args[index] @@ -351,14 +361,8 @@ return space.iter(space.newlist(self.parser_list_w)) return TemplateFormatter -StrTemplateFormatter = make_template_formatting_class() -UnicodeTemplateFormatter = make_template_formatting_class() - -def str_template_formatter(space, template): - return StrTemplateFormatter(space, False, template) - -def unicode_template_formatter(space, template): - return UnicodeTemplateFormatter(space, True, template) +str_template_formatter = make_template_formatting_class(for_unicode=False) +unicode_template_formatter = make_template_formatting_class(for_unicode=True) def format_method(space, w_string, args, is_unicode): @@ -395,16 +399,16 @@ LONG_DIGITS = string.digits + string.ascii_lowercase -def make_formatting_class(): +def make_formatting_class(for_unicode): class Formatter(BaseFormatter): """__format__ implementation for builtin types.""" + is_unicode = for_unicode _grouped_digits = None - def __init__(self, space, is_unicode, spec): + def __init__(self, space, spec): self.space = space - self.is_unicode = is_unicode - self.empty = u"" if is_unicode else "" + self.empty = u"" if self.is_unicode else "" self.spec = spec def _is_alignment(self, c): @@ -1138,15 +1142,8 @@ self._unknown_presentation("complex") return Formatter -StrFormatter = make_formatting_class() -UnicodeFormatter = make_formatting_class() - - -def unicode_formatter(space, spec): - return StrFormatter(space, True, spec) - -def str_formatter(space, spec): - return UnicodeFormatter(space, False, spec) +str_formatter = make_formatting_class(for_unicode=False) +unicode_formatter = make_formatting_class(for_unicode=True) @specialize.arg(2) diff 
--git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -445,7 +445,7 @@ return w_obj.listview_bytes() if type(w_obj) is W_SetObject or type(w_obj) is W_FrozensetObject: return w_obj.listview_bytes() - if isinstance(w_obj, W_BytesObject) and self._uses_no_iter(w_obj): + if isinstance(w_obj, W_BytesObject) and self._str_uses_no_iter(w_obj): return w_obj.listview_bytes() if isinstance(w_obj, W_ListObject) and self._uses_list_iter(w_obj): return w_obj.getitems_bytes() @@ -460,7 +460,7 @@ return w_obj.listview_unicode() if type(w_obj) is W_SetObject or type(w_obj) is W_FrozensetObject: return w_obj.listview_unicode() - if isinstance(w_obj, W_UnicodeObject) and self._uses_no_iter(w_obj): + if isinstance(w_obj, W_UnicodeObject) and self._uni_uses_no_iter(w_obj): return w_obj.listview_unicode() if isinstance(w_obj, W_ListObject) and self._uses_list_iter(w_obj): return w_obj.getitems_unicode() @@ -504,8 +504,15 @@ from pypy.objspace.descroperation import tuple_iter return self.lookup(w_obj, '__iter__') is tuple_iter(self) - def _uses_no_iter(self, w_obj): - return self.lookup(w_obj, '__iter__') is None + def _str_uses_no_iter(self, w_obj): + from pypy.objspace.descroperation import str_getitem + return (self.lookup(w_obj, '__iter__') is None and + self.lookup(w_obj, '__getitem__') is str_getitem(self)) + + def _uni_uses_no_iter(self, w_obj): + from pypy.objspace.descroperation import unicode_getitem + return (self.lookup(w_obj, '__iter__') is None and + self.lookup(w_obj, '__getitem__') is unicode_getitem(self)) def sliceindices(self, w_slice, w_length): if isinstance(w_slice, W_SliceObject): diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -432,7 +432,7 @@ class AppTestListObject(object): - spaceconfig = 
{"objspace.std.withliststrategies": True} # it's the default + #spaceconfig = {"objspace.std.withliststrategies": True} # it's the default def setup_class(cls): import platform @@ -1518,6 +1518,16 @@ def __iter__(self): yield "ok" assert list(U(u"don't see me")) == ["ok"] + # + class S(str): + def __getitem__(self, index): + return str.__getitem__(self, index).upper() + assert list(S("abc")) == list("ABC") + # + class U(unicode): + def __getitem__(self, index): + return unicode.__getitem__(self, index).upper() + assert list(U(u"abc")) == list(u"ABC") def test_extend_from_nonempty_list_with_subclasses(self): l = ["hi!"] @@ -1543,6 +1553,20 @@ l.extend(U(u"don't see me")) # assert l == ["hi!", "okT", "okL", "okL", "okS", "okU"] + # + class S(str): + def __getitem__(self, index): + return str.__getitem__(self, index).upper() + l = [] + l.extend(S("abc")) + assert l == list("ABC") + # + class U(unicode): + def __getitem__(self, index): + return unicode.__getitem__(self, index).upper() + l = [] + l.extend(U(u"abc")) + assert l == list(u"ABC") def test_no_len_on_range_iter(self): iterable = range(10) diff --git a/pypy/objspace/std/test/test_newformat.py b/pypy/objspace/std/test/test_newformat.py --- a/pypy/objspace/std/test/test_newformat.py +++ b/pypy/objspace/std/test/test_newformat.py @@ -215,7 +215,9 @@ assert self.s("{!r}").format(x()) == self.s("32") def test_non_latin1_key(self): - raises(KeyError, self.s("{\u1000}").format) + raises(KeyError, u"{\u1000}".format) + d = {u"\u1000": u"foo"} + assert u"{\u1000}".format(**d) == u"foo" class AppTestStringFormat(BaseStringFormatTests): diff --git a/pypy/tool/release/repackage.sh b/pypy/tool/release/repackage.sh --- a/pypy/tool/release/repackage.sh +++ b/pypy/tool/release/repackage.sh @@ -1,7 +1,7 @@ # Edit these appropriately before running this script maj=5 -min=3 -rev=1 +min=4 +rev=0 branchname=release-$maj.x # ==OR== release-$maj.$min.x tagname=release-pypy2.7-v$maj.$min.$rev # ==OR== release-$maj.$min diff --git 
a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -164,8 +164,15 @@ # annotations that are passed in, and don't annotate the old # graph -- it's already low-level operations! for a, s_newarg in zip(block.inputargs, cells): - s_oldarg = self.binding(a) - assert annmodel.unionof(s_oldarg, s_newarg) == s_oldarg + s_oldarg = a.annotation + # XXX: Should use s_oldarg.contains(s_newarg) but that breaks + # PyPy translation + if annmodel.unionof(s_oldarg, s_newarg) != s_oldarg: + raise annmodel.AnnotatorError( + "Late-stage annotation is not allowed to modify the " + "existing annotation for variable %s: %s" % + (a, s_oldarg)) + else: assert not self.frozen if block not in self.annotated: diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -17,7 +17,7 @@ from rpython.flowspace.model import Variable, Constant, const from rpython.flowspace.operation import op from rpython.rlib import rarithmetic -from rpython.annotator.model import AnnotatorError +from rpython.annotator.model import AnnotatorError, TLS BINARY_OPERATIONS = set([oper.opname for oper in op.__dict__.values() if oper.dispatch == 2]) @@ -436,6 +436,11 @@ class __extend__(pairtype(SomeFloat, SomeFloat)): def union((flt1, flt2)): + if not TLS.allow_int_to_float: + # in this mode, if one of the two is actually the + # subclass SomeInteger, complain + if isinstance(flt1, SomeInteger) or isinstance(flt2, SomeInteger): + raise UnionError(flt1, flt2) return SomeFloat() add = sub = mul = union diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -44,6 +44,7 @@ # A global attribute :-( Patch it with 'True' to enable checking of # the no_nul attribute... 
check_str_without_nul = False + allow_int_to_float = True TLS = State() class SomeObject(object): @@ -749,6 +750,7 @@ s1 = pair(s1, s2).union() else: # this is just a performance shortcut + # XXX: This is a lie! Grep for no_side_effects_in_union and weep. if s1 != s2: s1 = pair(s1, s2).union() return s1 diff --git a/rpython/rlib/_rsocket_rffi.py b/rpython/rlib/_rsocket_rffi.py --- a/rpython/rlib/_rsocket_rffi.py +++ b/rpython/rlib/_rsocket_rffi.py @@ -176,6 +176,7 @@ SOCK_DGRAM SOCK_RAW SOCK_RDM SOCK_SEQPACKET SOCK_STREAM +SOCK_CLOEXEC SOL_SOCKET SOL_IPX SOL_AX25 SOL_ATALK SOL_NETROM SOL_ROSE @@ -319,6 +320,8 @@ [('p_proto', rffi.INT), ]) +CConfig.HAVE_ACCEPT4 = platform.Has('accept4') + if _POSIX: CConfig.nfds_t = platform.SimpleType('nfds_t') CConfig.pollfd = platform.Struct('struct pollfd', @@ -541,6 +544,12 @@ socketaccept = external('accept', [socketfd_type, sockaddr_ptr, socklen_t_ptr], socketfd_type, save_err=SAVE_ERR) +HAVE_ACCEPT4 = cConfig.HAVE_ACCEPT4 +if HAVE_ACCEPT4: + socketaccept4 = external('accept4', [socketfd_type, sockaddr_ptr, + socklen_t_ptr, rffi.INT], + socketfd_type, + save_err=SAVE_ERR) socketbind = external('bind', [socketfd_type, sockaddr_ptr, socklen_t], rffi.INT, save_err=SAVE_ERR) socketlisten = external('listen', [socketfd_type, rffi.INT], rffi.INT, diff --git a/rpython/rlib/buffer.py b/rpython/rlib/buffer.py --- a/rpython/rlib/buffer.py +++ b/rpython/rlib/buffer.py @@ -10,6 +10,7 @@ _immutable_ = True def getlength(self): + """Returns the size in bytes (even if getitemsize() > 1).""" raise NotImplementedError def __len__(self): diff --git a/rpython/rlib/libffi.py b/rpython/rlib/libffi.py --- a/rpython/rlib/libffi.py +++ b/rpython/rlib/libffi.py @@ -47,6 +47,8 @@ cls.ulonglong = clibffi.cast_type_to_ffitype(rffi.ULONGLONG) cls.signed = clibffi.cast_type_to_ffitype(rffi.SIGNED) cls.wchar_t = clibffi.cast_type_to_ffitype(lltype.UniChar) + # XXX long double support: clibffi.ffi_type_longdouble, but then + # XXX fix the whole rest of this 
file to add a case for long double del cls._import @staticmethod diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -1222,6 +1222,9 @@ # base is supposed to be positive or 0.0, which means we use e if base == 10.0: return _loghelper(math.log10, self) + if base == 2.0: + from rpython.rlib import rfloat + return _loghelper(rfloat.log2, self) ret = _loghelper(math.log, self) if base != 0.0: ret /= math.log(base) diff --git a/rpython/rlib/rdynload.py b/rpython/rlib/rdynload.py --- a/rpython/rlib/rdynload.py +++ b/rpython/rlib/rdynload.py @@ -98,8 +98,15 @@ try: ctypes.CDLL(name) except OSError as e: + # common case: ctypes fails too, with the real dlerror() + # message in str(e). Return that error message. return str(e) else: + # uncommon case: may happen if 'name' is a linker script + # (which the C-level dlopen() can't handle) and we are + # directly running on pypy (whose implementation of ctypes + # or cffi will resolve linker scripts). In that case, + # unsure what we can do. return ("opening %r with ctypes.CDLL() works, " "but not with c_dlopen()??" 
% (name,)) @@ -160,7 +167,18 @@ mode = _dlopen_default_mode() elif (mode & (RTLD_LAZY | RTLD_NOW)) == 0: mode |= RTLD_NOW + # + # haaaack for 'pypy py.test -A' if libm.so is a linker script + # (see reason in _dlerror_on_dlopen_untranslated()) + must_free = False + if not we_are_translated() and platform.name == "linux": + if name and rffi.charp2str(name) == 'libm.so': + name = rffi.str2charp('libm.so.6') + must_free = True + # res = c_dlopen(name, rffi.cast(rffi.INT, mode)) + if must_free: + rffi.free_charp(name) if not res: if not we_are_translated(): err = _dlerror_on_dlopen_untranslated(name) diff --git a/rpython/rlib/rjitlog/rjitlog.py b/rpython/rlib/rjitlog/rjitlog.py --- a/rpython/rlib/rjitlog/rjitlog.py +++ b/rpython/rlib/rjitlog/rjitlog.py @@ -430,9 +430,8 @@ def encode_merge_point(log, compressor, values): line = [] - unrolled = unrolling_iterable(values) i = 0 - for value in unrolled: + for value in values: line.append(value.encode(log,i,compressor)) i += 1 return ''.join(line) diff --git a/rpython/rlib/rmarshal.py b/rpython/rlib/rmarshal.py --- a/rpython/rlib/rmarshal.py +++ b/rpython/rlib/rmarshal.py @@ -346,11 +346,15 @@ # on s_bigger. It relies on the fact that s_bigger was created with # an expression like 'annotation([s_item])' which returns a ListDef with # no bookkeeper, on which side-effects are not allowed. 
+ saved = annmodel.TLS.allow_int_to_float try: + annmodel.TLS.allow_int_to_float = False s_union = annmodel.unionof(s_bigger, s_smaller) return s_bigger.contains(s_union) except (annmodel.UnionError, TooLateForChange): return False + finally: + annmodel.TLS.allow_int_to_float = saved class __extend__(pairtype(MTag, annmodel.SomeObject)): diff --git a/rpython/rlib/ropenssl.py b/rpython/rlib/ropenssl.py --- a/rpython/rlib/ropenssl.py +++ b/rpython/rlib/ropenssl.py @@ -126,6 +126,10 @@ "SSL_OP_NO_COMPRESSION") SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS = rffi_platform.ConstantInteger( "SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS") + SSL_OP_CIPHER_SERVER_PREFERENCE = rffi_platform.ConstantInteger( + "SSL_OP_CIPHER_SERVER_PREFERENCE") + SSL_OP_SINGLE_DH_USE = rffi_platform.ConstantInteger( + "SSL_OP_SINGLE_DH_USE") HAS_SNI = rffi_platform.Defined("SSL_CTRL_SET_TLSEXT_HOSTNAME") HAS_NPN = rffi_platform.Defined("OPENSSL_NPN_NEGOTIATED") SSL_VERIFY_NONE = rffi_platform.ConstantInteger("SSL_VERIFY_NONE") @@ -307,6 +311,8 @@ if HAVE_OPENSSL_RAND: ssl_external('RAND_add', [rffi.CCHARP, rffi.INT, rffi.DOUBLE], lltype.Void) + ssl_external('RAND_bytes', [rffi.UCHARP, rffi.INT], rffi.INT) + ssl_external('RAND_pseudo_bytes', [rffi.UCHARP, rffi.INT], rffi.INT) ssl_external('RAND_status', [], rffi.INT) if HAVE_OPENSSL_RAND_EGD: ssl_external('RAND_egd', [rffi.CCHARP], rffi.INT) @@ -465,6 +471,7 @@ ssl_external('GENERAL_NAME_print', [BIO, GENERAL_NAME], rffi.INT) ssl_external('pypy_GENERAL_NAME_dirn', [GENERAL_NAME], X509_NAME, macro=True) + ssl_external('pypy_GENERAL_NAME_uri', [GENERAL_NAME], ASN1_IA5STRING, macro=True) diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -223,7 +223,7 @@ pass if _WIN32: - includes = ['io.h', 'sys/utime.h', 'sys/types.h', 'process.h'] + includes = ['io.h', 'sys/utime.h', 'sys/types.h', 'process.h', 'time.h'] libraries = [] else: if sys.platform.startswith(('darwin', 'netbsd', 'openbsd')): @@ 
-254,10 +254,11 @@ UTIMBUF = rffi_platform.Struct('struct %sutimbuf' % UNDERSCORE_ON_WIN32, [('actime', rffi.INT), ('modtime', rffi.INT)]) + CLOCK_T = rffi_platform.SimpleType('clock_t', rffi.INT) if not _WIN32: UID_T = rffi_platform.SimpleType('uid_t', rffi.UINT) GID_T = rffi_platform.SimpleType('gid_t', rffi.UINT) - CLOCK_T = rffi_platform.SimpleType('clock_t', rffi.INT) + TIOCGWINSZ = rffi_platform.DefinedConstantInteger('TIOCGWINSZ') TMS = rffi_platform.Struct( 'struct tms', [('tms_utime', rffi.INT), @@ -265,6 +266,12 @@ ('tms_cutime', rffi.INT), ('tms_cstime', rffi.INT)]) + WINSIZE = rffi_platform.Struct( + 'struct winsize', [('ws_row', rffi.USHORT), + ('ws_col', rffi.USHORT), + ('ws_xpixel', rffi.USHORT), + ('ws_ypixel', rffi.USHORT)]) + GETPGRP_HAVE_ARG = rffi_platform.Has("getpgrp(0)") SETPGRP_HAVE_ARG = rffi_platform.Has("setpgrp(0, 0)") @@ -365,15 +372,27 @@ raise OSError(get_saved_errno(), '%s failed' % name) return result +def _dup(fd, inheritable=True): + validate_fd(fd) + if inheritable: + res = c_dup(fd) + else: + res = c_dup_noninheritable(fd) + return res + @replace_os_function('dup') -def dup(fd): - validate_fd(fd) - return handle_posix_error('dup', c_dup(fd)) +def dup(fd, inheritable=True): + res = _dup(fd, inheritable) + return handle_posix_error('dup', res) @replace_os_function('dup2') -def dup2(fd, newfd): +def dup2(fd, newfd, inheritable=True): validate_fd(fd) - handle_posix_error('dup2', c_dup2(fd, newfd)) + if inheritable: + res = c_dup2(fd, newfd) + else: + res = c_dup2_noninheritable(fd, newfd) + handle_posix_error('dup2', res) #___________________________________________________________________ @@ -604,7 +623,8 @@ class CConfig: _compilation_info_ = eci DIRENT = rffi_platform.Struct('struct dirent', - [('d_name', lltype.FixedSizeArray(rffi.CHAR, 1))] + [('d_name', lltype.FixedSizeArray(rffi.CHAR, 1)), + ('d_ino', lltype.Signed)] + [('d_type', rffi.INT)] if HAVE_D_TYPE else []) if HAVE_D_TYPE: DT_UNKNOWN = 
rffi_platform.ConstantInteger('DT_UNKNOWN') @@ -628,6 +648,8 @@ macro=True, save_err=rffi.RFFI_FULL_ERRNO_ZERO) c_closedir = external('closedir', [DIRP], rffi.INT, releasegil=False) c_dirfd = external('dirfd', [DIRP], rffi.INT, releasegil=False) + c_ioctl_voidp = external('ioctl', [rffi.INT, rffi.UINT, rffi.VOIDP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) else: dirent_config = {} @@ -1113,37 +1135,69 @@ c_open_osfhandle = external('_open_osfhandle', [rffi.INTPTR_T, rffi.INT], rffi.INT) + HAVE_PIPE2 = False + HAVE_DUP3 = False + O_CLOEXEC = None else: INT_ARRAY_P = rffi.CArrayPtr(rffi.INT) c_pipe = external('pipe', [INT_ARRAY_P], rffi.INT, save_err=rffi.RFFI_SAVE_ERRNO) + class CConfig: + _compilation_info_ = eci + HAVE_PIPE2 = rffi_platform.Has('pipe2') + HAVE_DUP3 = rffi_platform.Has('dup3') + O_CLOEXEC = rffi_platform.DefinedConstantInteger('O_CLOEXEC') + config = rffi_platform.configure(CConfig) + HAVE_PIPE2 = config['HAVE_PIPE2'] + HAVE_DUP3 = config['HAVE_DUP3'] + O_CLOEXEC = config['O_CLOEXEC'] + if HAVE_PIPE2: + c_pipe2 = external('pipe2', [INT_ARRAY_P, rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) @replace_os_function('pipe') -def pipe(): +def pipe(flags=0): + # 'flags' might be ignored. Check the result. 
if _WIN32: - pread = lltype.malloc(rwin32.LPHANDLE.TO, 1, flavor='raw') - pwrite = lltype.malloc(rwin32.LPHANDLE.TO, 1, flavor='raw') - try: - if not CreatePipe( - pread, pwrite, lltype.nullptr(rffi.VOIDP.TO), 0): - raise WindowsError(rwin32.GetLastError_saved(), - "CreatePipe failed") - hread = rffi.cast(rffi.INTPTR_T, pread[0]) - hwrite = rffi.cast(rffi.INTPTR_T, pwrite[0]) - finally: - lltype.free(pwrite, flavor='raw') - lltype.free(pread, flavor='raw') - fdread = c_open_osfhandle(hread, 0) - fdwrite = c_open_osfhandle(hwrite, 1) - return (fdread, fdwrite) + # 'flags' ignored + ralloc = lltype.scoped_alloc(rwin32.LPHANDLE.TO, 1) + walloc = lltype.scoped_alloc(rwin32.LPHANDLE.TO, 1) + with ralloc as pread, walloc as pwrite: + if CreatePipe(pread, pwrite, lltype.nullptr(rffi.VOIDP.TO), 0): + hread = pread[0] + hwrite = pwrite[0] + fdread = c_open_osfhandle(rffi.cast(rffi.INTPTR_T, hread), 0) + fdwrite = c_open_osfhandle(rffi.cast(rffi.INTPTR_T, hwrite), 1) + if not (fdread == -1 or fdwrite == -1): + return (fdread, fdwrite) + rwin32.CloseHandle(hread) + rwin32.CloseHandle(hwrite) + raise WindowsError(rwin32.GetLastError_saved(), "CreatePipe failed") else: filedes = lltype.malloc(INT_ARRAY_P.TO, 2, flavor='raw') try: - handle_posix_error('pipe', c_pipe(filedes)) + if HAVE_PIPE2 and _pipe2_syscall.attempt_syscall(): + res = c_pipe2(filedes, flags) + if _pipe2_syscall.fallback(res): + res = c_pipe(filedes) + else: + res = c_pipe(filedes) # 'flags' ignored + handle_posix_error('pipe', res) return (widen(filedes[0]), widen(filedes[1])) finally: lltype.free(filedes, flavor='raw') +def pipe2(flags): + # Only available if there is really a c_pipe2 function. + # No fallback to pipe() if we get ENOSYS. 
+ filedes = lltype.malloc(INT_ARRAY_P.TO, 2, flavor='raw') + try: + res = c_pipe2(filedes, flags) + handle_posix_error('pipe2', res) + return (widen(filedes[0]), widen(filedes[1])) + finally: + lltype.free(filedes, flavor='raw') + c_link = external('link', [rffi.CCHARP, rffi.CCHARP], rffi.INT, save_err=rffi.RFFI_SAVE_ERRNO,) c_symlink = external('symlink', [rffi.CCHARP, rffi.CCHARP], rffi.INT, @@ -2079,14 +2133,47 @@ eci_inheritable = eci.merge(ExternalCompilationInfo( - separate_module_sources=[""" + separate_module_sources=[r""" +#include +#include + RPY_EXTERN int rpy_set_inheritable(int fd, int inheritable) { - /* XXX minimal impl. XXX */ - int request = inheritable ? FIONCLEX : FIOCLEX; - return ioctl(fd, request, NULL); + static int ioctl_works = -1; + int flags; + + if (ioctl_works != 0) { + int request = inheritable ? FIONCLEX : FIOCLEX; + int err = ioctl(fd, request, NULL); + if (!err) { + ioctl_works = 1; + return 0; + } + + if (errno != ENOTTY && errno != EACCES) { + return -1; + } + else { + /* ENOTTY: The ioctl is declared but not supported by the + kernel. EACCES: SELinux policy, this can be the case on + Android. 
*/ + ioctl_works = 0; + } + /* fallback to fcntl() if ioctl() does not work */ + } + + flags = fcntl(fd, F_GETFD); + if (flags < 0) + return -1; + + if (inheritable) + flags &= ~FD_CLOEXEC; + else + flags |= FD_CLOEXEC; + return fcntl(fd, F_SETFD, flags); } + RPY_EXTERN int rpy_get_inheritable(int fd) { @@ -2095,8 +2182,64 @@ return -1; return !(flags & FD_CLOEXEC); } - """], - post_include_bits=['RPY_EXTERN int rpy_set_inheritable(int, int);'])) + +RPY_EXTERN +int rpy_dup_noninheritable(int fd) +{ +#ifdef _WIN32 +#error NotImplementedError +#endif + +#ifdef F_DUPFD_CLOEXEC + return fcntl(fd, F_DUPFD_CLOEXEC, 0); +#else + fd = dup(fd); + if (fd >= 0) { + if (rpy_set_inheritable(fd, 0) != 0) { + close(fd); + return -1; + } + } + return fd; +#endif +} + +RPY_EXTERN +int rpy_dup2_noninheritable(int fd, int fd2) +{ +#ifdef _WIN32 +#error NotImplementedError +#endif + +#ifdef F_DUP2FD_CLOEXEC + return fcntl(fd, F_DUP2FD_CLOEXEC, fd2); + +#else +# if %(HAVE_DUP3)d /* HAVE_DUP3 */ + static int dup3_works = -1; + if (dup3_works != 0) { + if (dup3(fd, fd2, O_CLOEXEC) >= 0) + return 0; + if (dup3_works == -1) + dup3_works = (errno != ENOSYS); + if (dup3_works) + return -1; + } +# endif + if (dup2(fd, fd2) < 0) + return -1; + if (rpy_set_inheritable(fd2, 0) != 0) { + close(fd2); + return -1; + } + return 0; +#endif +} + """ % {'HAVE_DUP3': HAVE_DUP3}], + post_include_bits=['RPY_EXTERN int rpy_set_inheritable(int, int);\n' + 'RPY_EXTERN int rpy_get_inheritable(int);\n' + 'RPY_EXTERN int rpy_dup_noninheritable(int);\n' + 'RPY_EXTERN int rpy_dup2_noninheritable(int, int);\n'])) c_set_inheritable = external('rpy_set_inheritable', [rffi.INT, rffi.INT], rffi.INT, save_err=rffi.RFFI_SAVE_ERRNO, @@ -2104,12 +2247,56 @@ c_get_inheritable = external('rpy_get_inheritable', [rffi.INT], rffi.INT, save_err=rffi.RFFI_SAVE_ERRNO, compilation_info=eci_inheritable) +c_dup_noninheritable = external('rpy_dup_noninheritable', [rffi.INT], + rffi.INT, save_err=rffi.RFFI_SAVE_ERRNO, + 
compilation_info=eci_inheritable) +c_dup2_noninheritable = external('rpy_dup2_noninheritable', [rffi.INT,rffi.INT], + rffi.INT, save_err=rffi.RFFI_SAVE_ERRNO, + compilation_info=eci_inheritable) def set_inheritable(fd, inheritable): - error = c_set_inheritable(fd, inheritable) - handle_posix_error('set_inheritable', error) + result = c_set_inheritable(fd, inheritable) + handle_posix_error('set_inheritable', result) def get_inheritable(fd): res = c_get_inheritable(fd) res = handle_posix_error('get_inheritable', res) return res != 0 + +class SetNonInheritableCache(object): + """Make one prebuilt instance of this for each path that creates + file descriptors, where you don't necessarily know if that function + returns inheritable or non-inheritable file descriptors. + """ + _immutable_fields_ = ['cached_inheritable?'] + cached_inheritable = -1 # -1 = don't know yet; 0 = off; 1 = on + + def set_non_inheritable(self, fd): + if self.cached_inheritable == -1: + self.cached_inheritable = get_inheritable(fd) + if self.cached_inheritable == 1: + # 'fd' is inheritable; we must manually turn it off + set_inheritable(fd, False) + + def _cleanup_(self): + self.cached_inheritable = -1 + +class ENoSysCache(object): + """Cache whether a system call returns ENOSYS or not.""" + _immutable_fields_ = ['cached_nosys?'] + cached_nosys = -1 # -1 = don't know; 0 = no; 1 = yes, getting ENOSYS + + def attempt_syscall(self): + return self.cached_nosys != 1 + + def fallback(self, res): + nosys = self.cached_nosys + if nosys == -1: + nosys = (res < 0 and get_saved_errno() == errno.ENOSYS) + self.cached_nosys = nosys + return nosys + + def _cleanup_(self): + self.cached_nosys = -1 + +_pipe2_syscall = ENoSysCache() diff --git a/rpython/rlib/rposix_scandir.py b/rpython/rlib/rposix_scandir.py --- a/rpython/rlib/rposix_scandir.py +++ b/rpython/rlib/rposix_scandir.py @@ -50,3 +50,6 @@ if rposix.HAVE_D_TYPE: return rffi.getintfield(direntp, 'c_d_type') return DT_UNKNOWN + +def get_inode(direntp): + 
return rffi.getintfield(direntp, 'c_d_ino') diff --git a/rpython/rlib/rsignal.py b/rpython/rlib/rsignal.py --- a/rpython/rlib/rsignal.py +++ b/rpython/rlib/rsignal.py @@ -31,7 +31,7 @@ signal_names.append('CTRL_BREAK_EVENT') CTRL_C_EVENT = 0 CTRL_BREAK_EVENT = 1 -includes = ['stdlib.h', 'src/signals.h'] +includes = ['stdlib.h', 'src/signals.h', 'signal.h'] if sys.platform != 'win32': includes.append('sys/time.h') @@ -47,7 +47,9 @@ _compilation_info_ = eci if sys.platform != 'win32': - for name in """ITIMER_REAL ITIMER_VIRTUAL ITIMER_PROF""".split(): + for name in """ + ITIMER_REAL ITIMER_VIRTUAL ITIMER_PROF + SIG_BLOCK SIG_UNBLOCK SIG_SETMASK""".split(): setattr(CConfig, name, rffi_platform.DefinedConstantInteger(name)) CConfig.timeval = rffi_platform.Struct( @@ -71,7 +73,8 @@ pypysig_default = external('pypysig_default', [rffi.INT], lltype.Void) pypysig_setflag = external('pypysig_setflag', [rffi.INT], lltype.Void) pypysig_reinstall = external('pypysig_reinstall', [rffi.INT], lltype.Void) -pypysig_set_wakeup_fd = external('pypysig_set_wakeup_fd', [rffi.INT], rffi.INT) +pypysig_set_wakeup_fd = external('pypysig_set_wakeup_fd', + [rffi.INT, rffi.INT], rffi.INT) pypysig_poll = external('pypysig_poll', [], rffi.INT, releasegil=False) # don't bother releasing the GIL around a call to pypysig_poll: it's # pointless and a performance issue @@ -98,3 +101,20 @@ [rffi.INT, itimervalP, itimervalP], rffi.INT, save_err=rffi.RFFI_SAVE_ERRNO) c_getitimer = external('getitimer', [rffi.INT, itimervalP], rffi.INT) + +c_pthread_kill = external('pthread_kill', [lltype.Signed, rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + +if sys.platform != 'win32': + c_sigset_t = rffi.COpaquePtr('sigset_t', compilation_info=eci) + c_sigemptyset = external('sigemptyset', [c_sigset_t], rffi.INT) + c_sigaddset = external('sigaddset', [c_sigset_t, rffi.INT], rffi.INT) + c_sigismember = external('sigismember', [c_sigset_t, rffi.INT], rffi.INT) + c_sigwait = external('sigwait', [c_sigset_t, 
rffi.INTP], rffi.INT, + releasegil=True, + save_err=rffi.RFFI_SAVE_ERRNO) + c_sigpending = external('sigpending', [c_sigset_t], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + c_pthread_sigmask = external('pthread_sigmask', + [rffi.INT, c_sigset_t, c_sigset_t], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) diff --git a/rpython/rlib/rsocket.py b/rpython/rlib/rsocket.py --- a/rpython/rlib/rsocket.py +++ b/rpython/rlib/rsocket.py @@ -8,10 +8,11 @@ # XXX this does not support yet the least common AF_xxx address families # supported by CPython. See http://bugs.pypy.org/issue1942 +from errno import EINVAL from rpython.rlib import _rsocket_rffi as _c, jit, rgc from rpython.rlib.objectmodel import instantiate, keepalive_until_here from rpython.rlib.rarithmetic import intmask, r_uint -from rpython.rlib import rthread +from rpython.rlib import rthread, rposix from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.lltypesystem.rffi import sizeof, offsetof from rpython.rtyper.extregistry import ExtRegistryEntry @@ -522,12 +523,28 @@ timeout = -1.0 def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, - fd=_c.INVALID_SOCKET): + fd=_c.INVALID_SOCKET, inheritable=True): """Create a new socket.""" if _c.invalid_socket(fd): - fd = _c.socket(family, type, proto) - if _c.invalid_socket(fd): - raise self.error_handler() + if not inheritable and 'SOCK_CLOEXEC' in constants: + # Non-inheritable: we try to call socket() with + # SOCK_CLOEXEC, which may fail. If we get EINVAL, + # then we fall back to the SOCK_CLOEXEC-less case. + fd = _c.socket(family, type | SOCK_CLOEXEC, proto) + if fd < 0: + if _c.geterrno() == EINVAL: + # Linux older than 2.6.27 does not support + # SOCK_CLOEXEC. An EINVAL might be caused by + # random other things, though. Don't cache. 
+ pass + else: + raise self.error_handler() + if _c.invalid_socket(fd): + fd = _c.socket(family, type, proto) + if _c.invalid_socket(fd): + raise self.error_handler() + if not inheritable: + sock_set_inheritable(fd, False) # PLAT RISCOS self.fd = fd self.family = family @@ -630,20 +647,33 @@ return addr, addr.addr_p, addrlen_p @jit.dont_look_inside - def accept(self): + def accept(self, inheritable=True): """Wait for an incoming connection. Return (new socket fd, client address).""" if self._select(False) == 1: raise SocketTimeout address, addr_p, addrlen_p = self._addrbuf() try: - newfd = _c.socketaccept(self.fd, addr_p, addrlen_p) + remove_inheritable = not inheritable + if (not inheritable and 'SOCK_CLOEXEC' in constants + and _c.HAVE_ACCEPT4 + and _accept4_syscall.attempt_syscall()): + newfd = _c.socketaccept4(self.fd, addr_p, addrlen_p, + SOCK_CLOEXEC) + if _accept4_syscall.fallback(newfd): + newfd = _c.socketaccept(self.fd, addr_p, addrlen_p) + else: + remove_inheritable = False + else: + newfd = _c.socketaccept(self.fd, addr_p, addrlen_p) addrlen = addrlen_p[0] finally: lltype.free(addrlen_p, flavor='raw') address.unlock() if _c.invalid_socket(newfd): raise self.error_handler() + if remove_inheritable: + sock_set_inheritable(newfd, False) address.addrlen = rffi.cast(lltype.Signed, addrlen) return (newfd, address) @@ -1032,6 +1062,12 @@ return result make_socket._annspecialcase_ = 'specialize:arg(4)' +def sock_set_inheritable(fd, inheritable): + try: + rposix.set_inheritable(fd, inheritable) + except OSError as e: + raise CSocketError(e.errno) + class SocketError(Exception): applevelerrcls = 'error' def __init__(self): @@ -1090,7 +1126,7 @@ if hasattr(_c, 'socketpair'): def socketpair(family=socketpair_default_family, type=SOCK_STREAM, proto=0, - SocketClass=RSocket): + SocketClass=RSocket, inheritable=True): """socketpair([family[, type[, proto]]]) -> (socket object, socket object) Create a pair of socket objects from the sockets returned by the platform @@ 
-1100,18 +1136,41 @@ """ result = lltype.malloc(_c.socketpair_t, 2, flavor='raw') try: - res = _c.socketpair(family, type, proto, result) + res = -1 + remove_inheritable = not inheritable + if not inheritable and 'SOCK_CLOEXEC' in constants: + # Non-inheritable: we try to call socketpair() with + # SOCK_CLOEXEC, which may fail. If we get EINVAL, + # then we fall back to the SOCK_CLOEXEC-less case. + res = _c.socketpair(family, type | SOCK_CLOEXEC, + proto, result) + if res < 0: + if _c.geterrno() == EINVAL: + # Linux older than 2.6.27 does not support + # SOCK_CLOEXEC. An EINVAL might be caused by + # random other things, though. Don't cache. + pass + else: + raise last_error() + else: + remove_inheritable = False + # if res < 0: - raise last_error() + res = _c.socketpair(family, type, proto, result) + if res < 0: + raise last_error() fd0 = rffi.cast(lltype.Signed, result[0]) fd1 = rffi.cast(lltype.Signed, result[1]) finally: lltype.free(result, flavor='raw') + if remove_inheritable: + sock_set_inheritable(fd0, False) + sock_set_inheritable(fd1, False) return (make_socket(fd0, family, type, proto, SocketClass), make_socket(fd1, family, type, proto, SocketClass)) if _c.WIN32: - def dup(fd): + def dup(fd, inheritable=True): with lltype.scoped_alloc(_c.WSAPROTOCOL_INFO, zero=True) as info: if _c.WSADuplicateSocket(fd, rwin32.GetCurrentProcessId(), info): raise last_error() @@ -1122,15 +1181,16 @@ raise last_error() return result else: - def dup(fd): - return _c.dup(fd) - From pypy.commits at gmail.com Fri Sep 2 02:49:15 2016 From: pypy.commits at gmail.com (mattip) Date: Thu, 01 Sep 2016 23:49:15 -0700 (PDT) Subject: [pypy-commit] pypy buffer-interface: fix micronumpy test_zjit by adding _attrs_ to the class Message-ID: <57c920eb.05d71c0a.db689.9858@mx.google.com> Author: Matti Picus Branch: buffer-interface Changeset: r86835:6cebc47b780b Date: 2016-09-02 09:47 +0300 http://bitbucket.org/pypy/pypy/changeset/6cebc47b780b/ Log: fix micronumpy test_zjit by adding _attrs_ 
to the class diff --git a/pypy/objspace/std/memoryobject.py b/pypy/objspace/std/memoryobject.py --- a/pypy/objspace/std/memoryobject.py +++ b/pypy/objspace/std/memoryobject.py @@ -14,6 +14,7 @@ """Implement the built-in 'memoryview' type as a wrapper around an interp-level buffer. """ + _attrs_ = ['buf'] def __init__(self, buf): assert isinstance(buf, Buffer) From pypy.commits at gmail.com Fri Sep 2 03:11:51 2016 From: pypy.commits at gmail.com (ntruessel) Date: Fri, 02 Sep 2016 00:11:51 -0700 (PDT) Subject: [pypy-commit] pypy quad-color-gc: Remove object layout hack, update qcgc codebase Message-ID: <57c92637.44ce1c0a.c2e76.9f88@mx.google.com> Author: Nicolas Truessel Branch: quad-color-gc Changeset: r86837:d98166b96e01 Date: 2016-09-02 09:11 +0200 http://bitbucket.org/pypy/pypy/changeset/d98166b96e01/ Log: Remove object layout hack, update qcgc codebase diff --git a/rpython/memory/gc/qcgc.py b/rpython/memory/gc/qcgc.py --- a/rpython/memory/gc/qcgc.py +++ b/rpython/memory/gc/qcgc.py @@ -1,5 +1,5 @@ from rpython.memory.gc.base import GCBase -#from rpython.memory.support import mangle_hash +from rpython.memory.support import mangle_hash from rpython.rtyper.lltypesystem import rffi, lltype, llgroup, llmemory, llarena from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rlib.debug import ll_assert @@ -19,22 +19,19 @@ gcflag_extra = 0 # or a real GC flag that is always 0 when not collecting typeid_is_in_field = 'tid' - withhash_flag_is_in_field = 'flags', QCGC_HAS_HASH TRANSLATION_PARAMS = {} HDR = lltype.Struct( 'header', - #('hdr', rffi.COpaque('object_t', hints={"is_qcgc_header": True})), - ('flags', lltype.Signed), # XXX: exploits knowledge about object_t + ('hdr', rffi.COpaque('object_t', hints={"is_qcgc_header": True})), ('tid', lltype.Signed), ('hash', lltype.Signed)) #HDR = rffi.COpaque('object_t') - def init_gc_object(self, addr, typeid, flags=0): + def init_gc_object(self, addr, typeid): hdr = llmemory.cast_adr_to_ptr(addr, 
lltype.Ptr(self.HDR)) - hdr.flags = rffi.cast(lltype.Signed, flags) hdr.tid = rffi.cast(lltype.Signed, typeid) - hdr.hash = rffi.cast(lltype.Signed, addr) + hdr.hash = rffi.cast(lltype.Signed, 0) def malloc_fixedsize_clear(self, typeid, size, needs_finalizer=False, @@ -63,15 +60,10 @@ (obj + offset_to_length).signed[0] = length return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF) - def init_gc_object_immortal(self, addr, typeid, flags=0): # XXX: Prebuilt Objects? + def init_gc_object_immortal(self, addr, typeid, flags=0): assert flags == 0 - ptr = self.gcheaderbuilder.object_from_header(addr.ptr) - prebuilt_hash = lltype.identityhash_nocache(ptr) - assert prebuilt_hash != 0 - flags |= QCGC_PREBUILT_OBJECT # - self.init_gc_object(addr, typeid.index, flags) - llmemory.cast_adr_to_ptr(addr, lltype.Ptr(self.HDR)).hash = prebuilt_hash + self.init_gc_object(addr, typeid.index) def collect(self, gen=1): """Do a minor (gen=0) or major (gen>0) collection.""" @@ -84,11 +76,7 @@ # this. Unfortunately I don't fully understand what this is supposed to # do, so I can't optimize it ATM. return False - # Possible implementation? 
- #llop.gc_writebarrier(dest_addr) - #return True - # XXX: WRITE BARRIER def write_barrier(self, addr_struct): llop.qcgc_write_barrier(lltype.Void, addr_struct) @@ -97,14 +85,14 @@ pass def id_or_identityhash(self, gcobj, is_hash): - hdr = self.header(llmemory.cast_ptr_to_adr(gcobj)) - has_hash = (hdr.flags & QCGC_HAS_HASH) + obj = llmemory.cast_ptr_to_adr(gcobj) + hdr = self.header(obj) i = hdr.hash # - if is_hash: - if has_hash: - return i # Do not mangle for objects with built in hash - i = i ^ (i >> 5) + if i == 0: + i = llmemory.cast_adr_to_int(obj) + if is_hash: + i = mangle_hash(i) return i def id(self, gcobje): diff --git a/rpython/memory/gctransform/qcgcframework.py b/rpython/memory/gctransform/qcgcframework.py --- a/rpython/memory/gctransform/qcgcframework.py +++ b/rpython/memory/gctransform/qcgcframework.py @@ -36,16 +36,10 @@ def gc_header_for(self, obj, needs_hash=False): hdr = self.gcdata.gc.gcheaderbuilder.header_of_object(obj) - withhash, flag = self.gcdata.gc.withhash_flag_is_in_field - x = getattr(hdr, withhash) - TYPE = lltype.typeOf(x) - x = lltype.cast_primitive(lltype.Signed, x) if needs_hash: - x |= flag # set the flag in the header + hdr.hash = lltype.identityhash_nocache(obj._as_ptr()) else: - x &= ~flag # clear the flag in the header - x = lltype.cast_primitive(TYPE, x) - setattr(hdr, withhash, x) + assert hdr.hash == 0 return hdr def push_roots(self, hop, keep_current_args=False): diff --git a/rpython/translator/c/src/qcgc/allocator.c b/rpython/translator/c/src/qcgc/allocator.c --- a/rpython/translator/c/src/qcgc/allocator.c +++ b/rpython/translator/c/src/qcgc/allocator.c @@ -11,6 +11,7 @@ QCGC_STATIC void bump_allocator_assign(cell_t *ptr, size_t cells); QCGC_STATIC void bump_allocator_advance(size_t cells); +QCGC_STATIC void bump_allocator_renew_block(void); QCGC_STATIC bool is_small(size_t cells); QCGC_STATIC size_t small_index(size_t cells); @@ -26,6 +27,7 @@ void qcgc_allocator_initialize(void) { qcgc_allocator_state.arenas = 
qcgc_arena_bag_create(QCGC_ARENA_BAG_INIT_SIZE); + qcgc_allocator_state.free_arenas = qcgc_arena_bag_create(4); // XXX // Bump Allocator qcgc_allocator_state.bump_state.bump_ptr = NULL; @@ -59,10 +61,24 @@ qcgc_arena_destroy(qcgc_allocator_state.arenas->items[i]); } + arena_count = qcgc_allocator_state.free_arenas->count; + for (size_t i = 0; i < arena_count; i++) { + qcgc_arena_destroy(qcgc_allocator_state.free_arenas->items[i]); + } + free(qcgc_allocator_state.arenas); + free(qcgc_allocator_state.free_arenas); } void qcgc_fit_allocator_add(cell_t *ptr, size_t cells) { +#if CHECKED + if (cells > 0) { + assert((((object_t *)ptr)->flags & QCGC_PREBUILT_OBJECT) == 0); + assert((cell_t *) qcgc_arena_addr(ptr) != ptr); + assert(qcgc_arena_get_blocktype(ptr) == BLOCK_FREE || + qcgc_arena_get_blocktype(ptr) == BLOCK_EXTENT); + } +#endif if (cells > 0) { if (is_small(cells)) { size_t index = small_index(cells); @@ -80,30 +96,17 @@ } } -QCGC_STATIC void bump_allocator_assign(cell_t *ptr, size_t cells) { - qcgc_allocator_state.bump_state.bump_ptr = ptr; - qcgc_allocator_state.bump_state.remaining_cells = cells; -} - -QCGC_STATIC void bump_allocator_advance(size_t cells) { - qcgc_allocator_state.bump_state.bump_ptr += cells; - qcgc_allocator_state.bump_state.remaining_cells -= cells; -} - /******************************************************************************* - * Allocators * + * Bump Allocator * ******************************************************************************/ object_t *qcgc_bump_allocate(size_t bytes) { +#if CHECKED + assert(bytes <= 1< qcgc_allocator_state.bump_state.remaining_cells) { - // Grab a new arena - // FIXME: Add remaining memory to fit allocator - arena_t *arena = qcgc_arena_create(); - bump_allocator_assign(&(arena->cells[QCGC_ARENA_FIRST_CELL_INDEX]), - QCGC_ARENA_CELLS_COUNT - QCGC_ARENA_FIRST_CELL_INDEX); - qcgc_allocator_state.arenas = - qcgc_arena_bag_add(qcgc_allocator_state.arenas, arena); + bump_allocator_renew_block(); } cell_t 
*mem = qcgc_allocator_state.bump_state.bump_ptr; bump_allocator_advance(cells); @@ -116,9 +119,92 @@ #endif result->flags |= QCGC_GRAY_FLAG; +#if CHECKED + assert(qcgc_arena_is_coalesced(qcgc_arena_addr((cell_t *)result))); + if (qcgc_allocator_state.bump_state.remaining_cells > 0) { + assert(qcgc_arena_get_blocktype( + qcgc_allocator_state.bump_state.bump_ptr) == BLOCK_FREE); + for (size_t i = 1; i < qcgc_allocator_state.bump_state.remaining_cells; + i++) { + assert(qcgc_arena_get_blocktype( + qcgc_allocator_state.bump_state.bump_ptr + i) + == BLOCK_EXTENT); + } + } +#endif return result; } +QCGC_STATIC void bump_allocator_renew_block(void) { +#if CHECKED + if (qcgc_allocator_state.bump_state.remaining_cells > 0) { + assert(qcgc_arena_get_blocktype( + qcgc_allocator_state.bump_state.bump_ptr) == BLOCK_FREE); + for (size_t i = 1; i < qcgc_allocator_state.bump_state.remaining_cells; + i++) { + assert(qcgc_arena_get_blocktype( + qcgc_allocator_state.bump_state.bump_ptr + i) + == BLOCK_EXTENT); + } + } +#endif + // Add remaining memory to fit allocator + qcgc_fit_allocator_add(qcgc_allocator_state.bump_state.bump_ptr, + qcgc_allocator_state.bump_state.remaining_cells); + + // Try finding some huge block from fit allocator + exp_free_list_t *free_list = qcgc_allocator_state.fit_state. 
+ large_free_list[QCGC_LARGE_FREE_LISTS - 1]; + while (free_list->count > 0 && !valid_block(free_list->items[0].ptr, + free_list->items[0].size)) { + free_list = qcgc_exp_free_list_remove_index(free_list, 0); + } + + if (free_list->count > 0) { + // Assign huge block to bump allocator + bump_allocator_assign(free_list->items[0].ptr, + free_list->items[0].size); + free_list = qcgc_exp_free_list_remove_index(free_list, 0); + } else { + // Grab a new arena + arena_t *arena = qcgc_arena_create(); + bump_allocator_assign(&(arena->cells[QCGC_ARENA_FIRST_CELL_INDEX]), + QCGC_ARENA_CELLS_COUNT - QCGC_ARENA_FIRST_CELL_INDEX); + qcgc_allocator_state.arenas = + qcgc_arena_bag_add(qcgc_allocator_state.arenas, arena); + } + + qcgc_allocator_state.fit_state. + large_free_list[QCGC_LARGE_FREE_LISTS - 1] = free_list; +#if CHECKED + assert(qcgc_allocator_state.bump_state.bump_ptr != NULL); + assert(qcgc_arena_get_blocktype(qcgc_allocator_state.bump_state.bump_ptr) == + BLOCK_FREE); + for (size_t i = 1; i < qcgc_allocator_state.bump_state.remaining_cells; + i++) { + assert(qcgc_arena_get_blocktype( + qcgc_allocator_state.bump_state.bump_ptr + i) + == BLOCK_EXTENT); + } +#endif +} + +QCGC_STATIC void bump_allocator_assign(cell_t *ptr, size_t cells) { +#if CHECKED + assert(qcgc_arena_get_blocktype(ptr) == BLOCK_FREE); + for (size_t i = 1; i < cells; i++) { + assert(qcgc_arena_get_blocktype(ptr + i) == BLOCK_EXTENT); + } +#endif + qcgc_allocator_state.bump_state.bump_ptr = ptr; + qcgc_allocator_state.bump_state.remaining_cells = cells; +} + +QCGC_STATIC void bump_allocator_advance(size_t cells) { + qcgc_allocator_state.bump_state.bump_ptr += cells; + qcgc_allocator_state.bump_state.remaining_cells -= cells; +} + object_t *qcgc_fit_allocate(size_t bytes) { size_t cells = bytes_to_cells(bytes); cell_t *mem; @@ -291,7 +377,7 @@ cells = cells >> QCGC_LARGE_FREE_LIST_FIRST_EXP; // calculates floor(log(cells)) - return (8 * sizeof(unsigned long)) - __builtin_clzl(cells) - 1; + return MIN((8 
* sizeof(unsigned long)) - __builtin_clzl(cells) - 1, QCGC_LARGE_FREE_LISTS - 1); } QCGC_STATIC size_t small_index_to_cells(size_t index) { @@ -306,6 +392,7 @@ assert(ptr != NULL); assert(cells > 0); #endif - return (qcgc_arena_get_blocktype(ptr) == BLOCK_FREE && - qcgc_arena_get_blocktype(ptr + cells) != BLOCK_EXTENT); + return (qcgc_arena_get_blocktype(ptr) == BLOCK_FREE && ( + ((qcgc_arena_addr(ptr + cells)) == (arena_t *) (ptr + cells)) || + qcgc_arena_get_blocktype(ptr + cells) != BLOCK_EXTENT)); } diff --git a/rpython/translator/c/src/qcgc/allocator.h b/rpython/translator/c/src/qcgc/allocator.h --- a/rpython/translator/c/src/qcgc/allocator.h +++ b/rpython/translator/c/src/qcgc/allocator.h @@ -17,6 +17,7 @@ * +---+---+-----+----+ * size (cells): | 1 | 2 | ... | 31 | * +---+---+-----+----+ + * (31 is 2^QCGC_LARGE_FREE_LIST_FIRST_EXP - 1) * * Large free lists: * +-----+-----+-----+---------+ @@ -24,18 +25,21 @@ * +-----+-----+-----+---------+ * minimal size (cells): | 2^5 | 2^6 | ... | 2^(x+5) | * +-----+-----+-----+---------+ + * (5 is QCGC_LARGE_FREE_LIST_FIRST_EXP) * - * where x is chosen such that x + 5 + 1 = QCGC_ARENA_SIZE_EXP - 4 (i.e. the - * next bin would hold chunks that have the size of at least one arena size, - * which is impossible as an arena contains overhead) + * where x is chosen such that 2^(x + 5) = 2^QCGC_LARGE_ALLOC_THRESHOLD_EXP + * (i.e. such that the last bin contains all blocks that are larger or equal + * than the threshold for huge blocks. 
These blocks can be returned to the + * bump allocator) */ - -#define QCGC_LARGE_FREE_LISTS (QCGC_ARENA_SIZE_EXP - 4 - QCGC_LARGE_FREE_LIST_FIRST_EXP) +#define QCGC_LARGE_FREE_LISTS (QCGC_LARGE_ALLOC_THRESHOLD_EXP - QCGC_LARGE_FREE_LIST_FIRST_EXP - 4 + 1) +// -4 because of turning bytes into cells, +1 because we start to count at 0 #define QCGC_SMALL_FREE_LISTS ((1< #include +#if DEBUG_ZERO_ON_SWEEP +#include +#endif + #include "allocator.h" #include "event_logger.h" @@ -146,12 +150,27 @@ void qcgc_arena_mark_allocated(cell_t *ptr, size_t cells) { size_t index = qcgc_arena_cell_index(ptr); arena_t *arena = qcgc_arena_addr(ptr); +#if CHECKED + assert(get_blocktype(arena, index) == BLOCK_FREE); + for (size_t i = 1; i < cells; i++) { + assert(get_blocktype(arena, index + i) == BLOCK_EXTENT); + } +#endif set_blocktype(arena, index, BLOCK_WHITE); size_t index_of_next_block = index + cells; if (index_of_next_block < QCGC_ARENA_CELLS_COUNT && get_blocktype(arena, index_of_next_block) == BLOCK_EXTENT) { set_blocktype(arena, index_of_next_block, BLOCK_FREE); } +#if CHECKED + assert(get_blocktype(arena, index) == BLOCK_WHITE); + for (size_t i = 1; i < cells; i++) { + assert(get_blocktype(arena, index + i) == BLOCK_EXTENT); + } + if (index_of_next_block < QCGC_ARENA_CELLS_COUNT) { + assert(get_blocktype(arena, index + cells) != BLOCK_EXTENT); + } +#endif } void qcgc_arena_mark_free(cell_t *ptr) { @@ -164,15 +183,35 @@ assert(arena != NULL); assert(qcgc_arena_is_coalesced(arena)); #endif +#if DEBUG_ZERO_ON_SWEEP + bool zero = true; +#endif bool free = true; bool coalesce = false; bool add_to_free_list = false; size_t last_free_cell = QCGC_ARENA_FIRST_CELL_INDEX; + + if (qcgc_arena_addr(qcgc_allocator_state.bump_state.bump_ptr) == arena) { + for (size_t cell = QCGC_ARENA_FIRST_CELL_INDEX; + cell < QCGC_ARENA_CELLS_COUNT; + cell++) { + if (get_blocktype(arena, cell) == BLOCK_BLACK) { + set_blocktype(arena, cell, BLOCK_WHITE); + } + } + return false; + } + for (size_t cell = 
QCGC_ARENA_FIRST_CELL_INDEX; cell < QCGC_ARENA_CELLS_COUNT; cell++) { - switch (qcgc_arena_get_blocktype(arena->cells + cell)) { + switch (get_blocktype(arena, cell)) { case BLOCK_EXTENT: +#if DEBUG_ZERO_ON_SWEEP + if (zero) { + memset(&arena->cells[cell], 0, sizeof(cell_t)); + } +#endif break; case BLOCK_FREE: if (coalesce) { @@ -181,6 +220,10 @@ last_free_cell = cell; } coalesce = true; +#if DEBUG_ZERO_ON_SWEEP + zero = true; + memset(&arena->cells[cell], 0, sizeof(cell_t)); +#endif break; case BLOCK_WHITE: if (coalesce) { @@ -191,25 +234,33 @@ } coalesce = true; add_to_free_list = true; +#if DEBUG_ZERO_ON_SWEEP + zero = true; + memset(&arena->cells[cell], 0, sizeof(cell_t)); +#endif break; case BLOCK_BLACK: set_blocktype(arena, cell, BLOCK_WHITE); if (add_to_free_list) { - qcgc_fit_allocator_add(&(arena->cells[last_free_cell]), + qcgc_fit_allocator_add(arena->cells + last_free_cell, cell - last_free_cell); } free = false; coalesce = false; add_to_free_list = false; +#if DEBUG_ZERO_ON_SWEEP + zero = false; +#endif break; } } if (add_to_free_list && !free) { - qcgc_fit_allocator_add(&(arena->cells[last_free_cell]), + qcgc_fit_allocator_add(arena->cells + last_free_cell, QCGC_ARENA_CELLS_COUNT - last_free_cell); } #if CHECKED assert(qcgc_arena_is_coalesced(arena)); + assert(free == qcgc_arena_is_empty(arena)); #endif return free; } diff --git a/rpython/translator/c/src/qcgc/config.h b/rpython/translator/c/src/qcgc/config.h --- a/rpython/translator/c/src/qcgc/config.h +++ b/rpython/translator/c/src/qcgc/config.h @@ -1,13 +1,15 @@ #pragma once -#define CHECKED 1 // Enable runtime sanity checks +#define CHECKED 0 // Enable runtime sanity checks + // warning: huge performance impact +#define DEBUG_ZERO_ON_SWEEP 0 // Zero memory on sweep (debug only) #define QCGC_INIT_ZERO 1 // Init new objects with zero bytes /** * Event logger */ -#define EVENT_LOG 1 // Enable event log +#define EVENT_LOG 0 // Enable event log #define LOGFILE "./qcgc_events.log" // Default logfile 
#define LOG_ALLOCATION 0 // Enable allocation log (warning: // significant performance impact) @@ -16,7 +18,7 @@ // shadow stack #define QCGC_ARENA_BAG_INIT_SIZE 16 // Initial size of the arena bag #define QCGC_ARENA_SIZE_EXP 20 // Between 16 (64kB) and 20 (1MB) -#define QCGC_LARGE_ALLOC_THRESHOLD 1<<14 +#define QCGC_LARGE_ALLOC_THRESHOLD_EXP 14 // Less than QCGC_ARENA_SIZE_EXP #define QCGC_MARK_LIST_SEGMENT_SIZE 64 // TODO: Tune for performance #define QCGC_GRAY_STACK_INIT_SIZE 128 // TODO: Tune for performance #define QCGC_INC_MARK_MIN 64 // TODO: Tune for performance @@ -32,8 +34,16 @@ * DO NOT MODIFY BELOW HERE */ +#if QCGC_LARGE_ALLOC_THRESHOLD_EXP >= QCGC_ARENA_SIZE_EXP +#error "Inconsistent configuration. Huge block threshold must be smaller " \ + "than the arena size." +#endif + #ifdef TESTING #define QCGC_STATIC #else #define QCGC_STATIC static #endif + +#define MAX(a,b) (((a)>(b))?(a):(b)) +#define MIN(a,b) (((a)<(b))?(a):(b)) diff --git a/rpython/translator/c/src/qcgc/qcgc.c b/rpython/translator/c/src/qcgc/qcgc.c --- a/rpython/translator/c/src/qcgc/qcgc.c +++ b/rpython/translator/c/src/qcgc/qcgc.c @@ -10,10 +10,6 @@ #include "hugeblocktable.h" #include "event_logger.h" -// TODO: Eventually move to own header? 
-#define MAX(a,b) (((a)>(b))?(a):(b)) -#define MIN(a,b) (((a)<(b))?(a):(b)) - void qcgc_mark(bool incremental); void qcgc_pop_object(object_t *object); void qcgc_push_object(object_t *object); @@ -117,7 +113,7 @@ (uint8_t *) &size); #endif object_t *result; - if (size <= QCGC_LARGE_ALLOC_THRESHOLD) { + if (size <= 1< 0) { object_t *top = qcgc_gray_stack_top(qcgc_state.gp_gray_stack); - qcgc_state.gp_gray_stack = - qcgc_gray_stack_pop(qcgc_state.gp_gray_stack); + qcgc_state.gp_gray_stack = qcgc_gray_stack_pop( + qcgc_state.gp_gray_stack); qcgc_pop_object(top); to_process--; } @@ -225,10 +216,8 @@ (arena->gray_stack->index)); while (to_process > 0) { - object_t *top = - qcgc_gray_stack_top(arena->gray_stack); - arena->gray_stack = - qcgc_gray_stack_pop(arena->gray_stack); + object_t *top = qcgc_gray_stack_top(arena->gray_stack); + arena->gray_stack = qcgc_gray_stack_pop(arena->gray_stack); qcgc_pop_object(top); to_process--; } @@ -247,6 +236,7 @@ qcgc_event_logger_log(EVENT_MARK_DONE, 0, NULL); #if CHECKED assert(incremental || (qcgc_state.phase = GC_COLLECT)); + assert(qcgc_state.phase != GC_PAUSE); #endif } @@ -319,8 +309,22 @@ (uint8_t *) &arena_count); qcgc_hbtable_sweep(); - for (size_t i = 0; i < qcgc_allocator_state.arenas->count; i++) { - qcgc_arena_sweep(qcgc_allocator_state.arenas->items[i]); + size_t i = 0; + while (i < qcgc_allocator_state.arenas->count) { + arena_t *arena = qcgc_allocator_state.arenas->items[i]; + // The arena that contains the bump pointer is autmatically skipped + if (qcgc_arena_sweep(arena)) { + // Free + qcgc_allocator_state.arenas = qcgc_arena_bag_remove_index( + qcgc_allocator_state.arenas, i); + qcgc_allocator_state.free_arenas = qcgc_arena_bag_add( + qcgc_allocator_state.free_arenas, arena); + + // NO i++ + } else { + // Not free + i++; + } } qcgc_state.phase = GC_PAUSE; diff --git a/rpython/translator/c/src/qcgc/qcgc.h b/rpython/translator/c/src/qcgc/qcgc.h --- a/rpython/translator/c/src/qcgc/qcgc.h +++ 
b/rpython/translator/c/src/qcgc/qcgc.h @@ -27,6 +27,7 @@ MARK_COLOR_LIGHT_GRAY, MARK_COLOR_DARK_GRAY, MARK_COLOR_BLACK, + MARK_COLOR_INVALID, } mark_color_t; /** From pypy.commits at gmail.com Fri Sep 2 09:57:45 2016 From: pypy.commits at gmail.com (sbauman) Date: Fri, 02 Sep 2016 06:57:45 -0700 (PDT) Subject: [pypy-commit] pypy force-virtual-state: Add test for edge case when forcing virtual args (currently failing) Message-ID: <57c98559.12331c0a.4f134.4002@mx.google.com> Author: Spenser Andrew Bauman Branch: force-virtual-state Changeset: r86838:fde1ff5108d5 Date: 2016-09-01 22:12 -0400 http://bitbucket.org/pypy/pypy/changeset/fde1ff5108d5/ Log: Add test for edge case when forcing virtual args (currently failing) diff --git a/rpython/jit/metainterp/test/test_virtual.py b/rpython/jit/metainterp/test/test_virtual.py --- a/rpython/jit/metainterp/test/test_virtual.py +++ b/rpython/jit/metainterp/test/test_virtual.py @@ -1006,6 +1006,41 @@ self.check_target_token_count(2) self.check_trace_count(3) + def test_conflated_virtual_states(self): + # All cases are covered when forcing one component of the virtual state + # also forces an as yet unseen component. + # i.e. 
expect [NotVirtual, Virtual] and given a pair of aliasing virtual + # objects + driver = JitDriver(greens=[], reds=['i', 'v1', 'v2']) + class Box(object): + def __init__(self, v): + self.v = v + + class X(object): + def __init__(self, v): + self.v = v + + const = Box(X(0)) + def f(): + set_param(None, 'retrace_limit', -1) + set_param(None, 'threshold', 1) + i = 0 + v1 = X(0) + v2 = X(0) + const.v = X(0) + while i < 17: + driver.jit_merge_point(i=i, v1=v1, v2=v2) + driver.can_enter_jit(i=i, v1=v1, v2=v2) + if i & 1 == 0: + v1 = const.v + v2 = X(i) + else: + v1 = v2 = X(i) + i += 1 + return None + self.meta_interp(f, []) + # assert did not crash + class VirtualMiscTests: def test_multiple_equal_virtuals(self): From pypy.commits at gmail.com Fri Sep 2 11:14:35 2016 From: pypy.commits at gmail.com (vext01) Date: Fri, 02 Sep 2016 08:14:35 -0700 (PDT) Subject: [pypy-commit] pypy asmmemmgr-for-code-only: Another W^X site. Message-ID: <57c9975b.8628c20a.f6b0b.c572@mx.google.com> Author: Edd Barrett Branch: asmmemmgr-for-code-only Changeset: r86839:5e719144f315 Date: 2016-09-02 16:13 +0100 http://bitbucket.org/pypy/pypy/changeset/5e719144f315/ Log: Another W^X site. 
diff --git a/rpython/rlib/clibffi.py b/rpython/rlib/clibffi.py --- a/rpython/rlib/clibffi.py +++ b/rpython/rlib/clibffi.py @@ -8,7 +8,7 @@ from rpython.rlib.unroll import unrolling_iterable from rpython.rlib.rarithmetic import intmask, is_emulated_long from rpython.rlib.objectmodel import we_are_translated -from rpython.rlib.rmmap import alloc +from rpython.rlib.rmmap import alloc, set_pages_executable, set_pages_writable from rpython.rlib.rdynload import dlopen, dlclose, dlsym, dlsym_byordinal from rpython.rlib.rdynload import DLOpenError, DLLHANDLE from rpython.rlib import jit, rposix @@ -446,10 +446,13 @@ def _more(self): chunk = rffi.cast(CLOSURES, alloc(CHUNK)) count = CHUNK//rffi.sizeof(FFI_CLOSUREP.TO) + chunk_p = rffi.cast(rffi.CCHARP, chunk) + set_pages_writable(chunk_p, CHUNK) for i in range(count): rffi.cast(rffi.VOIDPP, chunk)[0] = self.free_list self.free_list = rffi.cast(rffi.VOIDP, chunk) chunk = rffi.ptradd(chunk, 1) + set_pages_executable(chunk_p, CHUNK) def alloc(self): if not self.free_list: From pypy.commits at gmail.com Fri Sep 2 11:14:37 2016 From: pypy.commits at gmail.com (vext01) Date: Fri, 02 Sep 2016 08:14:37 -0700 (PDT) Subject: [pypy-commit] pypy asmmemmgr-for-code-only: Mark an argument we send to a C-callback as writable. Message-ID: <57c9975d.041f1c0a.fbb56.6686@mx.google.com> Author: Edd Barrett Branch: asmmemmgr-for-code-only Changeset: r86840:42b309a4db14 Date: 2016-09-02 16:14 +0100 http://bitbucket.org/pypy/pypy/changeset/42b309a4db14/ Log: Mark an argument we send to a C-callback as writable. 
diff --git a/rpython/rlib/clibffi.py b/rpython/rlib/clibffi.py --- a/rpython/rlib/clibffi.py +++ b/rpython/rlib/clibffi.py @@ -540,9 +540,14 @@ track_allocation=False) self.ll_userdata.callback = rffi.llhelper(CALLBACK_TP, func) self.ll_userdata.addarg = additional_arg + + ll_closure_p = rffi.cast(rffi.CCHARP, self.ll_closure) + set_pages_writable(ll_closure_p, CHUNK) res = c_ffi_prep_closure(self.ll_closure, self.ll_cif, ll_callback, rffi.cast(rffi.VOIDP, self.ll_userdata)) + set_pages_executable(ll_closure_p, CHUNK) + if not res == FFI_OK: raise LibFFIError From pypy.commits at gmail.com Fri Sep 2 11:27:54 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 02 Sep 2016 08:27:54 -0700 (PDT) Subject: [pypy-commit] pypy buffer-interface: add a flags test from numpy to test_memoryobject.py, implement enough to run it Message-ID: <57c99a7a.482cc20a.5879c.c8f3@mx.google.com> Author: Matti Picus Branch: buffer-interface Changeset: r86841:0c283916dcf6 Date: 2016-09-02 14:20 +0300 http://bitbucket.org/pypy/pypy/changeset/0c283916dcf6/ Log: add a flags test from numpy to test_memoryobject.py, implement enough to run it diff --git a/pypy/module/cpyext/buffer.py b/pypy/module/cpyext/buffer.py --- a/pypy/module/cpyext/buffer.py +++ b/pypy/module/cpyext/buffer.py @@ -1,8 +1,8 @@ from pypy.interpreter.error import oefmt from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( - cpython_api, CANNOT_FAIL, Py_buffer, Py_TPFLAGS_HAVE_NEWBUFFER) -from pypy.module.cpyext.pyobject import PyObject + cpython_api, CANNOT_FAIL, Py_buffer, Py_TPFLAGS_HAVE_NEWBUFFER, Py_ssize_tP) +from pypy.module.cpyext.pyobject import PyObject, as_pyobj, incref @cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) def PyObject_CheckBuffer(space, pyobj): @@ -33,13 +33,80 @@ raise an error if the object can't support a simpler view of its memory. 
0 is returned on success and -1 on error.""" - raise oefmt(space.w_TypeError, - "PyPy does not yet implement the new buffer interface") + buf = space.call_method(w_obj, "__buffer__", space.newint(flags)) + try: + view.c_buf = rffi.cast(rffi.VOIDP, buf.get_raw_address()) + except ValueError: + raise BufferError("could not create buffer from object") + view.c_len = buf.getlength() + view.c_obj = as_pyobj(space, w_obj) + incref(space, view.c_obj) + ndim = buf.getndim() + view.c_itemsize = buf.getitemsize() + rffi.setintfield(view, 'c_readonly', int(buf.readonly)) + rffi.setintfield(view, 'c_ndim', ndim) + view.c_format = rffi.str2charp(buf.getformat()) + view.c_shape = lltype.malloc(Py_ssize_tP.TO, ndim, flavor='raw') + view.c_strides = lltype.malloc(Py_ssize_tP.TO, ndim, flavor='raw') + shape = buf.getshape() + strides = buf.getstrides() + for i in range(ndim): + view.c_shape[i] = shape[i] + view.c_strides[i] = strides[i] + view.c_suboffsets = lltype.nullptr(Py_ssize_tP.TO) + view.c_internal = lltype.nullptr(rffi.VOIDP.TO) + return 0 + +def _IsFortranContiguous(view): + if view.ndim == 0: + return 1 + if not view.strides: + return view.ndim == 1 + sd = view.itemsize + if view.ndim == 1: + return view.shape[0] == 1 or sd == view.strides[0] + for i in range(view.ndim): + dim = view.shape[i] + if dim == 0: + return 1 + if view.strides[i] != sd: + return 0 + sd *= dim + return 1 + +def _IsCContiguous(view): + if view.ndim == 0: + return 1 + if not view.strides: + return view.ndim == 1 + sd = view.itemsize + if view.ndim == 1: + return view.shape[0] == 1 or sd == view.strides[0] + for i in range(view.ndim-1, -1, -1): + dim = view.shape[i] + if dim == 0: + return 1 + if view.strides[i] != sd: + return 0 + sd *= dim + return 1 + @cpython_api([lltype.Ptr(Py_buffer), lltype.Char], rffi.INT_real, error=CANNOT_FAIL) -def PyBuffer_IsContiguous(space, view, fortran): +def PyBuffer_IsContiguous(space, view, fort): """Return 1 if the memory defined by the view is C-style (fortran 
is 'C') or Fortran-style (fortran is 'F') contiguous or either one (fortran is 'A'). Return 0 otherwise.""" - # PyPy only supports contiguous Py_buffers for now. - return 1 + # traverse the strides, checking for consistent stride increases from + # right-to-left (c) or left-to-right (fortran). Copied from cpython + if not view.suboffsets: + return 0 + if (fort == 'C'): + return _IsCContiguous(view) + elif (fort == 'F'): + return _IsFortranContiguous(view) + elif (fort == 'A'): + return (_IsCContiguous(view) or _IsFortranContiguous(view)) + return 0 + + diff --git a/pypy/module/cpyext/memoryobject.py b/pypy/module/cpyext/memoryobject.py --- a/pypy/module/cpyext/memoryobject.py +++ b/pypy/module/cpyext/memoryobject.py @@ -13,6 +13,7 @@ @cpython_api([PyObject], PyObject) def PyMemoryView_GET_BASE(space, w_obj): # return the obj field of the Py_buffer created by PyMemoryView_GET_BUFFER + # XXX needed for numpy on py3k raise NotImplementedError('PyMemoryView_GET_BUFFER') @cpython_api([PyObject], lltype.Ptr(Py_buffer), error=CANNOT_FAIL) diff --git a/pypy/module/cpyext/test/buffer_test.c b/pypy/module/cpyext/test/buffer_test.c --- a/pypy/module/cpyext/test/buffer_test.c +++ b/pypy/module/cpyext/test/buffer_test.c @@ -198,10 +198,118 @@ return PyInt_FromLong(view->len); } +/* Copied from numpy tests */ +/* + * Create python string from a FLAG and or the corresponding PyBuf flag + * for the use in get_buffer_info. + */ +#define GET_PYBUF_FLAG(FLAG) \ + buf_flag = PyUnicode_FromString(#FLAG); \ + flag_matches = PyObject_RichCompareBool(buf_flag, tmp, Py_EQ); \ + Py_DECREF(buf_flag); \ + if (flag_matches == 1) { \ + Py_DECREF(tmp); \ + flags |= PyBUF_##FLAG; \ + continue; \ + } \ + else if (flag_matches == -1) { \ + Py_DECREF(tmp); \ + return NULL; \ + } + + +/* + * Get information for a buffer through PyBuf_GetBuffer with the + * corresponding flags or'ed. Note that the python caller has to + * make sure that or'ing those flags actually makes sense. 
+ * More information should probably be returned for future tests. + */ +static PyObject * +get_buffer_info(PyObject *self, PyObject *args) +{ + PyObject *buffer_obj, *pyflags; + PyObject *tmp, *buf_flag; + Py_buffer buffer; + PyObject *shape, *strides; + Py_ssize_t i, n; + int flag_matches; + int flags = 0; + + if (!PyArg_ParseTuple(args, "OO", &buffer_obj, &pyflags)) { + return NULL; + } + + n = PySequence_Length(pyflags); + if (n < 0) { + return NULL; + } + + for (i=0; i < n; i++) { + tmp = PySequence_GetItem(pyflags, i); + if (tmp == NULL) { + return NULL; + } + + GET_PYBUF_FLAG(SIMPLE); + GET_PYBUF_FLAG(WRITABLE); + GET_PYBUF_FLAG(STRIDES); + GET_PYBUF_FLAG(ND); + GET_PYBUF_FLAG(C_CONTIGUOUS); + GET_PYBUF_FLAG(F_CONTIGUOUS); + GET_PYBUF_FLAG(ANY_CONTIGUOUS); + GET_PYBUF_FLAG(INDIRECT); + GET_PYBUF_FLAG(FORMAT); + GET_PYBUF_FLAG(STRIDED); + GET_PYBUF_FLAG(STRIDED_RO); + GET_PYBUF_FLAG(RECORDS); + GET_PYBUF_FLAG(RECORDS_RO); + GET_PYBUF_FLAG(FULL); + GET_PYBUF_FLAG(FULL_RO); + GET_PYBUF_FLAG(CONTIG); + GET_PYBUF_FLAG(CONTIG_RO); + + Py_DECREF(tmp); + + /* One of the flags must match */ + PyErr_SetString(PyExc_ValueError, "invalid flag used."); + return NULL; + } + + if (PyObject_GetBuffer(buffer_obj, &buffer, flags) < 0) { + return NULL; + } + + if (buffer.shape == NULL) { + Py_INCREF(Py_None); + shape = Py_None; + } + else { + shape = PyTuple_New(buffer.ndim); + for (i=0; i < buffer.ndim; i++) { + PyTuple_SET_ITEM(shape, i, PyLong_FromSsize_t(buffer.shape[i])); + } + } + + if (buffer.strides == NULL) { + Py_INCREF(Py_None); + strides = Py_None; + } + else { + strides = PyTuple_New(buffer.ndim); + for (i=0; i < buffer.ndim; i++) { + PyTuple_SET_ITEM(strides, i, PyLong_FromSsize_t(buffer.strides[i])); + } + } + + PyBuffer_Release(&buffer); + return Py_BuildValue("(NN)", shape, strides); +} + static PyMethodDef buffer_functions[] = { {"test_buffer", (PyCFunction)test_buffer, METH_VARARGS, NULL}, + {"get_buffer_info", (PyCFunction)get_buffer_info, METH_VARARGS, 
NULL}, {NULL, NULL} /* Sentinel */ }; diff --git a/pypy/module/cpyext/test/test_memoryobject.py b/pypy/module/cpyext/test/test_memoryobject.py --- a/pypy/module/cpyext/test/test_memoryobject.py +++ b/pypy/module/cpyext/test/test_memoryobject.py @@ -27,3 +27,19 @@ assert s == struct.pack('i', 3) viewlen = module.test_buffer(arr) assert viewlen == y.itemsize * len(y) + + def test_buffer_info(self): + from _numpypy import multiarray as np + module = self.import_module(name='buffer_test') + get_buffer_info = module.get_buffer_info + # test_export_flags from numpy test_multiarray + raises(ValueError, get_buffer_info, np.arange(5)[::2], ('SIMPLE',)) + # test_relaxed_strides from numpy test_multiarray + arr = np.ones((1, 10)) + if arr.flags.f_contiguous: + shape, strides = get_buffer_info(arr, ['F_CONTIGUOUS']) + assert strides[0] == 8 + arr = np.ones((10, 1), order='F') + shape, strides = get_buffer_info(arr, ['C_CONTIGUOUS']) + assert strides[-1] == 8 + diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -293,6 +293,8 @@ STRUCT_TYPE = PyNumberMethods elif slot_names[0] == 'c_tp_as_sequence': STRUCT_TYPE = PySequenceMethods + elif slot_names[0] == 'c_tp_as_buffer': + STRUCT_TYPE = PyBufferProcs else: raise AssertionError( "Structure not allocated: %s" % (slot_names[0],)) diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -377,7 +377,8 @@ def __exit__(self, typ, value, traceback): keepalive_until_here(self) - def get_buffer(self, space, readonly): + def get_buffer(self, space, flags): + readonly = not bool(flags & space.BUF_WRITABLE) return ArrayBuffer(self, readonly) def astype(self, space, dtype, order, copy=True): @@ -695,6 +696,8 @@ index + self.impl.start) def setitem(self, index, v): + if self.readonly: + raise oefmt(space.w_BufferError, 
"cannot write to a readonly buffer") raw_storage_setitem(self.impl.storage, index + self.impl.start, rffi.cast(lltype.Char, v)) diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -804,20 +804,20 @@ """) return w_result - def buffer_w(self, space, flags): - return self.implementation.get_buffer(space, True) + def buffer_w(self, space, w_flags): + return self.implementation.get_buffer(space, space.int_w(w_flags)) def readbuf_w(self, space): - return self.implementation.get_buffer(space, True) + return self.implementation.get_buffer(space, space.BUF_FULL_RO) def writebuf_w(self, space): - return self.implementation.get_buffer(space, False) + return self.implementation.get_buffer(space, space.BUF_FULL) def charbuf_w(self, space): - return self.implementation.get_buffer(space, True).as_str() + return self.implementation.get_buffer(space, space.BUF_FULL_RO).as_str() def descr_get_data(self, space): - return space.newbuffer(self.implementation.get_buffer(space, False)) + return space.newbuffer(self.implementation.get_buffer(space, space.BUF_FULL)) @unwrap_spec(offset=int, axis1=int, axis2=int) def descr_diagonal(self, space, offset=0, axis1=0, axis2=1): @@ -1697,6 +1697,7 @@ __array_wrap__ = interp2app(W_NDimArray.descr___array_wrap__), __array_priority__ = GetSetProperty(W_NDimArray.descr___array_priority__), __array__ = interp2app(W_NDimArray.descr___array__), + __buffer__ = interp2app(W_NDimArray.buffer_w), ) From pypy.commits at gmail.com Fri Sep 2 11:27:56 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 02 Sep 2016 08:27:56 -0700 (PDT) Subject: [pypy-commit] pypy buffer-interface: add all the logic needed to make the tests pass, fixes for translation Message-ID: <57c99a7c.c62f1c0a.6b352.6041@mx.google.com> Author: Matti Picus Branch: buffer-interface Changeset: r86842:04da0bbd16eb Date: 2016-09-02 18:26 +0300 
http://bitbucket.org/pypy/pypy/changeset/04da0bbd16eb/ Log: add all the logic needed to make the tests pass, fixes for translation diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1428,6 +1428,9 @@ BUF_FORMAT = 0x0004 BUF_ND = 0x0008 BUF_STRIDES = 0x0010 | BUF_ND + BUF_C_CONTIGUOUS = 0x0020 | BUF_STRIDES + BUF_F_CONTIGUOUS = 0x0040 | BUF_STRIDES + BUF_ANY_CONTIGUOUS = 0x0080 | BUF_STRIDES BUF_INDIRECT = 0x0100 | BUF_STRIDES BUF_CONTIG_RO = BUF_ND diff --git a/pypy/module/cpyext/buffer.py b/pypy/module/cpyext/buffer.py --- a/pypy/module/cpyext/buffer.py +++ b/pypy/module/cpyext/buffer.py @@ -1,5 +1,6 @@ from pypy.interpreter.error import oefmt from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib.rarithmetic import widen from pypy.module.cpyext.api import ( cpython_api, CANNOT_FAIL, Py_buffer, Py_TPFLAGS_HAVE_NEWBUFFER, Py_ssize_tP) from pypy.module.cpyext.pyobject import PyObject, as_pyobj, incref @@ -33,7 +34,8 @@ raise an error if the object can't support a simpler view of its memory. 
0 is returned on success and -1 on error.""" - buf = space.call_method(w_obj, "__buffer__", space.newint(flags)) + flags = widen(flags) + buf = space.buffer_w(w_obj, flags) try: view.c_buf = rffi.cast(rffi.VOIDP, buf.get_raw_address()) except ValueError: @@ -58,35 +60,37 @@ return 0 def _IsFortranContiguous(view): - if view.ndim == 0: + ndim = widen(view.c_ndim) + if ndim == 0: return 1 - if not view.strides: - return view.ndim == 1 - sd = view.itemsize - if view.ndim == 1: - return view.shape[0] == 1 or sd == view.strides[0] - for i in range(view.ndim): - dim = view.shape[i] + if not view.c_strides: + return ndim == 1 + sd = view.c_itemsize + if ndim == 1: + return view.c_shape[0] == 1 or sd == view.c_strides[0] + for i in range(view.c_ndim): + dim = view.c_shape[i] if dim == 0: return 1 - if view.strides[i] != sd: + if view.c_strides[i] != sd: return 0 sd *= dim return 1 def _IsCContiguous(view): - if view.ndim == 0: + ndim = widen(view.c_ndim) + if ndim == 0: return 1 - if not view.strides: - return view.ndim == 1 - sd = view.itemsize - if view.ndim == 1: - return view.shape[0] == 1 or sd == view.strides[0] - for i in range(view.ndim-1, -1, -1): - dim = view.shape[i] + if not view.c_strides: + return ndim == 1 + sd = view.c_itemsize + if ndim == 1: + return view.c_shape[0] == 1 or sd == view.c_strides[0] + for i in range(ndim - 1, -1, -1): + dim = view.c_shape[i] if dim == 0: return 1 - if view.strides[i] != sd: + if view.c_strides[i] != sd: return 0 sd *= dim return 1 @@ -99,7 +103,7 @@ (fortran is 'A'). Return 0 otherwise.""" # traverse the strides, checking for consistent stride increases from # right-to-left (c) or left-to-right (fortran). 
Copied from cpython - if not view.suboffsets: + if not view.c_suboffsets: return 0 if (fort == 'C'): return _IsCContiguous(view) diff --git a/pypy/module/cpyext/test/test_memoryobject.py b/pypy/module/cpyext/test/test_memoryobject.py --- a/pypy/module/cpyext/test/test_memoryobject.py +++ b/pypy/module/cpyext/test/test_memoryobject.py @@ -35,7 +35,7 @@ # test_export_flags from numpy test_multiarray raises(ValueError, get_buffer_info, np.arange(5)[::2], ('SIMPLE',)) # test_relaxed_strides from numpy test_multiarray - arr = np.ones((1, 10)) + arr = np.zeros((1, 10)) if arr.flags.f_contiguous: shape, strides = get_buffer_info(arr, ['F_CONTIGUOUS']) assert strides[0] == 8 diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -378,7 +378,24 @@ keepalive_until_here(self) def get_buffer(self, space, flags): - readonly = not bool(flags & space.BUF_WRITABLE) + errtype = space.w_ValueError # should be BufferError, numpy does this instead + if ((flags & space.BUF_C_CONTIGUOUS) == space.BUF_C_CONTIGUOUS and + not self.flags & NPY.ARRAY_C_CONTIGUOUS): + raise oefmt(errtype, "ndarray is not C-contiguous") + if ((flags & space.BUF_F_CONTIGUOUS) == space.BUF_F_CONTIGUOUS and + not self.flags & NPY.ARRAY_F_CONTIGUOUS): + raise oefmt(errtype, "ndarray is not Fortran contiguous") + if ((flags & space.BUF_ANY_CONTIGUOUS) == space.BUF_ANY_CONTIGUOUS and + not (self.flags & NPY.ARRAY_F_CONTIGUOUS and + self.flags & NPY.ARRAY_C_CONTIGUOUS)): + raise oefmt(errtype, "ndarray is not contiguous") + if ((flags & space.BUF_STRIDES) != space.BUF_STRIDES and + not self.flags & NPY.ARRAY_C_CONTIGUOUS): + raise oefmt(errtype, "ndarray is not C-contiguous") + if ((flags & space.BUF_WRITABLE) == space.BUF_WRITABLE and + not self.flags & NPY.ARRAY_WRITEABLE): + raise oefmt(errtype, "buffer source array is read-only") + readonly = not (flags & space.BUF_WRITABLE) == space.BUF_WRITABLE 
return ArrayBuffer(self, readonly) def astype(self, space, dtype, order, copy=True): @@ -696,8 +713,7 @@ index + self.impl.start) def setitem(self, index, v): - if self.readonly: - raise oefmt(space.w_BufferError, "cannot write to a readonly buffer") + # XXX what if self.readonly? raw_storage_setitem(self.impl.storage, index + self.impl.start, rffi.cast(lltype.Char, v)) diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -804,8 +804,8 @@ """) return w_result - def buffer_w(self, space, w_flags): - return self.implementation.get_buffer(space, space.int_w(w_flags)) + def buffer_w(self, space, flags): + return self.implementation.get_buffer(space, flags) def readbuf_w(self, space): return self.implementation.get_buffer(space, space.BUF_FULL_RO) @@ -1697,7 +1697,6 @@ __array_wrap__ = interp2app(W_NDimArray.descr___array_wrap__), __array_priority__ = GetSetProperty(W_NDimArray.descr___array_priority__), __array__ = interp2app(W_NDimArray.descr___array__), - __buffer__ = interp2app(W_NDimArray.buffer_w), ) From pypy.commits at gmail.com Fri Sep 2 11:38:53 2016 From: pypy.commits at gmail.com (sbauman) Date: Fri, 02 Sep 2016 08:38:53 -0700 (PDT) Subject: [pypy-commit] pypy force-virtual-state: Tentative solution to having aliased objects in virtual state when forcing Message-ID: <57c99d0d.09afc20a.e370d.cbad@mx.google.com> Author: Spenser Andrew Bauman Branch: force-virtual-state Changeset: r86843:8855c1e2a325 Date: 2016-09-02 11:38 -0400 http://bitbucket.org/pypy/pypy/changeset/8855c1e2a325/ Log: Tentative solution to having aliased objects in virtual state when forcing diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -325,10 +325,20 @@ guard.rd_resume_position = patchguardop.rd_resume_position 
guard.setdescr(compile.ResumeAtPositionDescr()) self.send_extra_operation(guard) - except VirtualStatesCantMatch as e: + except VirtualStatesCantMatch: continue - args, virtuals = target_virtual_state.make_inputargs_and_virtuals( - args, self.optimizer, force_boxes=force_boxes) + + # When force_boxes == True, creating the virtual args can fail when + # components of the virtual state alias. If this occurs, we must + # recompute the virtual state as boxes will have been forced. + try: + args, virtuals = target_virtual_state.make_inputargs_and_virtuals( + args, self.optimizer, force_boxes=force_boxes) + except VirtualStatesCantMatch: + assert force_boxes + virtual_state = self.get_virtual_state(args) + continue + short_preamble = target_token.short_preamble try: extra = self.inline_short_preamble(args + virtuals, args, From pypy.commits at gmail.com Fri Sep 2 12:09:30 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 02 Sep 2016 09:09:30 -0700 (PDT) Subject: [pypy-commit] pypy default: Specialize str_decode_utf_8_impl on allow_surrogates, to resolve a translation failure in pycket Message-ID: <57c9a43a.898b1c0a.2cfd1.7d5d@mx.google.com> Author: Ronan Lamy Branch: Changeset: r86844:d61e115b2d73 Date: 2016-09-02 17:08 +0100 http://bitbucket.org/pypy/pypy/changeset/d61e115b2d73/ Log: Specialize str_decode_utf_8_impl on allow_surrogates, to resolve a translation failure in pycket diff --git a/rpython/rlib/runicode.py b/rpython/rlib/runicode.py --- a/rpython/rlib/runicode.py +++ b/rpython/rlib/runicode.py @@ -145,6 +145,7 @@ _invalid_byte_3_of_4 = _invalid_cont_byte _invalid_byte_4_of_4 = _invalid_cont_byte + at specialize.arg(2) def _invalid_byte_2_of_3(ordch1, ordch2, allow_surrogates): return (ordch2>>6 != 0x2 or # 0b10 (ordch1 == 0xe0 and ordch2 < 0xa0) @@ -156,7 +157,7 @@ (ordch1 == 0xf0 and ordch2 < 0x90) or (ordch1 == 0xf4 and ordch2 > 0x8f)) - at specialize.argtype(6) + at specialize.arg(5) def str_decode_utf_8_impl(s, size, errors, final, errorhandler, 
allow_surrogates, result): if size == 0: From pypy.commits at gmail.com Fri Sep 2 13:23:36 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 02 Sep 2016 10:23:36 -0700 (PDT) Subject: [pypy-commit] pypy default: Fix: this is needed to JIT code that uses ll_ullong_py_mod_zer Message-ID: <57c9b598.05d71c0a.db689.8f5b@mx.google.com> Author: Armin Rigo Branch: Changeset: r86845:01245dcd8f95 Date: 2016-09-02 19:23 +0200 http://bitbucket.org/pypy/pypy/changeset/01245dcd8f95/ Log: Fix: this is needed to JIT code that uses ll_ullong_py_mod_zer diff --git a/rpython/rtyper/rint.py b/rpython/rtyper/rint.py --- a/rpython/rtyper/rint.py +++ b/rpython/rtyper/rint.py @@ -540,7 +540,7 @@ def ll_ullong_py_mod_zer(x, y): if y == 0: raise ZeroDivisionError - return llop.ullong_mod(UnsignedLongLong, x, y) + return ll_ullong_py_mod(x, y) @jit.dont_look_inside def ll_lllong_py_mod(x, y): From pypy.commits at gmail.com Fri Sep 2 13:46:35 2016 From: pypy.commits at gmail.com (sbauman) Date: Fri, 02 Sep 2016 10:46:35 -0700 (PDT) Subject: [pypy-commit] pypy force-virtual-state: Merge with default Message-ID: <57c9bafb.94071c0a.8c4bb.961c@mx.google.com> Author: Spenser Andrew Bauman Branch: force-virtual-state Changeset: r86846:a5fbf228be6d Date: 2016-09-02 13:42 -0400 http://bitbucket.org/pypy/pypy/changeset/a5fbf228be6d/ Log: Merge with default diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -369,3 +369,109 @@ Roman Podoliaka Dan Loewenherz werat + + Heinrich-Heine University, Germany + Open End AB (formerly AB Strakt), Sweden + merlinux GmbH, Germany + tismerysoft GmbH, Germany + Logilab Paris, France + DFKI GmbH, Germany + Impara, Germany + Change Maker, Sweden + University of California Berkeley, USA + Google Inc. + King's College London + +The PyPy Logo as used by http://speed.pypy.org and others was created +by Samuel Reis and is distributed on terms of Creative Commons Share Alike +License. 
+ +License for 'lib-python/2.7' +============================ + +Except when otherwise stated (look for LICENSE files or copyright/license +information at the beginning of each file) the files in the 'lib-python/2.7' +directory are all copyrighted by the Python Software Foundation and licensed +under the terms that you can find here: https://docs.python.org/2/license.html + +License for 'pypy/module/unicodedata/' +====================================== + +The following files are from the website of The Unicode Consortium +at http://www.unicode.org/. For the terms of use of these files, see +http://www.unicode.org/terms_of_use.html . Or they are derived from +files from the above website, and the same terms of use apply. + + CompositionExclusions-*.txt + EastAsianWidth-*.txt + LineBreak-*.txt + UnicodeData-*.txt + UnihanNumeric-*.txt + +License for 'dotviewer/font/' +============================= + +Copyright (C) 2008 The Android Open Source Project + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +Detailed license information is contained in the NOTICE file in the +directory. + + +Licenses and Acknowledgements for Incorporated Software +======================================================= + +This section is an incomplete, but growing list of licenses and +acknowledgements for third-party software incorporated in the PyPy +distribution. 
+ +License for 'Tcl/Tk' +-------------------- + +This copy of PyPy contains library code that may, when used, result in +the Tcl/Tk library to be loaded. PyPy also includes code that may be +regarded as being a copy of some parts of the Tcl/Tk header files. +You may see a copy of the License for Tcl/Tk in the file +`lib_pypy/_tkinter/license.terms` included here. + +License for 'bzip2' +------------------- + +This copy of PyPy may be linked (dynamically or statically) with the +bzip2 library. You may see a copy of the License for bzip2/libbzip2 at + + http://www.bzip.org/1.0.5/bzip2-manual-1.0.5.html + +License for 'openssl' +--------------------- + +This copy of PyPy may be linked (dynamically or statically) with the +openssl library. You may see a copy of the License for OpenSSL at + + https://www.openssl.org/source/license.html + +License for 'gdbm' +------------------ + +The gdbm module includes code from gdbm.h, which is distributed under +the terms of the GPL license version 2 or any later version. Thus the +gdbm module, provided in the file lib_pypy/gdbm.py, is redistributed +under the terms of the GPL license as well. + +License for 'rpython/rlib/rvmprof/src' +-------------------------------------- + +The code is based on gperftools. You may see a copy of the License for it at + + https://github.com/gperftools/gperftools/blob/master/COPYING diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -164,8 +164,15 @@ # annotations that are passed in, and don't annotate the old # graph -- it's already low-level operations! 
for a, s_newarg in zip(block.inputargs, cells): - s_oldarg = self.binding(a) - assert annmodel.unionof(s_oldarg, s_newarg) == s_oldarg + s_oldarg = a.annotation + # XXX: Should use s_oldarg.contains(s_newarg) but that breaks + # PyPy translation + if annmodel.unionof(s_oldarg, s_newarg) != s_oldarg: + raise annmodel.AnnotatorError( + "Late-stage annotation is not allowed to modify the " + "existing annotation for variable %s: %s" % + (a, s_oldarg)) + else: assert not self.frozen if block not in self.annotated: diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -750,6 +750,7 @@ s1 = pair(s1, s2).union() else: # this is just a performance shortcut + # XXX: This is a lie! Grep for no_side_effects_in_union and weep. if s1 != s2: s1 = pair(s1, s2).union() return s1 diff --git a/rpython/rlib/runicode.py b/rpython/rlib/runicode.py --- a/rpython/rlib/runicode.py +++ b/rpython/rlib/runicode.py @@ -145,6 +145,7 @@ _invalid_byte_3_of_4 = _invalid_cont_byte _invalid_byte_4_of_4 = _invalid_cont_byte + at specialize.arg(2) def _invalid_byte_2_of_3(ordch1, ordch2, allow_surrogates): return (ordch2>>6 != 0x2 or # 0b10 (ordch1 == 0xe0 and ordch2 < 0xa0) @@ -156,7 +157,7 @@ (ordch1 == 0xf0 and ordch2 < 0x90) or (ordch1 == 0xf4 and ordch2 > 0x8f)) - at specialize.argtype(6) + at specialize.arg(5) def str_decode_utf_8_impl(s, size, errors, final, errorhandler, allow_surrogates, result): if size == 0: From pypy.commits at gmail.com Fri Sep 2 14:36:31 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 02 Sep 2016 11:36:31 -0700 (PDT) Subject: [pypy-commit] pypy union-side-effects: Create union() as a side-effect-free binary function to replace unionof() wherever possible Message-ID: <57c9c6af.d42f1c0a.665d8.aa80@mx.google.com> Author: Ronan Lamy Branch: union-side-effects Changeset: r86847:033077287c06 Date: 2016-09-02 19:35 +0100 http://bitbucket.org/pypy/pypy/changeset/033077287c06/ Log: Create 
union() as a side-effect-free binary function to replace unionof() wherever possible diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -94,16 +94,9 @@ if self == other: return True try: - TLS.no_side_effects_in_union += 1 - except AttributeError: - TLS.no_side_effects_in_union = 1 - try: - try: - return pair(self, other).union() == self - except UnionError: - return False - finally: - TLS.no_side_effects_in_union -= 1 + return union(self, other) == self + except UnionError: + return False def is_constant(self): d = self.__dict__ @@ -739,6 +732,23 @@ def __repr__(self): return str(self) +def union(s1, s2): + """The join operation in the lattice of annotations. + + It is the most precise SomeObject instance that contains both arguments. + + union() is (supposed to be) idempotent, commutative, associative and has + no side-effects. + """ + try: + TLS.no_side_effects_in_union += 1 + except AttributeError: + TLS.no_side_effects_in_union = 1 + try: + return pair(s1, s2).union() + finally: + TLS.no_side_effects_in_union -= 1 + def unionof(*somevalues): "The most precise SomeValue instance that contains all the values." 
try: From pypy.commits at gmail.com Fri Sep 2 15:22:11 2016 From: pypy.commits at gmail.com (ntruessel) Date: Fri, 02 Sep 2016 12:22:11 -0700 (PDT) Subject: [pypy-commit] pypy quad-color-gc: Fix some broken testcases due to wrong typeids for immortal objects Message-ID: <57c9d163.04141c0a.b87ba.b674@mx.google.com> Author: Nicolas Truessel Branch: quad-color-gc Changeset: r86848:a6ec7b4fb726 Date: 2016-09-02 21:21 +0200 http://bitbucket.org/pypy/pypy/changeset/a6ec7b4fb726/ Log: Fix some broken testcases due to wrong typeids for immortal objects diff --git a/rpython/memory/gc/qcgc.py b/rpython/memory/gc/qcgc.py --- a/rpython/memory/gc/qcgc.py +++ b/rpython/memory/gc/qcgc.py @@ -26,11 +26,10 @@ ('hdr', rffi.COpaque('object_t', hints={"is_qcgc_header": True})), ('tid', lltype.Signed), ('hash', lltype.Signed)) - #HDR = rffi.COpaque('object_t') def init_gc_object(self, addr, typeid): hdr = llmemory.cast_adr_to_ptr(addr, lltype.Ptr(self.HDR)) - hdr.tid = rffi.cast(lltype.Signed, typeid) + hdr.tid = llop.combine_ushort(lltype.Signed, typeid, 0) hdr.hash = rffi.cast(lltype.Signed, 0) def malloc_fixedsize_clear(self, typeid, size, @@ -63,7 +62,7 @@ def init_gc_object_immortal(self, addr, typeid, flags=0): assert flags == 0 # - self.init_gc_object(addr, typeid.index) + self.init_gc_object(addr, typeid) def collect(self, gen=1): """Do a minor (gen=0) or major (gen>0) collection.""" From pypy.commits at gmail.com Fri Sep 2 16:44:02 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 02 Sep 2016 13:44:02 -0700 (PDT) Subject: [pypy-commit] pypy union-side-effects: Use union() instead of unionof() in a few places Message-ID: <57c9e492.05371c0a.1ada0.d1e2@mx.google.com> Author: Ronan Lamy Branch: union-side-effects Changeset: r86849:53937853a5d2 Date: 2016-09-02 21:40 +0100 http://bitbucket.org/pypy/pypy/changeset/53937853a5d2/ Log: Use union() instead of unionof() in a few places diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- 
a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -246,7 +246,7 @@ if s_old is not None: if not s_value.contains(s_old): log.WARNING("%s does not contain %s" % (s_value, s_old)) - log.WARNING("%s" % annmodel.unionof(s_value, s_old)) + log.WARNING("%s" % annmodel.union(s_value, s_old)) assert False arg.annotation = s_value diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -11,7 +11,7 @@ SomeBuiltinMethod, SomeIterator, SomePBC, SomeNone, SomeFloat, s_None, SomeByteArray, SomeWeakRef, SomeSingleFloat, SomeLongFloat, SomeType, SomeTypeOf, SomeConstantType, unionof, UnionError, - read_can_only_throw, add_knowntypedata, + union, read_can_only_throw, add_knowntypedata, merge_knowntypedata,) from rpython.annotator.bookkeeper import immutablevalue, getbookkeeper from rpython.flowspace.model import Variable, Constant, const @@ -703,13 +703,13 @@ pairtype(SomeException, SomeInstance), pairtype(SomeException, SomeNone)): def union((s_exc, s_inst)): - return unionof(s_exc.as_SomeInstance(), s_inst) + return union(s_exc.as_SomeInstance(), s_inst) class __extend__( pairtype(SomeInstance, SomeException), pairtype(SomeNone, SomeException)): def union((s_inst, s_exc)): - return unionof(s_exc.as_SomeInstance(), s_inst) + return union(s_exc.as_SomeInstance(), s_inst) class __extend__(pairtype(SomeException, SomeException)): def union((s_exc1, s_exc2)): diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -6,7 +6,7 @@ from rpython.annotator.model import ( SomeInteger, SomeChar, SomeBool, SomeString, SomeTuple, - SomeUnicodeCodePoint, SomeFloat, unionof, SomeUnicodeString, + SomeUnicodeCodePoint, SomeFloat, union, SomeUnicodeString, SomePBC, SomeInstance, SomeDict, SomeList, SomeWeakRef, SomeIterator, SomeOrderedDict, SomeByteArray, add_knowntypedata, s_ImpossibleValue,) 
from rpython.annotator.bookkeeper import ( @@ -166,14 +166,14 @@ s_iter = s_values[0].iter() return s_iter.next() else: - return unionof(*s_values) + return union(*s_values) def builtin_max(*s_values): if len(s_values) == 1: # xxx do we support this? s_iter = s_values[0].iter() return s_iter.next() else: - s = unionof(*s_values) + s = union(*s_values) if type(s) is SomeInteger and not s.nonneg: nonneg = False for s1 in s_values: diff --git a/rpython/annotator/dictdef.py b/rpython/annotator/dictdef.py --- a/rpython/annotator/dictdef.py +++ b/rpython/annotator/dictdef.py @@ -1,5 +1,5 @@ -from rpython.annotator.model import s_ImpossibleValue -from rpython.annotator.model import SomeInteger, s_Bool, unionof +from rpython.annotator.model import ( + s_ImpossibleValue, SomeInteger, s_Bool, union) from rpython.annotator.listdef import ListItem from rpython.rlib.objectmodel import compute_hash @@ -34,8 +34,8 @@ def update_rdict_annotations(self, s_eqfn, s_hashfn, other=None): assert self.custom_eq_hash - s_eqfn = unionof(s_eqfn, self.s_rdict_eqfn) - s_hashfn = unionof(s_hashfn, self.s_rdict_hashfn) + s_eqfn = union(s_eqfn, self.s_rdict_eqfn) + s_hashfn = union(s_hashfn, self.s_rdict_hashfn) self.s_rdict_eqfn = s_eqfn self.s_rdict_hashfn = s_hashfn self.emulate_rdict_calls(other=other) diff --git a/rpython/annotator/test/test_model.py b/rpython/annotator/test/test_model.py --- a/rpython/annotator/test/test_model.py +++ b/rpython/annotator/test/test_model.py @@ -146,14 +146,14 @@ someinst = lambda cls, **kw: SomeInstance(bk.getuniqueclassdef(cls), **kw) s_inst = someinst(Exception) s_exc = bk.new_exception([ValueError, IndexError]) - assert unionof(s_exc, s_inst) == s_inst - assert unionof(s_inst, s_exc) == s_inst - s_nullable = unionof(s_None, bk.new_exception([ValueError])) + assert union(s_exc, s_inst) == s_inst + assert union(s_inst, s_exc) == s_inst + s_nullable = union(s_None, bk.new_exception([ValueError])) assert isinstance(s_nullable, SomeInstance) assert 
s_nullable.can_be_None s_exc1 = bk.new_exception([ValueError]) s_exc2 = bk.new_exception([IndexError]) - unionof(s_exc1, s_exc2) == unionof(s_exc2, s_exc1) + union(s_exc1, s_exc2) == union(s_exc2, s_exc1) def contains_s(s_a, s_b): if s_b is None: diff --git a/rpython/rtyper/test/test_llannotation.py b/rpython/rtyper/test/test_llannotation.py --- a/rpython/rtyper/test/test_llannotation.py +++ b/rpython/rtyper/test/test_llannotation.py @@ -1,6 +1,6 @@ import py.test from rpython.annotator.model import ( - SomeInteger, SomeBool, SomeChar, unionof, SomeImpossibleValue, + SomeInteger, SomeBool, SomeChar, union, SomeImpossibleValue, UnionError, SomeInstance, SomeSingleFloat) from rpython.rlib.rarithmetic import r_uint, r_singlefloat from rpython.rtyper.llannotation import ( @@ -69,22 +69,22 @@ PA1 = lltype.Ptr(lltype.GcArray()) PA2 = lltype.Ptr(lltype.GcArray()) - assert unionof(SomePtr(PS1), SomePtr(PS1)) == SomePtr(PS1) - assert unionof(SomePtr(PS1), SomePtr(PS2)) == SomePtr(PS2) - assert unionof(SomePtr(PS1), SomePtr(PS2)) == SomePtr(PS1) + assert union(SomePtr(PS1), SomePtr(PS1)) == SomePtr(PS1) + assert union(SomePtr(PS1), SomePtr(PS2)) == SomePtr(PS2) + assert union(SomePtr(PS1), SomePtr(PS2)) == SomePtr(PS1) - assert unionof(SomePtr(PA1), SomePtr(PA1)) == SomePtr(PA1) - assert unionof(SomePtr(PA1), SomePtr(PA2)) == SomePtr(PA2) - assert unionof(SomePtr(PA1), SomePtr(PA2)) == SomePtr(PA1) + assert union(SomePtr(PA1), SomePtr(PA1)) == SomePtr(PA1) + assert union(SomePtr(PA1), SomePtr(PA2)) == SomePtr(PA2) + assert union(SomePtr(PA1), SomePtr(PA2)) == SomePtr(PA1) - assert unionof(SomePtr(PS1), SomeImpossibleValue()) == SomePtr(PS1) - assert unionof(SomeImpossibleValue(), SomePtr(PS1)) == SomePtr(PS1) + assert union(SomePtr(PS1), SomeImpossibleValue()) == SomePtr(PS1) + assert union(SomeImpossibleValue(), SomePtr(PS1)) == SomePtr(PS1) with py.test.raises(UnionError): - unionof(SomePtr(PA1), SomePtr(PS1)) + union(SomePtr(PA1), SomePtr(PS1)) with 
py.test.raises(UnionError): - unionof(SomePtr(PS1), SomePtr(PS3)) + union(SomePtr(PS1), SomePtr(PS3)) with py.test.raises(UnionError): - unionof(SomePtr(PS1), SomeInteger()) + union(SomePtr(PS1), SomeInteger()) with py.test.raises(UnionError): - unionof(SomeInteger(), SomePtr(PS1)) + union(SomeInteger(), SomePtr(PS1)) From pypy.commits at gmail.com Fri Sep 2 22:42:45 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 02 Sep 2016 19:42:45 -0700 (PDT) Subject: [pypy-commit] pypy union-side-effects: Fix union() and give a correct explanation for the s1==s2 case Message-ID: <57ca38a5.eeb8c20a.69f14.86e7@mx.google.com> Author: Ronan Lamy Branch: union-side-effects Changeset: r86851:81eb595adb63 Date: 2016-09-03 01:44 +0100 http://bitbucket.org/pypy/pypy/changeset/81eb595adb63/ Log: Fix union() and give a correct explanation for the s1==s2 case diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -745,6 +745,10 @@ except AttributeError: TLS.no_side_effects_in_union = 1 try: + if s1 == s2: + # Most pair(...).union() methods deal incorrectly with that case + # when constants are involved. + return s1 return pair(s1, s2).union() finally: TLS.no_side_effects_in_union -= 1 @@ -759,8 +763,7 @@ if s1 != s2: s1 = pair(s1, s2).union() else: - # this is just a performance shortcut - # XXX: This is a lie! Grep for no_side_effects_in_union and weep. 
+ # See comment in union() above if s1 != s2: s1 = pair(s1, s2).union() return s1 From pypy.commits at gmail.com Fri Sep 2 22:42:47 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 02 Sep 2016 19:42:47 -0700 (PDT) Subject: [pypy-commit] pypy union-side-effects: Expand hypothesis testing of union() until it fails Message-ID: <57ca38a7.4676c20a.2e4e.b0e6@mx.google.com> Author: Ronan Lamy Branch: union-side-effects Changeset: r86852:fc2b86d220a0 Date: 2016-09-03 03:41 +0100 http://bitbucket.org/pypy/pypy/changeset/fc2b86d220a0/ Log: Expand hypothesis testing of union() until it fails diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -673,6 +673,10 @@ s_None = SomeNone() s_Bool = SomeBool() +s_True = SomeBool() +s_True.const = True +s_False = SomeBool() +s_False.const = False s_Int = SomeInteger() s_ImpossibleValue = SomeImpossibleValue() s_Str0 = SomeString(no_nul=True) diff --git a/rpython/annotator/test/test_model.py b/rpython/annotator/test/test_model.py --- a/rpython/annotator/test/test_model.py +++ b/rpython/annotator/test/test_model.py @@ -70,11 +70,11 @@ except TypeError: # if A0 is also a new-style class, e.g. 
in PyPy class B3(A0, object): pass - assert commonbase(A1,A2) is A0 - assert commonbase(A1,A0) is A0 - assert commonbase(A1,A1) is A1 - assert commonbase(A2,B2) is object - assert commonbase(A2,B3) is A0 + assert commonbase(A1, A2) is A0 + assert commonbase(A1, A0) is A0 + assert commonbase(A1, A1) is A1 + assert commonbase(A2, B2) is object + assert commonbase(A2, B3) is A0 def test_list_union(): listdef1 = ListDef('dummy', SomeInteger(nonneg=True)) @@ -105,20 +105,89 @@ assert f2.contains(f1) assert f1.contains(f2) +def const_float(x): + s = SomeFloat() + s.const = x + return s + def const_int(n): - s = SomeInteger(nonneg=(n>=0)) + s = SomeInteger(nonneg=(n >= 0)) s.const = n return s +def const_str(x): + no_nul = not '\x00' in x + if len(x) == 1: + result = SomeChar(no_nul=no_nul) + else: + result = SomeString(no_nul=no_nul) + result.const = x + return result + +def const_unicode(x): + no_nul = not u'\x00' in x + if len(x) == 1: + result = SomeUnicodeCodePoint(no_nul=no_nul) + else: + result = SomeUnicodeString(no_nul=no_nul) + result.const = x + return result + +def compatible(s1, s2): + try: + union(s1, s2) + except UnionError: + return False + return True + +def compatible_pair(pair_s): + return compatible(*pair_s) + +st_float = st.just(SomeFloat()) | st.builds(const_float, st.floats()) st_int = st.one_of(st.builds(SomeInteger, st.booleans(), st.booleans()), st.builds(const_int, st.integers())) -st_annotation = st_int +st_bool = st.sampled_from([s_Bool, s_True, s_False]) +st_numeric = st.one_of(st_float, st_int, st_bool) +st_str = (st.builds(SomeString, st.booleans(), st.booleans()) + | st.builds(const_str, st.binary())) +st_unicode = (st.builds(SomeUnicodeString, st.booleans(), st.booleans()) + | st.builds(const_unicode, st.text())) +st_simple = st.one_of(st_numeric, st_str, st_unicode, st.just(s_ImpossibleValue), st.just(s_None)) - at given(s=st_annotation) +def valid_unions(st_ann): + """From a strategy generating annotations, create a strategy returning + 
unions of these annotations.""" + pairs = st.tuples(st_ann, st_ann) + return pairs.filter(compatible_pair).map(lambda t: union(*t)) + + +st_annotation = st.recursive(st_simple, + lambda st_ann: valid_unions(st_ann) | st.builds(SomeTuple, st.lists(st_ann)), + max_leaves=3) + + at given(s=st_numeric) def test_union_unary(s): assert union(s, s) == s assert union(s_ImpossibleValue, s) == s + at given(s1=st_numeric, s2=st_numeric) +def test_commutativity_of_union_compatibility(s1, s2): + assert compatible(s1, s2) == compatible(s2, s1) + + at given(st.tuples(st_annotation, st_annotation).filter(lambda t: compatible(*t))) +def test_union_commutative(t): + s1, s2 = t + s_union = union(s1, s2) + assert union(s2, s1) == s_union + assert s_union.contains(s1) + assert s_union.contains(s2) + + at given(st.tuples(st_annotation, st_annotation, st_annotation).filter( + lambda t: compatible(t[0], t[1]) and compatible(t[1], t[2]) and compatible(t[0], t[2]))) +def test_union_associative(t): + s1, s2, s3 = t + assert union(union(s1, s2), s3) == union(s1, union(s2, s3)) + def compile_function(function, annotation=[]): t = TranslationContext() From pypy.commits at gmail.com Fri Sep 2 22:42:44 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 02 Sep 2016 19:42:44 -0700 (PDT) Subject: [pypy-commit] pypy union-side-effects: Add an elementary (but failing) hypothesis test for union() Message-ID: <57ca38a4.c3d41c0a.2923b.249f@mx.google.com> Author: Ronan Lamy Branch: union-side-effects Changeset: r86850:34fcf0746426 Date: 2016-09-03 01:24 +0100 http://bitbucket.org/pypy/pypy/changeset/34fcf0746426/ Log: Add an elementary (but failing) hypothesis test for union() diff --git a/rpython/annotator/test/test_model.py b/rpython/annotator/test/test_model.py --- a/rpython/annotator/test/test_model.py +++ b/rpython/annotator/test/test_model.py @@ -1,5 +1,8 @@ import pytest +from hypothesis import given +from hypothesis import strategies as st + from rpython.flowspace.model import Variable from 
rpython.flowspace.operation import op from rpython.translator.translator import TranslationContext @@ -102,6 +105,21 @@ assert f2.contains(f1) assert f1.contains(f2) +def const_int(n): + s = SomeInteger(nonneg=(n>=0)) + s.const = n + return s + +st_int = st.one_of(st.builds(SomeInteger, st.booleans(), st.booleans()), + st.builds(const_int, st.integers())) +st_annotation = st_int + + at given(s=st_annotation) +def test_union_unary(s): + assert union(s, s) == s + assert union(s_ImpossibleValue, s) == s + + def compile_function(function, annotation=[]): t = TranslationContext() t.buildannotator().build_types(function, annotation) From pypy.commits at gmail.com Sat Sep 3 05:11:55 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 03 Sep 2016 02:11:55 -0700 (PDT) Subject: [pypy-commit] cffi default: Kill the ctypes backend. Message-ID: <57ca93db.8aacc20a.9c360.e831@mx.google.com> Author: Armin Rigo Branch: Changeset: r2749:0087e2aec9ef Date: 2016-09-03 11:11 +0200 http://bitbucket.org/cffi/cffi/changeset/0087e2aec9ef/ Log: Kill the ctypes backend. diff too long, truncating to 2000 out of 2925 lines diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -46,20 +46,21 @@ ''' def __init__(self, backend=None): - """Create an FFI instance. The 'backend' argument is used to - select a non-default backend, mostly for tests. + """Create an FFI instance. + + The 'backend' argument is not used any more and must be set to None. + It is still present only so that 'FFI(None)' still works, and + for a few tests. """ from . import cparser, model + if backend is None: - # You need PyPy (>= 2.0 beta), or a CPython (>= 2.6) with - # _cffi_backend.so compiled. + # You need the corresponding version of PyPy, or CPython + # with the '_cffi_backend' C extension module compiled. import _cffi_backend as backend from . 
import __version__ assert backend.__version__ == __version__, \ "version mismatch, %s != %s" % (backend.__version__, __version__) - # (If you insist you can also try to pass the option - # 'backend=backend_ctypes.CTypesBackend()', but don't - # rely on it! It's probably not going to work well.) self._backend = backend self._lock = allocate_lock() @@ -75,8 +76,6 @@ self._init_once_cache = {} self._cdef_version = None self._embedding = None - if hasattr(backend, 'set_ffi'): - backend.set_ffi(self) for name in backend.__dict__: if name.startswith('RTLD_'): setattr(self, name, getattr(backend, name)) @@ -84,15 +83,10 @@ with self._lock: self.BVoidP = self._get_cached_btype(model.voidp_type) self.BCharA = self._get_cached_btype(model.char_array_type) - if isinstance(backend, types.ModuleType): - # _cffi_backend: attach these constants to the class - if not hasattr(FFI, 'NULL'): - FFI.NULL = self.cast(self.BVoidP, 0) - FFI.CData, FFI.CType = backend._get_types() - else: - # ctypes backend: attach these constants to the instance - self.NULL = self.cast(self.BVoidP, 0) - self.CData, self.CType = backend._get_types() + # attach these constants to the class + if not hasattr(FFI, 'NULL'): + FFI.NULL = self.cast(self.BVoidP, 0) + FFI.CData, FFI.CType = backend._get_types() def cdef(self, csource, override=False, packed=False): """Parse the given C source. This registers all declared functions, diff --git a/cffi/backend_ctypes.py b/cffi/backend_ctypes.py deleted file mode 100644 --- a/cffi/backend_ctypes.py +++ /dev/null @@ -1,1097 +0,0 @@ -import ctypes, ctypes.util, operator, sys -from . 
import model - -if sys.version_info < (3,): - bytechr = chr -else: - unicode = str - long = int - xrange = range - bytechr = lambda num: bytes([num]) - -class CTypesType(type): - pass - -class CTypesData(object): - __metaclass__ = CTypesType - __slots__ = ['__weakref__'] - __name__ = '' - - def __init__(self, *args): - raise TypeError("cannot instantiate %r" % (self.__class__,)) - - @classmethod - def _newp(cls, init): - raise TypeError("expected a pointer or array ctype, got '%s'" - % (cls._get_c_name(),)) - - @staticmethod - def _to_ctypes(value): - raise TypeError - - @classmethod - def _arg_to_ctypes(cls, *value): - try: - ctype = cls._ctype - except AttributeError: - raise TypeError("cannot create an instance of %r" % (cls,)) - if value: - res = cls._to_ctypes(*value) - if not isinstance(res, ctype): - res = cls._ctype(res) - else: - res = cls._ctype() - return res - - @classmethod - def _create_ctype_obj(cls, init): - if init is None: - return cls._arg_to_ctypes() - else: - return cls._arg_to_ctypes(init) - - @staticmethod - def _from_ctypes(ctypes_value): - raise TypeError - - @classmethod - def _get_c_name(cls, replace_with=''): - return cls._reftypename.replace(' &', replace_with) - - @classmethod - def _fix_class(cls): - cls.__name__ = 'CData<%s>' % (cls._get_c_name(),) - cls.__qualname__ = 'CData<%s>' % (cls._get_c_name(),) - cls.__module__ = 'ffi' - - def _get_own_repr(self): - raise NotImplementedError - - def _addr_repr(self, address): - if address == 0: - return 'NULL' - else: - if address < 0: - address += 1 << (8*ctypes.sizeof(ctypes.c_void_p)) - return '0x%x' % address - - def __repr__(self, c_name=None): - own = self._get_own_repr() - return '' % (c_name or self._get_c_name(), own) - - def _convert_to_address(self, BClass): - if BClass is None: - raise TypeError("cannot convert %r to an address" % ( - self._get_c_name(),)) - else: - raise TypeError("cannot convert %r to %r" % ( - self._get_c_name(), BClass._get_c_name())) - - @classmethod - def 
_get_size(cls): - return ctypes.sizeof(cls._ctype) - - def _get_size_of_instance(self): - return ctypes.sizeof(self._ctype) - - @classmethod - def _cast_from(cls, source): - raise TypeError("cannot cast to %r" % (cls._get_c_name(),)) - - def _cast_to_integer(self): - return self._convert_to_address(None) - - @classmethod - def _alignment(cls): - return ctypes.alignment(cls._ctype) - - def __iter__(self): - raise TypeError("cdata %r does not support iteration" % ( - self._get_c_name()),) - - def _make_cmp(name): - cmpfunc = getattr(operator, name) - def cmp(self, other): - if isinstance(other, CTypesData): - return cmpfunc(self._convert_to_address(None), - other._convert_to_address(None)) - else: - return NotImplemented - cmp.func_name = name - return cmp - - __eq__ = _make_cmp('__eq__') - __ne__ = _make_cmp('__ne__') - __lt__ = _make_cmp('__lt__') - __le__ = _make_cmp('__le__') - __gt__ = _make_cmp('__gt__') - __ge__ = _make_cmp('__ge__') - - def __hash__(self): - return hash(type(self)) ^ hash(self._convert_to_address(None)) - - def _to_string(self, maxlen): - raise TypeError("string(): %r" % (self,)) - - -class CTypesGenericPrimitive(CTypesData): - __slots__ = [] - - def __eq__(self, other): - return self is other - - def __ne__(self, other): - return self is not other - - def __hash__(self): - return object.__hash__(self) - - def _get_own_repr(self): - return repr(self._from_ctypes(self._value)) - - -class CTypesGenericArray(CTypesData): - __slots__ = [] - - @classmethod - def _newp(cls, init): - return cls(init) - - def __iter__(self): - for i in xrange(len(self)): - yield self[i] - - def _get_own_repr(self): - return self._addr_repr(ctypes.addressof(self._blob)) - - -class CTypesGenericPtr(CTypesData): - __slots__ = ['_address', '_as_ctype_ptr'] - _automatic_casts = False - kind = "pointer" - - @classmethod - def _newp(cls, init): - return cls(init) - - @classmethod - def _cast_from(cls, source): - if source is None: - address = 0 - elif isinstance(source, 
CTypesData): - address = source._cast_to_integer() - elif isinstance(source, (int, long)): - address = source - else: - raise TypeError("bad type for cast to %r: %r" % - (cls, type(source).__name__)) - return cls._new_pointer_at(address) - - @classmethod - def _new_pointer_at(cls, address): - self = cls.__new__(cls) - self._address = address - self._as_ctype_ptr = ctypes.cast(address, cls._ctype) - return self - - def _get_own_repr(self): - try: - return self._addr_repr(self._address) - except AttributeError: - return '???' - - def _cast_to_integer(self): - return self._address - - def __nonzero__(self): - return bool(self._address) - __bool__ = __nonzero__ - - @classmethod - def _to_ctypes(cls, value): - if not isinstance(value, CTypesData): - raise TypeError("unexpected %s object" % type(value).__name__) - address = value._convert_to_address(cls) - return ctypes.cast(address, cls._ctype) - - @classmethod - def _from_ctypes(cls, ctypes_ptr): - address = ctypes.cast(ctypes_ptr, ctypes.c_void_p).value or 0 - return cls._new_pointer_at(address) - - @classmethod - def _initialize(cls, ctypes_ptr, value): - if value: - ctypes_ptr.contents = cls._to_ctypes(value).contents - - def _convert_to_address(self, BClass): - if (BClass in (self.__class__, None) or BClass._automatic_casts - or self._automatic_casts): - return self._address - else: - return CTypesData._convert_to_address(self, BClass) - - -class CTypesBaseStructOrUnion(CTypesData): - __slots__ = ['_blob'] - - @classmethod - def _create_ctype_obj(cls, init): - # may be overridden - raise TypeError("cannot instantiate opaque type %s" % (cls,)) - - def _get_own_repr(self): - return self._addr_repr(ctypes.addressof(self._blob)) - - @classmethod - def _offsetof(cls, fieldname): - return getattr(cls._ctype, fieldname).offset - - def _convert_to_address(self, BClass): - if getattr(BClass, '_BItem', None) is self.__class__: - return ctypes.addressof(self._blob) - else: - return CTypesData._convert_to_address(self, BClass) 
- - @classmethod - def _from_ctypes(cls, ctypes_struct_or_union): - self = cls.__new__(cls) - self._blob = ctypes_struct_or_union - return self - - @classmethod - def _to_ctypes(cls, value): - return value._blob - - def __repr__(self, c_name=None): - return CTypesData.__repr__(self, c_name or self._get_c_name(' &')) - - -class CTypesBackend(object): - - PRIMITIVE_TYPES = { - 'char': ctypes.c_char, - 'short': ctypes.c_short, - 'int': ctypes.c_int, - 'long': ctypes.c_long, - 'long long': ctypes.c_longlong, - 'signed char': ctypes.c_byte, - 'unsigned char': ctypes.c_ubyte, - 'unsigned short': ctypes.c_ushort, - 'unsigned int': ctypes.c_uint, - 'unsigned long': ctypes.c_ulong, - 'unsigned long long': ctypes.c_ulonglong, - 'float': ctypes.c_float, - 'double': ctypes.c_double, - '_Bool': ctypes.c_bool, - } - - for _name in ['unsigned long long', 'unsigned long', - 'unsigned int', 'unsigned short', 'unsigned char']: - _size = ctypes.sizeof(PRIMITIVE_TYPES[_name]) - PRIMITIVE_TYPES['uint%d_t' % (8*_size)] = PRIMITIVE_TYPES[_name] - if _size == ctypes.sizeof(ctypes.c_void_p): - PRIMITIVE_TYPES['uintptr_t'] = PRIMITIVE_TYPES[_name] - if _size == ctypes.sizeof(ctypes.c_size_t): - PRIMITIVE_TYPES['size_t'] = PRIMITIVE_TYPES[_name] - - for _name in ['long long', 'long', 'int', 'short', 'signed char']: - _size = ctypes.sizeof(PRIMITIVE_TYPES[_name]) - PRIMITIVE_TYPES['int%d_t' % (8*_size)] = PRIMITIVE_TYPES[_name] - if _size == ctypes.sizeof(ctypes.c_void_p): - PRIMITIVE_TYPES['intptr_t'] = PRIMITIVE_TYPES[_name] - PRIMITIVE_TYPES['ptrdiff_t'] = PRIMITIVE_TYPES[_name] - if _size == ctypes.sizeof(ctypes.c_size_t): - PRIMITIVE_TYPES['ssize_t'] = PRIMITIVE_TYPES[_name] - - - def __init__(self): - self.RTLD_LAZY = 0 # not supported anyway by ctypes - self.RTLD_NOW = 0 - self.RTLD_GLOBAL = ctypes.RTLD_GLOBAL - self.RTLD_LOCAL = ctypes.RTLD_LOCAL - - def set_ffi(self, ffi): - self.ffi = ffi - - def _get_types(self): - return CTypesData, CTypesType - - def load_library(self, path, 
flags=0): - cdll = ctypes.CDLL(path, flags) - return CTypesLibrary(self, cdll) - - def new_void_type(self): - class CTypesVoid(CTypesData): - __slots__ = [] - _reftypename = 'void &' - @staticmethod - def _from_ctypes(novalue): - return None - @staticmethod - def _to_ctypes(novalue): - if novalue is not None: - raise TypeError("None expected, got %s object" % - (type(novalue).__name__,)) - return None - CTypesVoid._fix_class() - return CTypesVoid - - def new_primitive_type(self, name): - if name == 'wchar_t': - raise NotImplementedError(name) - ctype = self.PRIMITIVE_TYPES[name] - if name == 'char': - kind = 'char' - elif name in ('float', 'double'): - kind = 'float' - else: - if name in ('signed char', 'unsigned char'): - kind = 'byte' - elif name == '_Bool': - kind = 'bool' - else: - kind = 'int' - is_signed = (ctype(-1).value == -1) - # - def _cast_source_to_int(source): - if isinstance(source, (int, long, float)): - source = int(source) - elif isinstance(source, CTypesData): - source = source._cast_to_integer() - elif isinstance(source, bytes): - source = ord(source) - elif source is None: - source = 0 - else: - raise TypeError("bad type for cast to %r: %r" % - (CTypesPrimitive, type(source).__name__)) - return source - # - kind1 = kind - class CTypesPrimitive(CTypesGenericPrimitive): - __slots__ = ['_value'] - _ctype = ctype - _reftypename = '%s &' % name - kind = kind1 - - def __init__(self, value): - self._value = value - - @staticmethod - def _create_ctype_obj(init): - if init is None: - return ctype() - return ctype(CTypesPrimitive._to_ctypes(init)) - - if kind == 'int' or kind == 'byte': - @classmethod - def _cast_from(cls, source): - source = _cast_source_to_int(source) - source = ctype(source).value # cast within range - return cls(source) - def __int__(self): - return self._value - - if kind == 'bool': - @classmethod - def _cast_from(cls, source): - if not isinstance(source, (int, long, float)): - source = _cast_source_to_int(source) - return 
cls(bool(source)) - def __int__(self): - return self._value - - if kind == 'char': - @classmethod - def _cast_from(cls, source): - source = _cast_source_to_int(source) - source = bytechr(source & 0xFF) - return cls(source) - def __int__(self): - return ord(self._value) - - if kind == 'float': - @classmethod - def _cast_from(cls, source): - if isinstance(source, float): - pass - elif isinstance(source, CTypesGenericPrimitive): - if hasattr(source, '__float__'): - source = float(source) - else: - source = int(source) - else: - source = _cast_source_to_int(source) - source = ctype(source).value # fix precision - return cls(source) - def __int__(self): - return int(self._value) - def __float__(self): - return self._value - - _cast_to_integer = __int__ - - if kind == 'int' or kind == 'byte' or kind == 'bool': - @staticmethod - def _to_ctypes(x): - if not isinstance(x, (int, long)): - if isinstance(x, CTypesData): - x = int(x) - else: - raise TypeError("integer expected, got %s" % - type(x).__name__) - if ctype(x).value != x: - if not is_signed and x < 0: - raise OverflowError("%s: negative integer" % name) - else: - raise OverflowError("%s: integer out of bounds" - % name) - return x - - if kind == 'char': - @staticmethod - def _to_ctypes(x): - if isinstance(x, bytes) and len(x) == 1: - return x - if isinstance(x, CTypesPrimitive): # > - return x._value - raise TypeError("character expected, got %s" % - type(x).__name__) - def __nonzero__(self): - return ord(self._value) != 0 - else: - def __nonzero__(self): - return self._value != 0 - __bool__ = __nonzero__ - - if kind == 'float': - @staticmethod - def _to_ctypes(x): - if not isinstance(x, (int, long, float, CTypesData)): - raise TypeError("float expected, got %s" % - type(x).__name__) - return ctype(x).value - - @staticmethod - def _from_ctypes(value): - return getattr(value, 'value', value) - - @staticmethod - def _initialize(blob, init): - blob.value = CTypesPrimitive._to_ctypes(init) - - if kind == 'char': - def 
_to_string(self, maxlen): - return self._value - if kind == 'byte': - def _to_string(self, maxlen): - return chr(self._value & 0xff) - # - CTypesPrimitive._fix_class() - return CTypesPrimitive - - def new_pointer_type(self, BItem): - getbtype = self.ffi._get_cached_btype - if BItem is getbtype(model.PrimitiveType('char')): - kind = 'charp' - elif BItem in (getbtype(model.PrimitiveType('signed char')), - getbtype(model.PrimitiveType('unsigned char'))): - kind = 'bytep' - elif BItem is getbtype(model.void_type): - kind = 'voidp' - else: - kind = 'generic' - # - class CTypesPtr(CTypesGenericPtr): - __slots__ = ['_own'] - if kind == 'charp': - __slots__ += ['__as_strbuf'] - _BItem = BItem - if hasattr(BItem, '_ctype'): - _ctype = ctypes.POINTER(BItem._ctype) - _bitem_size = ctypes.sizeof(BItem._ctype) - else: - _ctype = ctypes.c_void_p - if issubclass(BItem, CTypesGenericArray): - _reftypename = BItem._get_c_name('(* &)') - else: - _reftypename = BItem._get_c_name(' * &') - - def __init__(self, init): - ctypeobj = BItem._create_ctype_obj(init) - if kind == 'charp': - self.__as_strbuf = ctypes.create_string_buffer( - ctypeobj.value + b'\x00') - self._as_ctype_ptr = ctypes.cast( - self.__as_strbuf, self._ctype) - else: - self._as_ctype_ptr = ctypes.pointer(ctypeobj) - self._address = ctypes.cast(self._as_ctype_ptr, - ctypes.c_void_p).value - self._own = True - - def __add__(self, other): - if isinstance(other, (int, long)): - return self._new_pointer_at(self._address + - other * self._bitem_size) - else: - return NotImplemented - - def __sub__(self, other): - if isinstance(other, (int, long)): - return self._new_pointer_at(self._address - - other * self._bitem_size) - elif type(self) is type(other): - return (self._address - other._address) // self._bitem_size - else: - return NotImplemented - - def __getitem__(self, index): - if getattr(self, '_own', False) and index != 0: - raise IndexError - return BItem._from_ctypes(self._as_ctype_ptr[index]) - - def 
__setitem__(self, index, value): - self._as_ctype_ptr[index] = BItem._to_ctypes(value) - - if kind == 'charp' or kind == 'voidp': - @classmethod - def _arg_to_ctypes(cls, *value): - if value and isinstance(value[0], bytes): - return ctypes.c_char_p(value[0]) - else: - return super(CTypesPtr, cls)._arg_to_ctypes(*value) - - if kind == 'charp' or kind == 'bytep': - def _to_string(self, maxlen): - if maxlen < 0: - maxlen = sys.maxsize - p = ctypes.cast(self._as_ctype_ptr, - ctypes.POINTER(ctypes.c_char)) - n = 0 - while n < maxlen and p[n] != b'\x00': - n += 1 - return b''.join([p[i] for i in range(n)]) - - def _get_own_repr(self): - if getattr(self, '_own', False): - return 'owning %d bytes' % ( - ctypes.sizeof(self._as_ctype_ptr.contents),) - return super(CTypesPtr, self)._get_own_repr() - # - if (BItem is self.ffi._get_cached_btype(model.void_type) or - BItem is self.ffi._get_cached_btype(model.PrimitiveType('char'))): - CTypesPtr._automatic_casts = True - # - CTypesPtr._fix_class() - return CTypesPtr - - def new_array_type(self, CTypesPtr, length): - if length is None: - brackets = ' &[]' - else: - brackets = ' &[%d]' % length - BItem = CTypesPtr._BItem - getbtype = self.ffi._get_cached_btype - if BItem is getbtype(model.PrimitiveType('char')): - kind = 'char' - elif BItem in (getbtype(model.PrimitiveType('signed char')), - getbtype(model.PrimitiveType('unsigned char'))): - kind = 'byte' - else: - kind = 'generic' - # - class CTypesArray(CTypesGenericArray): - __slots__ = ['_blob', '_own'] - if length is not None: - _ctype = BItem._ctype * length - else: - __slots__.append('_ctype') - _reftypename = BItem._get_c_name(brackets) - _declared_length = length - _CTPtr = CTypesPtr - - def __init__(self, init): - if length is None: - if isinstance(init, (int, long)): - len1 = init - init = None - elif kind == 'char' and isinstance(init, bytes): - len1 = len(init) + 1 # extra null - else: - init = tuple(init) - len1 = len(init) - self._ctype = BItem._ctype * len1 - 
self._blob = self._ctype() - self._own = True - if init is not None: - self._initialize(self._blob, init) - - @staticmethod - def _initialize(blob, init): - if isinstance(init, bytes): - init = [init[i:i+1] for i in range(len(init))] - else: - init = tuple(init) - if len(init) > len(blob): - raise IndexError("too many initializers") - addr = ctypes.cast(blob, ctypes.c_void_p).value - PTR = ctypes.POINTER(BItem._ctype) - itemsize = ctypes.sizeof(BItem._ctype) - for i, value in enumerate(init): - p = ctypes.cast(addr + i * itemsize, PTR) - BItem._initialize(p.contents, value) - - def __len__(self): - return len(self._blob) - - def __getitem__(self, index): - if not (0 <= index < len(self._blob)): - raise IndexError - return BItem._from_ctypes(self._blob[index]) - - def __setitem__(self, index, value): - if not (0 <= index < len(self._blob)): - raise IndexError - self._blob[index] = BItem._to_ctypes(value) - - if kind == 'char' or kind == 'byte': - def _to_string(self, maxlen): - if maxlen < 0: - maxlen = len(self._blob) - p = ctypes.cast(self._blob, - ctypes.POINTER(ctypes.c_char)) - n = 0 - while n < maxlen and p[n] != b'\x00': - n += 1 - return b''.join([p[i] for i in range(n)]) - - def _get_own_repr(self): - if getattr(self, '_own', False): - return 'owning %d bytes' % (ctypes.sizeof(self._blob),) - return super(CTypesArray, self)._get_own_repr() - - def _convert_to_address(self, BClass): - if BClass in (CTypesPtr, None) or BClass._automatic_casts: - return ctypes.addressof(self._blob) - else: - return CTypesData._convert_to_address(self, BClass) - - @staticmethod - def _from_ctypes(ctypes_array): - self = CTypesArray.__new__(CTypesArray) - self._blob = ctypes_array - return self - - @staticmethod - def _arg_to_ctypes(value): - return CTypesPtr._arg_to_ctypes(value) - - def __add__(self, other): - if isinstance(other, (int, long)): - return CTypesPtr._new_pointer_at( - ctypes.addressof(self._blob) + - other * ctypes.sizeof(BItem._ctype)) - else: - return 
NotImplemented - - @classmethod - def _cast_from(cls, source): - raise NotImplementedError("casting to %r" % ( - cls._get_c_name(),)) - # - CTypesArray._fix_class() - return CTypesArray - - def _new_struct_or_union(self, kind, name, base_ctypes_class): - # - class struct_or_union(base_ctypes_class): - pass - struct_or_union.__name__ = '%s_%s' % (kind, name) - kind1 = kind - # - class CTypesStructOrUnion(CTypesBaseStructOrUnion): - __slots__ = ['_blob'] - _ctype = struct_or_union - _reftypename = '%s &' % (name,) - _kind = kind = kind1 - # - CTypesStructOrUnion._fix_class() - return CTypesStructOrUnion - - def new_struct_type(self, name): - return self._new_struct_or_union('struct', name, ctypes.Structure) - - def new_union_type(self, name): - return self._new_struct_or_union('union', name, ctypes.Union) - - def complete_struct_or_union(self, CTypesStructOrUnion, fields, tp, - totalsize=-1, totalalignment=-1, sflags=0): - if totalsize >= 0 or totalalignment >= 0: - raise NotImplementedError("the ctypes backend of CFFI does not support " - "structures completed by verify(); please " - "compile and install the _cffi_backend module.") - struct_or_union = CTypesStructOrUnion._ctype - fnames = [fname for (fname, BField, bitsize) in fields] - btypes = [BField for (fname, BField, bitsize) in fields] - bitfields = [bitsize for (fname, BField, bitsize) in fields] - # - bfield_types = {} - cfields = [] - for (fname, BField, bitsize) in fields: - if bitsize < 0: - cfields.append((fname, BField._ctype)) - bfield_types[fname] = BField - else: - cfields.append((fname, BField._ctype, bitsize)) - bfield_types[fname] = Ellipsis - if sflags & 8: - struct_or_union._pack_ = 1 - struct_or_union._fields_ = cfields - CTypesStructOrUnion._bfield_types = bfield_types - # - @staticmethod - def _create_ctype_obj(init): - result = struct_or_union() - if init is not None: - initialize(result, init) - return result - CTypesStructOrUnion._create_ctype_obj = _create_ctype_obj - # - def 
initialize(blob, init): - if is_union: - if len(init) > 1: - raise ValueError("union initializer: %d items given, but " - "only one supported (use a dict if needed)" - % (len(init),)) - if not isinstance(init, dict): - if isinstance(init, (bytes, unicode)): - raise TypeError("union initializer: got a str") - init = tuple(init) - if len(init) > len(fnames): - raise ValueError("too many values for %s initializer" % - CTypesStructOrUnion._get_c_name()) - init = dict(zip(fnames, init)) - addr = ctypes.addressof(blob) - for fname, value in init.items(): - BField, bitsize = name2fieldtype[fname] - assert bitsize < 0, \ - "not implemented: initializer with bit fields" - offset = CTypesStructOrUnion._offsetof(fname) - PTR = ctypes.POINTER(BField._ctype) - p = ctypes.cast(addr + offset, PTR) - BField._initialize(p.contents, value) - is_union = CTypesStructOrUnion._kind == 'union' - name2fieldtype = dict(zip(fnames, zip(btypes, bitfields))) - # - for fname, BField, bitsize in fields: - if fname == '': - raise NotImplementedError("nested anonymous structs/unions") - if hasattr(CTypesStructOrUnion, fname): - raise ValueError("the field name %r conflicts in " - "the ctypes backend" % fname) - if bitsize < 0: - def getter(self, fname=fname, BField=BField, - offset=CTypesStructOrUnion._offsetof(fname), - PTR=ctypes.POINTER(BField._ctype)): - addr = ctypes.addressof(self._blob) - p = ctypes.cast(addr + offset, PTR) - return BField._from_ctypes(p.contents) - def setter(self, value, fname=fname, BField=BField): - setattr(self._blob, fname, BField._to_ctypes(value)) - # - if issubclass(BField, CTypesGenericArray): - setter = None - if BField._declared_length == 0: - def getter(self, fname=fname, BFieldPtr=BField._CTPtr, - offset=CTypesStructOrUnion._offsetof(fname), - PTR=ctypes.POINTER(BField._ctype)): - addr = ctypes.addressof(self._blob) - p = ctypes.cast(addr + offset, PTR) - return BFieldPtr._from_ctypes(p) - # - else: - def getter(self, fname=fname, BField=BField): - return 
BField._from_ctypes(getattr(self._blob, fname)) - def setter(self, value, fname=fname, BField=BField): - # xxx obscure workaround - value = BField._to_ctypes(value) - oldvalue = getattr(self._blob, fname) - setattr(self._blob, fname, value) - if value != getattr(self._blob, fname): - setattr(self._blob, fname, oldvalue) - raise OverflowError("value too large for bitfield") - setattr(CTypesStructOrUnion, fname, property(getter, setter)) - # - CTypesPtr = self.ffi._get_cached_btype(model.PointerType(tp)) - for fname in fnames: - if hasattr(CTypesPtr, fname): - raise ValueError("the field name %r conflicts in " - "the ctypes backend" % fname) - def getter(self, fname=fname): - return getattr(self[0], fname) - def setter(self, value, fname=fname): - setattr(self[0], fname, value) - setattr(CTypesPtr, fname, property(getter, setter)) - - def new_function_type(self, BArgs, BResult, has_varargs): - nameargs = [BArg._get_c_name() for BArg in BArgs] - if has_varargs: - nameargs.append('...') - nameargs = ', '.join(nameargs) - # - class CTypesFunctionPtr(CTypesGenericPtr): - __slots__ = ['_own_callback', '_name'] - _ctype = ctypes.CFUNCTYPE(getattr(BResult, '_ctype', None), - *[BArg._ctype for BArg in BArgs], - use_errno=True) - _reftypename = BResult._get_c_name('(* &)(%s)' % (nameargs,)) - - def __init__(self, init, error=None): - # create a callback to the Python callable init() - import traceback - assert not has_varargs, "varargs not supported for callbacks" - if getattr(BResult, '_ctype', None) is not None: - error = BResult._from_ctypes( - BResult._create_ctype_obj(error)) - else: - error = None - def callback(*args): - args2 = [] - for arg, BArg in zip(args, BArgs): - args2.append(BArg._from_ctypes(arg)) - try: - res2 = init(*args2) - res2 = BResult._to_ctypes(res2) - except: - traceback.print_exc() - res2 = error - if issubclass(BResult, CTypesGenericPtr): - if res2: - res2 = ctypes.cast(res2, ctypes.c_void_p).value - # .value: http://bugs.python.org/issue1574593 - 
else: - res2 = None - #print repr(res2) - return res2 - if issubclass(BResult, CTypesGenericPtr): - # The only pointers callbacks can return are void*s: - # http://bugs.python.org/issue5710 - callback_ctype = ctypes.CFUNCTYPE( - ctypes.c_void_p, - *[BArg._ctype for BArg in BArgs], - use_errno=True) - else: - callback_ctype = CTypesFunctionPtr._ctype - self._as_ctype_ptr = callback_ctype(callback) - self._address = ctypes.cast(self._as_ctype_ptr, - ctypes.c_void_p).value - self._own_callback = init - - @staticmethod - def _initialize(ctypes_ptr, value): - if value: - raise NotImplementedError("ctypes backend: not supported: " - "initializers for function pointers") - - def __repr__(self): - c_name = getattr(self, '_name', None) - if c_name: - i = self._reftypename.index('(* &)') - if self._reftypename[i-1] not in ' )*': - c_name = ' ' + c_name - c_name = self._reftypename.replace('(* &)', c_name) - return CTypesData.__repr__(self, c_name) - - def _get_own_repr(self): - if getattr(self, '_own_callback', None) is not None: - return 'calling %r' % (self._own_callback,) - return super(CTypesFunctionPtr, self)._get_own_repr() - - def __call__(self, *args): - if has_varargs: - assert len(args) >= len(BArgs) - extraargs = args[len(BArgs):] - args = args[:len(BArgs)] - else: - assert len(args) == len(BArgs) - ctypes_args = [] - for arg, BArg in zip(args, BArgs): - ctypes_args.append(BArg._arg_to_ctypes(arg)) - if has_varargs: - for i, arg in enumerate(extraargs): - if arg is None: - ctypes_args.append(ctypes.c_void_p(0)) # NULL - continue - if not isinstance(arg, CTypesData): - raise TypeError( - "argument %d passed in the variadic part " - "needs to be a cdata object (got %s)" % - (1 + len(BArgs) + i, type(arg).__name__)) - ctypes_args.append(arg._arg_to_ctypes(arg)) - result = self._as_ctype_ptr(*ctypes_args) - return BResult._from_ctypes(result) - # - CTypesFunctionPtr._fix_class() - return CTypesFunctionPtr - - def new_enum_type(self, name, enumerators, enumvalues, 
CTypesInt): - assert isinstance(name, str) - reverse_mapping = dict(zip(reversed(enumvalues), - reversed(enumerators))) - # - class CTypesEnum(CTypesInt): - __slots__ = [] - _reftypename = '%s &' % name - - def _get_own_repr(self): - value = self._value - try: - return '%d: %s' % (value, reverse_mapping[value]) - except KeyError: - return str(value) - - def _to_string(self, maxlen): - value = self._value - try: - return reverse_mapping[value] - except KeyError: - return str(value) - # - CTypesEnum._fix_class() - return CTypesEnum - - def get_errno(self): - return ctypes.get_errno() - - def set_errno(self, value): - ctypes.set_errno(value) - - def string(self, b, maxlen=-1): - return b._to_string(maxlen) - - def buffer(self, bptr, size=-1): - raise NotImplementedError("buffer() with ctypes backend") - - def sizeof(self, cdata_or_BType): - if isinstance(cdata_or_BType, CTypesData): - return cdata_or_BType._get_size_of_instance() - else: - assert issubclass(cdata_or_BType, CTypesData) - return cdata_or_BType._get_size() - - def alignof(self, BType): - assert issubclass(BType, CTypesData) - return BType._alignment() - - def newp(self, BType, source): - if not issubclass(BType, CTypesData): - raise TypeError - return BType._newp(source) - - def cast(self, BType, source): - return BType._cast_from(source) - - def callback(self, BType, source, error, onerror): - assert onerror is None # XXX not implemented - return BType(source, error) - - def gcp(self, cdata, destructor): - BType = self.typeof(cdata) - - if destructor is None: - if not (hasattr(BType, '_gcp_type') and - BType._gcp_type is BType): - raise TypeError("Can remove destructor only on a object " - "previously returned by ffi.gc()") - cdata._destructor = None - return None - - try: - gcp_type = BType._gcp_type - except AttributeError: - class CTypesDataGcp(BType): - __slots__ = ['_orig', '_destructor'] - def __del__(self): - if self._destructor is not None: - self._destructor(self._orig) - gcp_type = 
BType._gcp_type = CTypesDataGcp - new_cdata = self.cast(gcp_type, cdata) - new_cdata._orig = cdata - new_cdata._destructor = destructor - return new_cdata - - typeof = type - - def getcname(self, BType, replace_with): - return BType._get_c_name(replace_with) - - def typeoffsetof(self, BType, fieldname, num=0): - if isinstance(fieldname, str): - if num == 0 and issubclass(BType, CTypesGenericPtr): - BType = BType._BItem - if not issubclass(BType, CTypesBaseStructOrUnion): - raise TypeError("expected a struct or union ctype") - BField = BType._bfield_types[fieldname] - if BField is Ellipsis: - raise TypeError("not supported for bitfields") - return (BField, BType._offsetof(fieldname)) - elif isinstance(fieldname, (int, long)): - if issubclass(BType, CTypesGenericArray): - BType = BType._CTPtr - if not issubclass(BType, CTypesGenericPtr): - raise TypeError("expected an array or ptr ctype") - BItem = BType._BItem - offset = BItem._get_size() * fieldname - if offset > sys.maxsize: - raise OverflowError - return (BItem, offset) - else: - raise TypeError(type(fieldname)) - - def rawaddressof(self, BTypePtr, cdata, offset=None): - if isinstance(cdata, CTypesBaseStructOrUnion): - ptr = ctypes.pointer(type(cdata)._to_ctypes(cdata)) - elif isinstance(cdata, CTypesGenericPtr): - if offset is None or not issubclass(type(cdata)._BItem, - CTypesBaseStructOrUnion): - raise TypeError("unexpected cdata type") - ptr = type(cdata)._to_ctypes(cdata) - elif isinstance(cdata, CTypesGenericArray): - ptr = type(cdata)._to_ctypes(cdata) - else: - raise TypeError("expected a ") - if offset: - ptr = ctypes.cast( - ctypes.c_void_p( - ctypes.cast(ptr, ctypes.c_void_p).value + offset), - type(ptr)) - return BTypePtr._from_ctypes(ptr) - - -class CTypesLibrary(object): - - def __init__(self, backend, cdll): - self.backend = backend - self.cdll = cdll - - def load_function(self, BType, name): - c_func = getattr(self.cdll, name) - funcobj = BType._from_ctypes(c_func) - funcobj._name = name - return 
funcobj - - def read_variable(self, BType, name): - try: - ctypes_obj = BType._ctype.in_dll(self.cdll, name) - except AttributeError as e: - raise NotImplementedError(e) - return BType._from_ctypes(ctypes_obj) - - def write_variable(self, BType, name, value): - new_ctypes_obj = BType._to_ctypes(value) - ctypes_obj = BType._ctype.in_dll(self.cdll, name) - ctypes.memmove(ctypes.addressof(ctypes_obj), - ctypes.addressof(new_ctypes_obj), - ctypes.sizeof(BType._ctype)) diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst --- a/doc/source/whatsnew.rst +++ b/doc/source/whatsnew.rst @@ -23,6 +23,10 @@ argument (in older versions, a copy would be made). This used to be a CPython-only optimization. +* Removed the ctypes backend. If ``_cffi_backend`` was not compiled, + you could ask (using an undocumented interface) for ``backend_ctypes`` + instead. That was never fully functional and long deprecated. + v1.7 ==== diff --git a/testing/cffi0/backend_tests.py b/testing/cffi0/test_backend.py rename from testing/cffi0/backend_tests.py rename to testing/cffi0/test_backend.py --- a/testing/cffi0/backend_tests.py +++ b/testing/cffi0/test_backend.py @@ -11,10 +11,10 @@ SIZE_OF_WCHAR = ctypes.sizeof(ctypes.c_wchar) -class BackendTests: +class TestBackend(object): def test_integer_ranges(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() for (c_type, size) in [('char', 1), ('short', 2), ('short int', 2), @@ -34,7 +34,7 @@ self._test_int_type(ffi, c_decl, size, unsigned) def test_fixedsize_int(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() for size in [1, 2, 4, 8]: self._test_int_type(ffi, 'int%d_t' % (8*size), size, False) self._test_int_type(ffi, 'uint%d_t' % (8*size), size, True) @@ -79,12 +79,12 @@ assert ffi.new(c_decl_ptr, long(max))[0] == max def test_new_unsupported_type(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() e = py.test.raises(TypeError, ffi.new, "int") assert str(e.value) == "expected a pointer or array ctype, got 'int'" def 
test_new_single_integer(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() p = ffi.new("int *") # similar to ffi.new("int[1]") assert p[0] == 0 p[0] = -123 @@ -94,14 +94,14 @@ assert repr(p) == "" % SIZE_OF_INT def test_new_array_no_arg(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() p = ffi.new("int[10]") # the object was zero-initialized: for i in range(10): assert p[i] == 0 def test_array_indexing(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() p = ffi.new("int[10]") p[0] = 42 p[9] = 43 @@ -113,7 +113,7 @@ py.test.raises(IndexError, "p[-1] = 44") def test_new_array_args(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() # this tries to be closer to C: where we say "int x[5] = {10, 20, ..}" # then here we must enclose the items in a list p = ffi.new("int[5]", [10, 20, 30, 40, 50]) @@ -132,7 +132,7 @@ assert repr(p) == "" % (4*SIZE_OF_INT) def test_new_array_varsize(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() p = ffi.new("int[]", 10) # a single integer is the length assert p[9] == 0 py.test.raises(IndexError, "p[10]") @@ -151,7 +151,7 @@ assert repr(p) == "" def test_pointer_init(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() n = ffi.new("int *", 24) a = ffi.new("int *[10]", [ffi.NULL, ffi.NULL, n, n, ffi.NULL]) for i in range(10): @@ -160,14 +160,14 @@ assert a[2] == a[3] == n def test_cannot_cast(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() a = ffi.new("short int[10]") e = py.test.raises(TypeError, ffi.new, "long int **", a) msg = str(e.value) assert "'short[10]'" in msg and "'long *'" in msg def test_new_pointer_to_array(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() a = ffi.new("int[4]", [100, 102, 104, 106]) p = ffi.new("int **", a) assert p[0] == ffi.cast("int *", a) @@ -180,7 +180,7 @@ # keepalive: a def test_pointer_direct(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() p = ffi.cast("int*", 0) assert p is not None assert bool(p) is False @@ -195,9 +195,11 @@ assert p[0] == 
123 assert p[1] == 456 + TypeRepr = "" + def test_repr(self): typerepr = self.TypeRepr - ffi = FFI(backend=self.Backend()) + ffi = FFI() ffi.cdef("struct foo { short a, b, c; };") p = ffi.cast("short unsigned int", 0) assert repr(p) == "" @@ -248,7 +250,7 @@ assert repr(ffi.typeof(q)) == typerepr % "struct foo" def test_new_array_of_array(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() p = ffi.new("int[3][4]") p[0][0] = 10 p[2][3] = 33 @@ -257,12 +259,12 @@ py.test.raises(IndexError, "p[1][-1]") def test_constructor_array_of_array(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() p = ffi.new("int[3][2]", [[10, 11], [12, 13], [14, 15]]) assert p[2][1] == 15 def test_new_array_of_pointer_1(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() n = ffi.new("int*", 99) p = ffi.new("int*[4]") p[3] = n @@ -271,7 +273,7 @@ assert a[0] == 99 def test_new_array_of_pointer_2(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() n = ffi.new("int[1]", [99]) p = ffi.new("int*[4]") p[3] = n @@ -280,7 +282,7 @@ assert a[0] == 99 def test_char(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() assert ffi.new("char*", b"\xff")[0] == b'\xff' assert ffi.new("char*")[0] == b'\x00' assert int(ffi.cast("char", 300)) == 300 - 256 @@ -317,7 +319,7 @@ py.test.skip("NotImplementedError: wchar_t") def test_wchar_t(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() self.check_wchar_t(ffi) assert ffi.new("wchar_t*", u+'x')[0] == u+'x' assert ffi.new("wchar_t*", u+'\u1234')[0] == u+'\u1234' @@ -372,7 +374,7 @@ py.test.raises(IndexError, ffi.new, "wchar_t[2]", u+"abc") def test_none_as_null_doesnt_work(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() p = ffi.new("int*[1]") assert p[0] is not None assert p[0] != None @@ -387,7 +389,7 @@ assert p[0] == ffi.NULL def test_float(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() p = ffi.new("float[]", [-2, -2.5]) assert p[0] == -2.0 assert p[1] == -2.5 @@ -412,7 +414,7 @@ assert p[0] == INF # infinite, 
not enough precision def test_struct_simple(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() ffi.cdef("struct foo { int a; short b, c; };") s = ffi.new("struct foo*") assert s.a == s.b == s.c == 0 @@ -431,7 +433,7 @@ py.test.raises(ValueError, ffi.new, "struct foo*", [1, 2, 3, 4]) def test_constructor_struct_from_dict(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() ffi.cdef("struct foo { int a; short b, c; };") s = ffi.new("struct foo*", {'b': 123, 'c': 456}) assert s.a == 0 @@ -440,7 +442,7 @@ py.test.raises(KeyError, ffi.new, "struct foo*", {'d': 456}) def test_struct_pointer(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() ffi.cdef("struct foo { int a; short b, c; };") s = ffi.new("struct foo*") assert s[0].a == s[0].b == s[0].c == 0 @@ -450,13 +452,13 @@ py.test.raises(IndexError, "s[1]") def test_struct_opaque(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() py.test.raises(TypeError, ffi.new, "struct baz*") p = ffi.new("struct baz **") # this works assert p[0] == ffi.NULL def test_pointer_to_struct(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() ffi.cdef("struct foo { int a; short b, c; };") s = ffi.new("struct foo *") s.a = -42 @@ -478,7 +480,7 @@ assert p[0][0].a == -46 def test_constructor_struct_of_array(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() ffi.cdef("struct foo { int a[2]; char b[3]; };") s = ffi.new("struct foo *", [[10, 11], [b'a', b'b', b'c']]) assert s.a[1] == 11 @@ -489,7 +491,7 @@ assert s.b[2] == b'c' def test_recursive_struct(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() ffi.cdef("struct foo { int value; struct foo *next; };") s = ffi.new("struct foo*") t = ffi.new("struct foo*") @@ -500,7 +502,7 @@ assert s.next.value == 456 def test_union_simple(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() ffi.cdef("union foo { int a; short b, c; };") u = ffi.new("union foo*") assert u.a == u.b == u.c == 0 @@ -515,13 +517,13 @@ assert repr(u) == "" % SIZE_OF_INT def 
test_union_opaque(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() py.test.raises(TypeError, ffi.new, "union baz *") u = ffi.new("union baz **") # this works assert u[0] == ffi.NULL def test_union_initializer(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() ffi.cdef("union foo { char a; int b; };") py.test.raises(TypeError, ffi.new, "union foo*", b'A') py.test.raises(TypeError, ffi.new, "union foo*", 5) @@ -536,7 +538,7 @@ assert u.b == 0 def test_sizeof_type(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() ffi.cdef(""" struct foo { int a; short b, c, d; }; union foo { int a; short b, c, d; }; @@ -553,7 +555,7 @@ assert size == expected_size, (size, expected_size, ctype) def test_sizeof_cdata(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() assert ffi.sizeof(ffi.new("short*")) == SIZE_OF_PTR assert ffi.sizeof(ffi.cast("short", 123)) == SIZE_OF_SHORT # @@ -562,7 +564,7 @@ assert ffi.sizeof(a) == 5 * SIZE_OF_INT def test_string_from_char_pointer(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() x = ffi.new("char*", b"x") assert str(x) == repr(x) assert ffi.string(x) == b"x" @@ -570,7 +572,7 @@ py.test.raises(TypeError, ffi.new, "char*", unicode("foo")) def test_unicode_from_wchar_pointer(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() self.check_wchar_t(ffi) x = ffi.new("wchar_t*", u+"x") assert unicode(x) == unicode(repr(x)) @@ -578,7 +580,7 @@ assert ffi.string(ffi.new("wchar_t*", u+"\x00")) == u+"" def test_string_from_char_array(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() p = ffi.new("char[]", b"hello.") p[5] = b'!' assert ffi.string(p) == b"hello!" 
@@ -595,7 +597,7 @@ assert ffi.string(p) == b'hello' def test_string_from_wchar_array(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() self.check_wchar_t(ffi) assert ffi.string(ffi.cast("wchar_t", "x")) == u+"x" assert ffi.string(ffi.cast("wchar_t", u+"x")) == u+"x" @@ -623,7 +625,7 @@ def test_fetch_const_char_p_field(self): # 'const' is ignored so far - ffi = FFI(backend=self.Backend()) + ffi = FFI() ffi.cdef("struct foo { const char *name; };") t = ffi.new("const char[]", b"testing") s = ffi.new("struct foo*", [t]) @@ -635,7 +637,7 @@ def test_fetch_const_wchar_p_field(self): # 'const' is ignored so far - ffi = FFI(backend=self.Backend()) + ffi = FFI() self.check_wchar_t(ffi) ffi.cdef("struct foo { const wchar_t *name; };") t = ffi.new("const wchar_t[]", u+"testing") @@ -646,7 +648,7 @@ assert s.name == ffi.NULL def test_voidp(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() py.test.raises(TypeError, ffi.new, "void*") p = ffi.new("void **") assert p[0] == ffi.NULL @@ -667,7 +669,7 @@ py.test.raises(TypeError, "s.r = b") # fails def test_functionptr_simple(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() py.test.raises(TypeError, ffi.callback, "int(*)(int)", 0) def cb(n): return n + 1 @@ -692,12 +694,12 @@ assert res == 46 def test_functionptr_advanced(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() t = ffi.typeof("int(*(*)(int))(int)") assert repr(t) == self.TypeRepr % "int(*(*)(int))(int)" def test_functionptr_voidptr_return(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() def cb(): return ffi.NULL p = ffi.callback("void*(*)()", cb) @@ -713,7 +715,7 @@ assert res == void_ptr def test_functionptr_intptr_return(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() def cb(): return ffi.NULL p = ffi.callback("int*(*)()", cb) @@ -735,7 +737,7 @@ assert res == int_array_ptr def test_functionptr_void_return(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() def foo(): pass foo_cb = ffi.callback("void foo()", foo) @@ 
-743,7 +745,7 @@ assert result is None def test_char_cast(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() p = ffi.cast("int", b'\x01') assert ffi.typeof(p) is ffi.typeof("int") assert int(p) == 1 @@ -755,7 +757,7 @@ assert int(p) == 0x81 def test_wchar_cast(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() self.check_wchar_t(ffi) p = ffi.cast("int", ffi.cast("wchar_t", u+'\u1234')) assert int(p) == 0x1234 @@ -771,7 +773,7 @@ assert int(p) == 0x1234 def test_cast_array_to_charp(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() a = ffi.new("short int[]", [0x1234, 0x5678]) p = ffi.cast("char*", a) data = b''.join([p[i] for i in range(4)]) @@ -781,7 +783,7 @@ assert data == b'\x12\x34\x56\x78' def test_cast_between_pointers(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() a = ffi.new("short int[]", [0x1234, 0x5678]) p = ffi.cast("short*", a) p2 = ffi.cast("int*", p) @@ -793,7 +795,7 @@ assert data == b'\x12\x34\x56\x78' def test_cast_pointer_and_int(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() a = ffi.new("short int[]", [0x1234, 0x5678]) l1 = ffi.cast("intptr_t", a) p = ffi.cast("short*", a) @@ -805,7 +807,7 @@ assert int(ffi.cast("intptr_t", ffi.NULL)) == 0 def test_cast_functionptr_and_int(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() def cb(n): return n + 1 a = ffi.callback("int(*)(int)", cb) @@ -817,7 +819,7 @@ assert hash(a) == hash(b) def test_callback_crash(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() def cb(n): raise Exception a = ffi.callback("int(*)(int)", cb, error=42) @@ -825,7 +827,7 @@ assert res == 42 def test_structptr_argument(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() ffi.cdef("struct foo_s { int a, b; };") def cb(p): return p[0].a * 1000 + p[0].b * 100 + p[1].a * 10 + p[1].b @@ -836,7 +838,7 @@ assert res == 5008 def test_array_argument_as_list(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() ffi.cdef("struct foo_s { int a, b; };") seen = [] def cb(argv): @@ 
-847,7 +849,7 @@ assert seen == [b"foobar", b"baz"] def test_cast_float(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() a = ffi.cast("float", 12) assert float(a) == 12.0 a = ffi.cast("float", 12.5) @@ -871,7 +873,7 @@ assert ffi.string(a) == b"B" def test_enum(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() ffi.cdef("enum foo { A0, B0, CC0, D0 };") assert ffi.string(ffi.cast("enum foo", 0)) == "A0" assert ffi.string(ffi.cast("enum foo", 2)) == "CC0" @@ -893,7 +895,7 @@ assert ffi.string(ffi.cast("enum baz", 0x2000)) == "B2" def test_enum_in_struct(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() ffi.cdef("enum foo { A, B, C, D }; struct bar { enum foo e; };") s = ffi.new("struct bar *") s.e = 0 @@ -914,7 +916,7 @@ py.test.raises(TypeError, "s.e = '#7'") def test_enum_non_contiguous(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() ffi.cdef("enum foo { A, B=42, C };") assert ffi.string(ffi.cast("enum foo", 0)) == "A" assert ffi.string(ffi.cast("enum foo", 42)) == "B" @@ -924,7 +926,7 @@ assert ffi.string(invalid_value) == "2" def test_enum_char_hex_oct(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() ffi.cdef(r"enum foo{A='!', B='\'', C=0x10, D=010, E=- 0x10, F=-010};") assert ffi.string(ffi.cast("enum foo", ord('!'))) == "A" assert ffi.string(ffi.cast("enum foo", ord("'"))) == "B" @@ -934,7 +936,7 @@ assert ffi.string(ffi.cast("enum foo", -8)) == "F" def test_enum_partial(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() ffi.cdef(r"enum foo {A, ...}; enum bar { B, C };") lib = ffi.dlopen(None) assert lib.B == 0 @@ -942,7 +944,7 @@ assert lib.C == 1 def test_array_of_struct(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() ffi.cdef("struct foo { int a, b; };") s = ffi.new("struct foo[1]") py.test.raises(AttributeError, 's.b') @@ -952,12 +954,12 @@ py.test.raises(IndexError, 's[1]') def test_pointer_to_array(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() p = ffi.new("int(**)[5]") assert repr(p) == "" % 
SIZE_OF_PTR def test_iterate_array(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() a = ffi.new("char[]", b"hello") assert list(a) == [b"h", b"e", b"l", b"l", b"o", b"\0"] assert list(iter(a)) == [b"h", b"e", b"l", b"l", b"o", b"\0"] @@ -968,14 +970,14 @@ py.test.raises(TypeError, list, ffi.new("int *")) def test_offsetof(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() ffi.cdef("struct foo { int a, b, c; };") assert ffi.offsetof("struct foo", "a") == 0 assert ffi.offsetof("struct foo", "b") == 4 assert ffi.offsetof("struct foo", "c") == 8 def test_offsetof_nested(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() ffi.cdef("struct foo { int a, b, c; };" "struct bar { struct foo d, e; };") assert ffi.offsetof("struct bar", "e") == 12 @@ -985,7 +987,7 @@ assert ffi.offsetof("struct bar", "e", "c") == 20 def test_offsetof_array(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() assert ffi.offsetof("int[]", 51) == 51 * ffi.sizeof("int") assert ffi.offsetof("int *", 51) == 51 * ffi.sizeof("int") ffi.cdef("struct bar { int a, b; int c[99]; };") @@ -994,14 +996,14 @@ assert ffi.offsetof("struct bar", "c", 51) == 53 * ffi.sizeof("int") def test_alignof(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() ffi.cdef("struct foo { char a; short b; char c; };") assert ffi.alignof("int") == 4 assert ffi.alignof("double") in (4, 8) assert ffi.alignof("struct foo") == 2 def test_bitfield(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() ffi.cdef("struct foo { int a:10, b:20, c:3; };") assert ffi.sizeof("struct foo") == 8 s = ffi.new("struct foo *") @@ -1021,7 +1023,7 @@ assert s.c == -4 def test_bitfield_enum(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() ffi.cdef(""" typedef enum { AA, BB, CC } foo_e; typedef struct { foo_e f:2; } foo_s; @@ -1031,7 +1033,7 @@ assert s.f == 2 def test_anonymous_struct(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() ffi.cdef("typedef struct { int a; } foo_t;") ffi.cdef("typedef struct { char 
b, c; } bar_t;") f = ffi.new("foo_t *", [12345]) @@ -1043,13 +1045,13 @@ def test_struct_with_two_usages(self): for name in ['foo_s', '']: # anonymous or not - ffi = FFI(backend=self.Backend()) + ffi = FFI() ffi.cdef("typedef struct %s { int a; } foo_t, *foo_p;" % name) f = ffi.new("foo_t *", [12345]) ps = ffi.new("foo_p[]", [f]) def test_pointer_arithmetic(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() s = ffi.new("short[]", list(range(100, 110))) p = ffi.cast("short *", s) assert p[2] == 102 @@ -1063,7 +1065,7 @@ assert p+1 == s+1 def test_pointer_comparison(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() s = ffi.new("short[]", list(range(100))) p = ffi.cast("short *", s) assert (p < s) is False @@ -1114,7 +1116,7 @@ assert (q != None) is True def test_no_integer_comparison(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() x = ffi.cast("int", 123) y = ffi.cast("int", 456) py.test.raises(TypeError, "x < y") @@ -1124,7 +1126,7 @@ py.test.raises(TypeError, "z < y") def test_ffi_buffer_ptr(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() a = ffi.new("short *", 100) try: b = ffi.buffer(a) @@ -1143,7 +1145,7 @@ assert a[0] == 101 def test_ffi_buffer_array(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() a = ffi.new("int[]", list(range(100, 110))) try: b = ffi.buffer(a) @@ -1160,7 +1162,7 @@ assert a[1] == 0x45 def test_ffi_buffer_ptr_size(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() a = ffi.new("short *", 0x4243) try: b = ffi.buffer(a, 1) @@ -1178,7 +1180,7 @@ assert a[0] == 0x6343 def test_ffi_buffer_array_size(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() a1 = ffi.new("int[]", list(range(100, 110))) a2 = ffi.new("int[]", list(range(100, 115))) try: @@ -1188,7 +1190,7 @@ assert ffi.buffer(a1)[:] == ffi.buffer(a2, 4*10)[:] def test_ffi_buffer_with_file(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() import tempfile, os, array fd, filename = tempfile.mkstemp() f = os.fdopen(fd, 'r+b') @@ -1208,7 
+1210,7 @@ os.unlink(filename) def test_ffi_buffer_with_io(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() import io, array f = io.BytesIO() a = ffi.new("int[]", list(range(1005))) @@ -1226,7 +1228,7 @@ f.close() def test_array_in_struct(self): - ffi = FFI(backend=self.Backend()) + ffi = FFI() ffi.cdef("struct foo_s { int len; short data[5]; };") p = ffi.new("struct foo_s *") p.data[3] = 5 @@ -1234,7 +1236,7 @@ assert repr(p.data).startswith(" Author: Armin Rigo Branch: Changeset: r2748:849a40dd7925 Date: 2016-09-03 11:02 +0200 http://bitbucket.org/cffi/cffi/changeset/849a40dd7925/ Log: Fix test diff --git a/testing/cffi0/test_zintegration.py b/testing/cffi0/test_zintegration.py --- a/testing/cffi0/test_zintegration.py +++ b/testing/cffi0/test_zintegration.py @@ -166,7 +166,7 @@ setuptools.__version__ = 'development' kwds = _set_py_limited_api(Extension, {}) - assert not kwds + assert kwds['py_limited_api'] == True finally: setuptools.__version__ = orig_version From pypy.commits at gmail.com Sat Sep 3 05:36:29 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 03 Sep 2016 02:36:29 -0700 (PDT) Subject: [pypy-commit] cffi default: Windows fun Message-ID: <57ca999d.c3f0c20a.54624.e9bf@mx.google.com> Author: Armin Rigo Branch: Changeset: r2750:d83bdf06b04f Date: 2016-09-03 11:36 +0200 http://bitbucket.org/cffi/cffi/changeset/d83bdf06b04f/ Log: Windows fun diff --git a/testing/cffi1/test_recompiler.py b/testing/cffi1/test_recompiler.py --- a/testing/cffi1/test_recompiler.py +++ b/testing/cffi1/test_recompiler.py @@ -1974,9 +1974,9 @@ def test_function_returns_partial_struct(): ffi = FFI() - ffi.cdef("struct a { int a; ...; }; struct a f1(int);") + ffi.cdef("struct aaa { int a; ...; }; struct aaa f1(int);") lib = verify(ffi, "test_function_returns_partial_struct", """ - struct a { int b, a, c; }; - static struct a f1(int x) { struct a s = {0}; s.a = x; return s; } + struct aaa { int b, a, c; }; + static struct aaa f1(int x) { struct aaa s = {0}; s.a = x; 
return s; } """) assert lib.f1(52).a == 52 From pypy.commits at gmail.com Sat Sep 3 04:32:11 2016 From: pypy.commits at gmail.com (ntruessel) Date: Sat, 03 Sep 2016 01:32:11 -0700 (PDT) Subject: [pypy-commit] pypy quad-color-gc: Claim QCGC supports rweakref to make pypy executable useable Message-ID: <57ca8a8b.81a2c20a.d8de6.de70@mx.google.com> Author: Nicolas Truessel Branch: quad-color-gc Changeset: r86853:9c2ceea892ed Date: 2016-09-03 10:31 +0200 http://bitbucket.org/pypy/pypy/changeset/9c2ceea892ed/ Log: Claim QCGC supports rweakref to make pypy executable useable diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -79,7 +79,7 @@ ("translation.gcrootfinder", "qcgc"), ("translation.gcremovetypeptr", True), ("translation.thread", False), - ("translation.rweakref", False)], + ("translation.rweakref", True)], # XXX "minimark": [("translation.gctransformer", "framework")], "incminimark": [("translation.gctransformer", "framework")], }, From pypy.commits at gmail.com Sat Sep 3 06:36:25 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 03 Sep 2016 03:36:25 -0700 (PDT) Subject: [pypy-commit] cffi default: If we say Py_LIMITED_API and we're compiling with a debug version of Message-ID: <57caa7a9.11051c0a.8851f.9c7e@mx.google.com> Author: Armin Rigo Branch: Changeset: r2751:0ff89f851d2c Date: 2016-09-03 12:36 +0200 http://bitbucket.org/cffi/cffi/changeset/0ff89f851d2c/ Log: If we say Py_LIMITED_API and we're compiling with a debug version of CPython, "#include " crashes. Work around it the hard way. diff --git a/cffi/_cffi_include.h b/cffi/_cffi_include.h --- a/cffi/_cffi_include.h +++ b/cffi/_cffi_include.h @@ -1,4 +1,20 @@ #define _CFFI_ + +/* We try to define Py_LIMITED_API before including Python.h. + + Mess: we can only define it if Py_DEBUG, Py_TRACE_REFS and + Py_REF_DEBUG are not defined. 
This is a best-effort approximation: + we can learn about Py_DEBUG from pyconfig.h, but it is unclear if + the same works for the other two macros. Py_DEBUG implies them, + but not the other way around. +*/ +#ifndef _CFFI_USE_EMBEDDING +# include +# if !defined(Py_DEBUG) && !defined(Py_TRACE_REFS) && !defined(Py_REF_DEBUG) +# define Py_LIMITED_API +# endif +#endif + #include #ifdef __cplusplus extern "C" { diff --git a/cffi/recompiler.py b/cffi/recompiler.py --- a/cffi/recompiler.py +++ b/cffi/recompiler.py @@ -275,8 +275,8 @@ def write_c_source_to_f(self, f, preamble): self._f = f prnt = self._prnt - if self.ffi._embedding is None: - prnt('#define Py_LIMITED_API') + if self.ffi._embedding is not None: + prnt('#define _CFFI_USE_EMBEDDING') # # first the '#include' (actually done by inlining the file's content) lines = self._rel_readlines('_cffi_include.h') From pypy.commits at gmail.com Sat Sep 3 06:39:21 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 03 Sep 2016 03:39:21 -0700 (PDT) Subject: [pypy-commit] cffi default: Document 0ff89f851d2c Message-ID: <57caa859.09afc20a.e370d.0052@mx.google.com> Author: Armin Rigo Branch: Changeset: r2752:02ca935a5c3f Date: 2016-09-03 12:39 +0200 http://bitbucket.org/cffi/cffi/changeset/02ca935a5c3f/ Log: Document 0ff89f851d2c diff --git a/doc/source/cdef.rst b/doc/source/cdef.rst --- a/doc/source/cdef.rst +++ b/doc/source/cdef.rst @@ -529,7 +529,9 @@ the same version of CPython x.y). However, the standard ``distutils`` package will still produce a file called e.g. ``NAME.cpython-35m-x86_64-linux-gnu.so``. You can manually rename it to -``NAME.abi3.so``, or use setuptools version 26 or later. +``NAME.abi3.so``, or use setuptools version 26 or later. Also, note +that compiling with a debug version of Python will not actually define +``Py_LIMITED_API``, as doing so makes ``Python.h`` unhappy. 
**ffibuilder.compile(tmpdir='.', verbose=False):** explicitly generate the .py or .c file, From pypy.commits at gmail.com Sat Sep 3 13:22:05 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 03 Sep 2016 10:22:05 -0700 (PDT) Subject: [pypy-commit] cffi default: - add ffi.compile(debug=flag) Message-ID: <57cb06bd.81091c0a.d9c91.1d6e@mx.google.com> Author: Armin Rigo Branch: Changeset: r2753:ecd17895c8df Date: 2016-09-03 19:21 +0200 http://bitbucket.org/cffi/cffi/changeset/ecd17895c8df/ Log: - add ffi.compile(debug=flag) - this small addition, and a few others not in pypy 5.4, will make the next cffi release v1.8.1 diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -646,7 +646,7 @@ recompile(self, module_name, source, c_file=filename, call_c_compiler=False, **kwds) - def compile(self, tmpdir='.', verbose=0, target=None): + def compile(self, tmpdir='.', verbose=0, target=None, debug=None): """The 'target' argument gives the final file name of the compiled DLL. Use '*' to force distutils' choice, suitable for regular CPython C API modules. 
Use a file name ending in '.*' @@ -663,7 +663,7 @@ module_name, source, source_extension, kwds = self._assigned_source return recompile(self, module_name, source, tmpdir=tmpdir, target=target, source_extension=source_extension, - compiler_verbose=verbose, **kwds) + compiler_verbose=verbose, debug=debug, **kwds) def init_once(self, func, tag): # Read _init_once_cache[tag], which is either (False, lock) if diff --git a/cffi/ffiplatform.py b/cffi/ffiplatform.py --- a/cffi/ffiplatform.py +++ b/cffi/ffiplatform.py @@ -21,12 +21,12 @@ allsources.append(os.path.normpath(src)) return Extension(name=modname, sources=allsources, **kwds) -def compile(tmpdir, ext, compiler_verbose=0): +def compile(tmpdir, ext, compiler_verbose=0, debug=None): """Compile a C extension module using distutils.""" saved_environ = os.environ.copy() try: - outputfilename = _build(tmpdir, ext, compiler_verbose) + outputfilename = _build(tmpdir, ext, compiler_verbose, debug) outputfilename = os.path.abspath(outputfilename) finally: # workaround for a distutils bugs where some env vars can @@ -36,7 +36,7 @@ os.environ[key] = value return outputfilename -def _build(tmpdir, ext, compiler_verbose=0): +def _build(tmpdir, ext, compiler_verbose=0, debug=None): # XXX compact but horrible :-( from distutils.core import Distribution import distutils.errors, distutils.log @@ -44,6 +44,9 @@ dist = Distribution({'ext_modules': [ext]}) dist.parse_config_files() options = dist.get_option_dict('build_ext') + if debug is None: + debug = sys.flags.debug + options['debug'] = ('ffiplatform', debug) options['force'] = ('ffiplatform', True) options['build_lib'] = ('ffiplatform', tmpdir) options['build_temp'] = ('ffiplatform', tmpdir) diff --git a/cffi/recompiler.py b/cffi/recompiler.py --- a/cffi/recompiler.py +++ b/cffi/recompiler.py @@ -1431,7 +1431,7 @@ def recompile(ffi, module_name, preamble, tmpdir='.', call_c_compiler=True, c_file=None, source_extension='.c', extradir=None, - compiler_verbose=1, target=None, 
**kwds): + compiler_verbose=1, target=None, debug=None, **kwds): if not isinstance(module_name, str): module_name = module_name.encode('ascii') if ffi._windows_unicode: @@ -1467,7 +1467,8 @@ if target != '*': _patch_for_target(patchlist, target) os.chdir(tmpdir) - outputfilename = ffiplatform.compile('.', ext, compiler_verbose) + outputfilename = ffiplatform.compile('.', ext, + compiler_verbose, debug) finally: os.chdir(cwd) _unpatch_meths(patchlist) diff --git a/doc/source/cdef.rst b/doc/source/cdef.rst --- a/doc/source/cdef.rst +++ b/doc/source/cdef.rst @@ -533,7 +533,7 @@ that compiling with a debug version of Python will not actually define ``Py_LIMITED_API``, as doing so makes ``Python.h`` unhappy. -**ffibuilder.compile(tmpdir='.', verbose=False):** +**ffibuilder.compile(tmpdir='.', verbose=False, debug=None):** explicitly generate the .py or .c file, and (if .c) compile it. The output file is (or are) put in the directory given by ``tmpdir``. In the examples given here, we use @@ -548,6 +548,13 @@ compiler. (This parameter might be changed to True by default in a future release.) +*New in version 1.8.1:* ``debug`` argument. If set to a bool, it +controls whether the C code is compiled in debug mode or not. The +default None means to use the host Python's ``sys.flags.debug``. +Starting with version 1.8.1, if you are running a debug-mode Python, the +C code is thus compiled in debug mode by default (note that it is anyway +necessary to do so on Windows). + **ffibuilder.emit_python_code(filename):** generate the given .py file (same as ``ffibuilder.compile()`` for ABI mode, with an explicitly-named file to write). 
If you choose, you can include this .py file pre-packaged in diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst --- a/doc/source/whatsnew.rst +++ b/doc/source/whatsnew.rst @@ -3,8 +3,8 @@ ====================== -v1.8 -==== +v1.8.1 +====== * CPython 3.x: experimental: the generated C extension modules now use the "limited API", which means that, as a compiled .so/.dll, it should @@ -13,6 +13,18 @@ name, you can rename it manually to ``NAME.abi3.so``, or use the very recent setuptools 26. +* Removed the ctypes backend. If ``_cffi_backend`` was not compiled, + you could ask (using an undocumented interface) for ``backend_ctypes`` + instead. That was never fully functional and long deprecated. + +* Added ``ffi.compile(debug=...)``, similar to ``python setup.py build + --debug`` but defaulting to True if we are running a debugging + version of Python itself. + + +v1.8 +==== + * Removed the restriction that ``ffi.from_buffer()`` cannot be used on byte strings. Now you can get a ``char *`` out of a byte string, which is valid as long as the string object is kept alive. (But @@ -23,10 +35,6 @@ argument (in older versions, a copy would be made). This used to be a CPython-only optimization. -* Removed the ctypes backend. If ``_cffi_backend`` was not compiled, - you could ask (using an undocumented interface) for ``backend_ctypes`` - instead. That was never fully functional and long deprecated. 
- v1.7 ==== From pypy.commits at gmail.com Sat Sep 3 13:26:09 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 03 Sep 2016 10:26:09 -0700 (PDT) Subject: [pypy-commit] cffi default: Bump version number to 1.8.1 Message-ID: <57cb07b1.8bd51c0a.5659f.03d6@mx.google.com> Author: Armin Rigo Branch: Changeset: r2754:90e2e3ee4411 Date: 2016-09-03 19:25 +0200 http://bitbucket.org/cffi/cffi/changeset/90e2e3ee4411/ Log: Bump version number to 1.8.1 diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -2,7 +2,7 @@ #include #include "structmember.h" -#define CFFI_VERSION "1.8.0" +#define CFFI_VERSION "1.8.1" #ifdef MS_WIN32 #include diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -12,7 +12,7 @@ # ____________________________________________________________ import sys -assert __version__ == "1.8.0", ("This test_c.py file is for testing a version" +assert __version__ == "1.8.1", ("This test_c.py file is for testing a version" " of cffi that differs from the one that we" " get from 'import _cffi_backend'") if sys.version_info < (3,): diff --git a/cffi/__init__.py b/cffi/__init__.py --- a/cffi/__init__.py +++ b/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.8.0" -__version_info__ = (1, 8, 0) +__version__ = "1.8.1" +__version_info__ = (1, 8, 1) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. 
It may be older than __version__ diff --git a/cffi/_embedding.h b/cffi/_embedding.h --- a/cffi/_embedding.h +++ b/cffi/_embedding.h @@ -233,7 +233,7 @@ f = PySys_GetObject((char *)"stderr"); if (f != NULL && f != Py_None) { PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME - "\ncompiled with cffi version: 1.8.0" + "\ncompiled with cffi version: 1.8.1" "\n_cffi_backend module: ", f); modules = PyImport_GetModuleDict(); mod = PyDict_GetItemString(modules, "_cffi_backend"); diff --git a/doc/source/conf.py b/doc/source/conf.py --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -47,7 +47,7 @@ # The short X.Y version. version = '1.8' # The full version, including alpha/beta/rc tags. -release = '1.8.0' +release = '1.8.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/doc/source/installation.rst b/doc/source/installation.rst --- a/doc/source/installation.rst +++ b/doc/source/installation.rst @@ -51,7 +51,7 @@ Download and Installation: -* http://pypi.python.org/packages/source/c/cffi/cffi-1.8.0.tar.gz +* http://pypi.python.org/packages/source/c/cffi/cffi-1.8.1.tar.gz - MD5: ... diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -144,7 +144,7 @@ `Mailing list `_ """, - version='1.8.0', + version='1.8.1', packages=['cffi'] if cpython else [], package_data={'cffi': ['_cffi_include.h', 'parse_c_type.h', '_embedding.h']} From pypy.commits at gmail.com Sat Sep 3 14:04:39 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 03 Sep 2016 11:04:39 -0700 (PDT) Subject: [pypy-commit] cffi default: Backed out changeset 0087e2aec9ef Message-ID: <57cb10b7.88cb1c0a.267a7.25f6@mx.google.com> Author: Armin Rigo Branch: Changeset: r2755:e8e4775048a1 Date: 2016-09-03 19:29 +0200 http://bitbucket.org/cffi/cffi/changeset/e8e4775048a1/ Log: Backed out changeset 0087e2aec9ef Un-kill the ctypes backend. Issue #282 for a justification. 
diff too long, truncating to 2000 out of 5541 lines diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -46,21 +46,20 @@ ''' def __init__(self, backend=None): - """Create an FFI instance. - - The 'backend' argument is not used any more and must be set to None. - It is still present only so that 'FFI(None)' still works, and - for a few tests. + """Create an FFI instance. The 'backend' argument is used to + select a non-default backend, mostly for tests. """ from . import cparser, model - if backend is None: - # You need the corresponding version of PyPy, or CPython - # with the '_cffi_backend' C extension module compiled. + # You need PyPy (>= 2.0 beta), or a CPython (>= 2.6) with + # _cffi_backend.so compiled. import _cffi_backend as backend from . import __version__ assert backend.__version__ == __version__, \ "version mismatch, %s != %s" % (backend.__version__, __version__) + # (If you insist you can also try to pass the option + # 'backend=backend_ctypes.CTypesBackend()', but don't + # rely on it! It's probably not going to work well.) 
self._backend = backend self._lock = allocate_lock() @@ -76,6 +75,8 @@ self._init_once_cache = {} self._cdef_version = None self._embedding = None + if hasattr(backend, 'set_ffi'): + backend.set_ffi(self) for name in backend.__dict__: if name.startswith('RTLD_'): setattr(self, name, getattr(backend, name)) @@ -83,10 +84,15 @@ with self._lock: self.BVoidP = self._get_cached_btype(model.voidp_type) self.BCharA = self._get_cached_btype(model.char_array_type) - # attach these constants to the class - if not hasattr(FFI, 'NULL'): - FFI.NULL = self.cast(self.BVoidP, 0) - FFI.CData, FFI.CType = backend._get_types() + if isinstance(backend, types.ModuleType): + # _cffi_backend: attach these constants to the class + if not hasattr(FFI, 'NULL'): + FFI.NULL = self.cast(self.BVoidP, 0) + FFI.CData, FFI.CType = backend._get_types() + else: + # ctypes backend: attach these constants to the instance + self.NULL = self.cast(self.BVoidP, 0) + self.CData, self.CType = backend._get_types() def cdef(self, csource, override=False, packed=False): """Parse the given C source. This registers all declared functions, diff --git a/cffi/backend_ctypes.py b/cffi/backend_ctypes.py new file mode 100644 --- /dev/null +++ b/cffi/backend_ctypes.py @@ -0,0 +1,1097 @@ +import ctypes, ctypes.util, operator, sys +from . 
import model + +if sys.version_info < (3,): + bytechr = chr +else: + unicode = str + long = int + xrange = range + bytechr = lambda num: bytes([num]) + +class CTypesType(type): + pass + +class CTypesData(object): + __metaclass__ = CTypesType + __slots__ = ['__weakref__'] + __name__ = '' + + def __init__(self, *args): + raise TypeError("cannot instantiate %r" % (self.__class__,)) + + @classmethod + def _newp(cls, init): + raise TypeError("expected a pointer or array ctype, got '%s'" + % (cls._get_c_name(),)) + + @staticmethod + def _to_ctypes(value): + raise TypeError + + @classmethod + def _arg_to_ctypes(cls, *value): + try: + ctype = cls._ctype + except AttributeError: + raise TypeError("cannot create an instance of %r" % (cls,)) + if value: + res = cls._to_ctypes(*value) + if not isinstance(res, ctype): + res = cls._ctype(res) + else: + res = cls._ctype() + return res + + @classmethod + def _create_ctype_obj(cls, init): + if init is None: + return cls._arg_to_ctypes() + else: + return cls._arg_to_ctypes(init) + + @staticmethod + def _from_ctypes(ctypes_value): + raise TypeError + + @classmethod + def _get_c_name(cls, replace_with=''): + return cls._reftypename.replace(' &', replace_with) + + @classmethod + def _fix_class(cls): + cls.__name__ = 'CData<%s>' % (cls._get_c_name(),) + cls.__qualname__ = 'CData<%s>' % (cls._get_c_name(),) + cls.__module__ = 'ffi' + + def _get_own_repr(self): + raise NotImplementedError + + def _addr_repr(self, address): + if address == 0: + return 'NULL' + else: + if address < 0: + address += 1 << (8*ctypes.sizeof(ctypes.c_void_p)) + return '0x%x' % address + + def __repr__(self, c_name=None): + own = self._get_own_repr() + return '' % (c_name or self._get_c_name(), own) + + def _convert_to_address(self, BClass): + if BClass is None: + raise TypeError("cannot convert %r to an address" % ( + self._get_c_name(),)) + else: + raise TypeError("cannot convert %r to %r" % ( + self._get_c_name(), BClass._get_c_name())) + + @classmethod + def 
_get_size(cls): + return ctypes.sizeof(cls._ctype) + + def _get_size_of_instance(self): + return ctypes.sizeof(self._ctype) + + @classmethod + def _cast_from(cls, source): + raise TypeError("cannot cast to %r" % (cls._get_c_name(),)) + + def _cast_to_integer(self): + return self._convert_to_address(None) + + @classmethod + def _alignment(cls): + return ctypes.alignment(cls._ctype) + + def __iter__(self): + raise TypeError("cdata %r does not support iteration" % ( + self._get_c_name()),) + + def _make_cmp(name): + cmpfunc = getattr(operator, name) + def cmp(self, other): + if isinstance(other, CTypesData): + return cmpfunc(self._convert_to_address(None), + other._convert_to_address(None)) + else: + return NotImplemented + cmp.func_name = name + return cmp + + __eq__ = _make_cmp('__eq__') + __ne__ = _make_cmp('__ne__') + __lt__ = _make_cmp('__lt__') + __le__ = _make_cmp('__le__') + __gt__ = _make_cmp('__gt__') + __ge__ = _make_cmp('__ge__') + + def __hash__(self): + return hash(type(self)) ^ hash(self._convert_to_address(None)) + + def _to_string(self, maxlen): + raise TypeError("string(): %r" % (self,)) + + +class CTypesGenericPrimitive(CTypesData): + __slots__ = [] + + def __eq__(self, other): + return self is other + + def __ne__(self, other): + return self is not other + + def __hash__(self): + return object.__hash__(self) + + def _get_own_repr(self): + return repr(self._from_ctypes(self._value)) + + +class CTypesGenericArray(CTypesData): + __slots__ = [] + + @classmethod + def _newp(cls, init): + return cls(init) + + def __iter__(self): + for i in xrange(len(self)): + yield self[i] + + def _get_own_repr(self): + return self._addr_repr(ctypes.addressof(self._blob)) + + +class CTypesGenericPtr(CTypesData): + __slots__ = ['_address', '_as_ctype_ptr'] + _automatic_casts = False + kind = "pointer" + + @classmethod + def _newp(cls, init): + return cls(init) + + @classmethod + def _cast_from(cls, source): + if source is None: + address = 0 + elif isinstance(source, 
CTypesData): + address = source._cast_to_integer() + elif isinstance(source, (int, long)): + address = source + else: + raise TypeError("bad type for cast to %r: %r" % + (cls, type(source).__name__)) + return cls._new_pointer_at(address) + + @classmethod + def _new_pointer_at(cls, address): + self = cls.__new__(cls) + self._address = address + self._as_ctype_ptr = ctypes.cast(address, cls._ctype) + return self + + def _get_own_repr(self): + try: + return self._addr_repr(self._address) + except AttributeError: + return '???' + + def _cast_to_integer(self): + return self._address + + def __nonzero__(self): + return bool(self._address) + __bool__ = __nonzero__ + + @classmethod + def _to_ctypes(cls, value): + if not isinstance(value, CTypesData): + raise TypeError("unexpected %s object" % type(value).__name__) + address = value._convert_to_address(cls) + return ctypes.cast(address, cls._ctype) + + @classmethod + def _from_ctypes(cls, ctypes_ptr): + address = ctypes.cast(ctypes_ptr, ctypes.c_void_p).value or 0 + return cls._new_pointer_at(address) + + @classmethod + def _initialize(cls, ctypes_ptr, value): + if value: + ctypes_ptr.contents = cls._to_ctypes(value).contents + + def _convert_to_address(self, BClass): + if (BClass in (self.__class__, None) or BClass._automatic_casts + or self._automatic_casts): + return self._address + else: + return CTypesData._convert_to_address(self, BClass) + + +class CTypesBaseStructOrUnion(CTypesData): + __slots__ = ['_blob'] + + @classmethod + def _create_ctype_obj(cls, init): + # may be overridden + raise TypeError("cannot instantiate opaque type %s" % (cls,)) + + def _get_own_repr(self): + return self._addr_repr(ctypes.addressof(self._blob)) + + @classmethod + def _offsetof(cls, fieldname): + return getattr(cls._ctype, fieldname).offset + + def _convert_to_address(self, BClass): + if getattr(BClass, '_BItem', None) is self.__class__: + return ctypes.addressof(self._blob) + else: + return CTypesData._convert_to_address(self, BClass) 
+ + @classmethod + def _from_ctypes(cls, ctypes_struct_or_union): + self = cls.__new__(cls) + self._blob = ctypes_struct_or_union + return self + + @classmethod + def _to_ctypes(cls, value): + return value._blob + + def __repr__(self, c_name=None): + return CTypesData.__repr__(self, c_name or self._get_c_name(' &')) + + +class CTypesBackend(object): + + PRIMITIVE_TYPES = { + 'char': ctypes.c_char, + 'short': ctypes.c_short, + 'int': ctypes.c_int, + 'long': ctypes.c_long, + 'long long': ctypes.c_longlong, + 'signed char': ctypes.c_byte, + 'unsigned char': ctypes.c_ubyte, + 'unsigned short': ctypes.c_ushort, + 'unsigned int': ctypes.c_uint, + 'unsigned long': ctypes.c_ulong, + 'unsigned long long': ctypes.c_ulonglong, + 'float': ctypes.c_float, + 'double': ctypes.c_double, + '_Bool': ctypes.c_bool, + } + + for _name in ['unsigned long long', 'unsigned long', + 'unsigned int', 'unsigned short', 'unsigned char']: + _size = ctypes.sizeof(PRIMITIVE_TYPES[_name]) + PRIMITIVE_TYPES['uint%d_t' % (8*_size)] = PRIMITIVE_TYPES[_name] + if _size == ctypes.sizeof(ctypes.c_void_p): + PRIMITIVE_TYPES['uintptr_t'] = PRIMITIVE_TYPES[_name] + if _size == ctypes.sizeof(ctypes.c_size_t): + PRIMITIVE_TYPES['size_t'] = PRIMITIVE_TYPES[_name] + + for _name in ['long long', 'long', 'int', 'short', 'signed char']: + _size = ctypes.sizeof(PRIMITIVE_TYPES[_name]) + PRIMITIVE_TYPES['int%d_t' % (8*_size)] = PRIMITIVE_TYPES[_name] + if _size == ctypes.sizeof(ctypes.c_void_p): + PRIMITIVE_TYPES['intptr_t'] = PRIMITIVE_TYPES[_name] + PRIMITIVE_TYPES['ptrdiff_t'] = PRIMITIVE_TYPES[_name] + if _size == ctypes.sizeof(ctypes.c_size_t): + PRIMITIVE_TYPES['ssize_t'] = PRIMITIVE_TYPES[_name] + + + def __init__(self): + self.RTLD_LAZY = 0 # not supported anyway by ctypes + self.RTLD_NOW = 0 + self.RTLD_GLOBAL = ctypes.RTLD_GLOBAL + self.RTLD_LOCAL = ctypes.RTLD_LOCAL + + def set_ffi(self, ffi): + self.ffi = ffi + + def _get_types(self): + return CTypesData, CTypesType + + def load_library(self, path, 
flags=0): + cdll = ctypes.CDLL(path, flags) + return CTypesLibrary(self, cdll) + + def new_void_type(self): + class CTypesVoid(CTypesData): + __slots__ = [] + _reftypename = 'void &' + @staticmethod + def _from_ctypes(novalue): + return None + @staticmethod + def _to_ctypes(novalue): + if novalue is not None: + raise TypeError("None expected, got %s object" % + (type(novalue).__name__,)) + return None + CTypesVoid._fix_class() + return CTypesVoid + + def new_primitive_type(self, name): + if name == 'wchar_t': + raise NotImplementedError(name) + ctype = self.PRIMITIVE_TYPES[name] + if name == 'char': + kind = 'char' + elif name in ('float', 'double'): + kind = 'float' + else: + if name in ('signed char', 'unsigned char'): + kind = 'byte' + elif name == '_Bool': + kind = 'bool' + else: + kind = 'int' + is_signed = (ctype(-1).value == -1) + # + def _cast_source_to_int(source): + if isinstance(source, (int, long, float)): + source = int(source) + elif isinstance(source, CTypesData): + source = source._cast_to_integer() + elif isinstance(source, bytes): + source = ord(source) + elif source is None: + source = 0 + else: + raise TypeError("bad type for cast to %r: %r" % + (CTypesPrimitive, type(source).__name__)) + return source + # + kind1 = kind + class CTypesPrimitive(CTypesGenericPrimitive): + __slots__ = ['_value'] + _ctype = ctype + _reftypename = '%s &' % name + kind = kind1 + + def __init__(self, value): + self._value = value + + @staticmethod + def _create_ctype_obj(init): + if init is None: + return ctype() + return ctype(CTypesPrimitive._to_ctypes(init)) + + if kind == 'int' or kind == 'byte': + @classmethod + def _cast_from(cls, source): + source = _cast_source_to_int(source) + source = ctype(source).value # cast within range + return cls(source) + def __int__(self): + return self._value + + if kind == 'bool': + @classmethod + def _cast_from(cls, source): + if not isinstance(source, (int, long, float)): + source = _cast_source_to_int(source) + return 
cls(bool(source)) + def __int__(self): + return self._value + + if kind == 'char': + @classmethod + def _cast_from(cls, source): + source = _cast_source_to_int(source) + source = bytechr(source & 0xFF) + return cls(source) + def __int__(self): + return ord(self._value) + + if kind == 'float': + @classmethod + def _cast_from(cls, source): + if isinstance(source, float): + pass + elif isinstance(source, CTypesGenericPrimitive): + if hasattr(source, '__float__'): + source = float(source) + else: + source = int(source) + else: + source = _cast_source_to_int(source) + source = ctype(source).value # fix precision + return cls(source) + def __int__(self): + return int(self._value) + def __float__(self): + return self._value + + _cast_to_integer = __int__ + + if kind == 'int' or kind == 'byte' or kind == 'bool': + @staticmethod + def _to_ctypes(x): + if not isinstance(x, (int, long)): + if isinstance(x, CTypesData): + x = int(x) + else: + raise TypeError("integer expected, got %s" % + type(x).__name__) + if ctype(x).value != x: + if not is_signed and x < 0: + raise OverflowError("%s: negative integer" % name) + else: + raise OverflowError("%s: integer out of bounds" + % name) + return x + + if kind == 'char': + @staticmethod + def _to_ctypes(x): + if isinstance(x, bytes) and len(x) == 1: + return x + if isinstance(x, CTypesPrimitive): # > + return x._value + raise TypeError("character expected, got %s" % + type(x).__name__) + def __nonzero__(self): + return ord(self._value) != 0 + else: + def __nonzero__(self): + return self._value != 0 + __bool__ = __nonzero__ + + if kind == 'float': + @staticmethod + def _to_ctypes(x): + if not isinstance(x, (int, long, float, CTypesData)): + raise TypeError("float expected, got %s" % + type(x).__name__) + return ctype(x).value + + @staticmethod + def _from_ctypes(value): + return getattr(value, 'value', value) + + @staticmethod + def _initialize(blob, init): + blob.value = CTypesPrimitive._to_ctypes(init) + + if kind == 'char': + def 
_to_string(self, maxlen): + return self._value + if kind == 'byte': + def _to_string(self, maxlen): + return chr(self._value & 0xff) + # + CTypesPrimitive._fix_class() + return CTypesPrimitive + + def new_pointer_type(self, BItem): + getbtype = self.ffi._get_cached_btype + if BItem is getbtype(model.PrimitiveType('char')): + kind = 'charp' + elif BItem in (getbtype(model.PrimitiveType('signed char')), + getbtype(model.PrimitiveType('unsigned char'))): + kind = 'bytep' + elif BItem is getbtype(model.void_type): + kind = 'voidp' + else: + kind = 'generic' + # + class CTypesPtr(CTypesGenericPtr): + __slots__ = ['_own'] + if kind == 'charp': + __slots__ += ['__as_strbuf'] + _BItem = BItem + if hasattr(BItem, '_ctype'): + _ctype = ctypes.POINTER(BItem._ctype) + _bitem_size = ctypes.sizeof(BItem._ctype) + else: + _ctype = ctypes.c_void_p + if issubclass(BItem, CTypesGenericArray): + _reftypename = BItem._get_c_name('(* &)') + else: + _reftypename = BItem._get_c_name(' * &') + + def __init__(self, init): + ctypeobj = BItem._create_ctype_obj(init) + if kind == 'charp': + self.__as_strbuf = ctypes.create_string_buffer( + ctypeobj.value + b'\x00') + self._as_ctype_ptr = ctypes.cast( + self.__as_strbuf, self._ctype) + else: + self._as_ctype_ptr = ctypes.pointer(ctypeobj) + self._address = ctypes.cast(self._as_ctype_ptr, + ctypes.c_void_p).value + self._own = True + + def __add__(self, other): + if isinstance(other, (int, long)): + return self._new_pointer_at(self._address + + other * self._bitem_size) + else: + return NotImplemented + + def __sub__(self, other): + if isinstance(other, (int, long)): + return self._new_pointer_at(self._address - + other * self._bitem_size) + elif type(self) is type(other): + return (self._address - other._address) // self._bitem_size + else: + return NotImplemented + + def __getitem__(self, index): + if getattr(self, '_own', False) and index != 0: + raise IndexError + return BItem._from_ctypes(self._as_ctype_ptr[index]) + + def 
__setitem__(self, index, value): + self._as_ctype_ptr[index] = BItem._to_ctypes(value) + + if kind == 'charp' or kind == 'voidp': + @classmethod + def _arg_to_ctypes(cls, *value): + if value and isinstance(value[0], bytes): + return ctypes.c_char_p(value[0]) + else: + return super(CTypesPtr, cls)._arg_to_ctypes(*value) + + if kind == 'charp' or kind == 'bytep': + def _to_string(self, maxlen): + if maxlen < 0: + maxlen = sys.maxsize + p = ctypes.cast(self._as_ctype_ptr, + ctypes.POINTER(ctypes.c_char)) + n = 0 + while n < maxlen and p[n] != b'\x00': + n += 1 + return b''.join([p[i] for i in range(n)]) + + def _get_own_repr(self): + if getattr(self, '_own', False): + return 'owning %d bytes' % ( + ctypes.sizeof(self._as_ctype_ptr.contents),) + return super(CTypesPtr, self)._get_own_repr() + # + if (BItem is self.ffi._get_cached_btype(model.void_type) or + BItem is self.ffi._get_cached_btype(model.PrimitiveType('char'))): + CTypesPtr._automatic_casts = True + # + CTypesPtr._fix_class() + return CTypesPtr + + def new_array_type(self, CTypesPtr, length): + if length is None: + brackets = ' &[]' + else: + brackets = ' &[%d]' % length + BItem = CTypesPtr._BItem + getbtype = self.ffi._get_cached_btype + if BItem is getbtype(model.PrimitiveType('char')): + kind = 'char' + elif BItem in (getbtype(model.PrimitiveType('signed char')), + getbtype(model.PrimitiveType('unsigned char'))): + kind = 'byte' + else: + kind = 'generic' + # + class CTypesArray(CTypesGenericArray): + __slots__ = ['_blob', '_own'] + if length is not None: + _ctype = BItem._ctype * length + else: + __slots__.append('_ctype') + _reftypename = BItem._get_c_name(brackets) + _declared_length = length + _CTPtr = CTypesPtr + + def __init__(self, init): + if length is None: + if isinstance(init, (int, long)): + len1 = init + init = None + elif kind == 'char' and isinstance(init, bytes): + len1 = len(init) + 1 # extra null + else: + init = tuple(init) + len1 = len(init) + self._ctype = BItem._ctype * len1 + 
self._blob = self._ctype() + self._own = True + if init is not None: + self._initialize(self._blob, init) + + @staticmethod + def _initialize(blob, init): + if isinstance(init, bytes): + init = [init[i:i+1] for i in range(len(init))] + else: + init = tuple(init) + if len(init) > len(blob): + raise IndexError("too many initializers") + addr = ctypes.cast(blob, ctypes.c_void_p).value + PTR = ctypes.POINTER(BItem._ctype) + itemsize = ctypes.sizeof(BItem._ctype) + for i, value in enumerate(init): + p = ctypes.cast(addr + i * itemsize, PTR) + BItem._initialize(p.contents, value) + + def __len__(self): + return len(self._blob) + + def __getitem__(self, index): + if not (0 <= index < len(self._blob)): + raise IndexError + return BItem._from_ctypes(self._blob[index]) + + def __setitem__(self, index, value): + if not (0 <= index < len(self._blob)): + raise IndexError + self._blob[index] = BItem._to_ctypes(value) + + if kind == 'char' or kind == 'byte': + def _to_string(self, maxlen): + if maxlen < 0: + maxlen = len(self._blob) + p = ctypes.cast(self._blob, + ctypes.POINTER(ctypes.c_char)) + n = 0 + while n < maxlen and p[n] != b'\x00': + n += 1 + return b''.join([p[i] for i in range(n)]) + + def _get_own_repr(self): + if getattr(self, '_own', False): + return 'owning %d bytes' % (ctypes.sizeof(self._blob),) + return super(CTypesArray, self)._get_own_repr() + + def _convert_to_address(self, BClass): + if BClass in (CTypesPtr, None) or BClass._automatic_casts: + return ctypes.addressof(self._blob) + else: + return CTypesData._convert_to_address(self, BClass) + + @staticmethod + def _from_ctypes(ctypes_array): + self = CTypesArray.__new__(CTypesArray) + self._blob = ctypes_array + return self + + @staticmethod + def _arg_to_ctypes(value): + return CTypesPtr._arg_to_ctypes(value) + + def __add__(self, other): + if isinstance(other, (int, long)): + return CTypesPtr._new_pointer_at( + ctypes.addressof(self._blob) + + other * ctypes.sizeof(BItem._ctype)) + else: + return 
NotImplemented + + @classmethod + def _cast_from(cls, source): + raise NotImplementedError("casting to %r" % ( + cls._get_c_name(),)) + # + CTypesArray._fix_class() + return CTypesArray + + def _new_struct_or_union(self, kind, name, base_ctypes_class): + # + class struct_or_union(base_ctypes_class): + pass + struct_or_union.__name__ = '%s_%s' % (kind, name) + kind1 = kind + # + class CTypesStructOrUnion(CTypesBaseStructOrUnion): + __slots__ = ['_blob'] + _ctype = struct_or_union + _reftypename = '%s &' % (name,) + _kind = kind = kind1 + # + CTypesStructOrUnion._fix_class() + return CTypesStructOrUnion + + def new_struct_type(self, name): + return self._new_struct_or_union('struct', name, ctypes.Structure) + + def new_union_type(self, name): + return self._new_struct_or_union('union', name, ctypes.Union) + + def complete_struct_or_union(self, CTypesStructOrUnion, fields, tp, + totalsize=-1, totalalignment=-1, sflags=0): + if totalsize >= 0 or totalalignment >= 0: + raise NotImplementedError("the ctypes backend of CFFI does not support " + "structures completed by verify(); please " + "compile and install the _cffi_backend module.") + struct_or_union = CTypesStructOrUnion._ctype + fnames = [fname for (fname, BField, bitsize) in fields] + btypes = [BField for (fname, BField, bitsize) in fields] + bitfields = [bitsize for (fname, BField, bitsize) in fields] + # + bfield_types = {} + cfields = [] + for (fname, BField, bitsize) in fields: + if bitsize < 0: + cfields.append((fname, BField._ctype)) + bfield_types[fname] = BField + else: + cfields.append((fname, BField._ctype, bitsize)) + bfield_types[fname] = Ellipsis + if sflags & 8: + struct_or_union._pack_ = 1 + struct_or_union._fields_ = cfields + CTypesStructOrUnion._bfield_types = bfield_types + # + @staticmethod + def _create_ctype_obj(init): + result = struct_or_union() + if init is not None: + initialize(result, init) + return result + CTypesStructOrUnion._create_ctype_obj = _create_ctype_obj + # + def 
initialize(blob, init): + if is_union: + if len(init) > 1: + raise ValueError("union initializer: %d items given, but " + "only one supported (use a dict if needed)" + % (len(init),)) + if not isinstance(init, dict): + if isinstance(init, (bytes, unicode)): + raise TypeError("union initializer: got a str") + init = tuple(init) + if len(init) > len(fnames): + raise ValueError("too many values for %s initializer" % + CTypesStructOrUnion._get_c_name()) + init = dict(zip(fnames, init)) + addr = ctypes.addressof(blob) + for fname, value in init.items(): + BField, bitsize = name2fieldtype[fname] + assert bitsize < 0, \ + "not implemented: initializer with bit fields" + offset = CTypesStructOrUnion._offsetof(fname) + PTR = ctypes.POINTER(BField._ctype) + p = ctypes.cast(addr + offset, PTR) + BField._initialize(p.contents, value) + is_union = CTypesStructOrUnion._kind == 'union' + name2fieldtype = dict(zip(fnames, zip(btypes, bitfields))) + # + for fname, BField, bitsize in fields: + if fname == '': + raise NotImplementedError("nested anonymous structs/unions") + if hasattr(CTypesStructOrUnion, fname): + raise ValueError("the field name %r conflicts in " + "the ctypes backend" % fname) + if bitsize < 0: + def getter(self, fname=fname, BField=BField, + offset=CTypesStructOrUnion._offsetof(fname), + PTR=ctypes.POINTER(BField._ctype)): + addr = ctypes.addressof(self._blob) + p = ctypes.cast(addr + offset, PTR) + return BField._from_ctypes(p.contents) + def setter(self, value, fname=fname, BField=BField): + setattr(self._blob, fname, BField._to_ctypes(value)) + # + if issubclass(BField, CTypesGenericArray): + setter = None + if BField._declared_length == 0: + def getter(self, fname=fname, BFieldPtr=BField._CTPtr, + offset=CTypesStructOrUnion._offsetof(fname), + PTR=ctypes.POINTER(BField._ctype)): + addr = ctypes.addressof(self._blob) + p = ctypes.cast(addr + offset, PTR) + return BFieldPtr._from_ctypes(p) + # + else: + def getter(self, fname=fname, BField=BField): + return 
BField._from_ctypes(getattr(self._blob, fname)) + def setter(self, value, fname=fname, BField=BField): + # xxx obscure workaround + value = BField._to_ctypes(value) + oldvalue = getattr(self._blob, fname) + setattr(self._blob, fname, value) + if value != getattr(self._blob, fname): + setattr(self._blob, fname, oldvalue) + raise OverflowError("value too large for bitfield") + setattr(CTypesStructOrUnion, fname, property(getter, setter)) + # + CTypesPtr = self.ffi._get_cached_btype(model.PointerType(tp)) + for fname in fnames: + if hasattr(CTypesPtr, fname): + raise ValueError("the field name %r conflicts in " + "the ctypes backend" % fname) + def getter(self, fname=fname): + return getattr(self[0], fname) + def setter(self, value, fname=fname): + setattr(self[0], fname, value) + setattr(CTypesPtr, fname, property(getter, setter)) + + def new_function_type(self, BArgs, BResult, has_varargs): + nameargs = [BArg._get_c_name() for BArg in BArgs] + if has_varargs: + nameargs.append('...') + nameargs = ', '.join(nameargs) + # + class CTypesFunctionPtr(CTypesGenericPtr): + __slots__ = ['_own_callback', '_name'] + _ctype = ctypes.CFUNCTYPE(getattr(BResult, '_ctype', None), + *[BArg._ctype for BArg in BArgs], + use_errno=True) + _reftypename = BResult._get_c_name('(* &)(%s)' % (nameargs,)) + + def __init__(self, init, error=None): + # create a callback to the Python callable init() + import traceback + assert not has_varargs, "varargs not supported for callbacks" + if getattr(BResult, '_ctype', None) is not None: + error = BResult._from_ctypes( + BResult._create_ctype_obj(error)) + else: + error = None + def callback(*args): + args2 = [] + for arg, BArg in zip(args, BArgs): + args2.append(BArg._from_ctypes(arg)) + try: + res2 = init(*args2) + res2 = BResult._to_ctypes(res2) + except: + traceback.print_exc() + res2 = error + if issubclass(BResult, CTypesGenericPtr): + if res2: + res2 = ctypes.cast(res2, ctypes.c_void_p).value + # .value: http://bugs.python.org/issue1574593 + 
else: + res2 = None + #print repr(res2) + return res2 + if issubclass(BResult, CTypesGenericPtr): + # The only pointers callbacks can return are void*s: + # http://bugs.python.org/issue5710 + callback_ctype = ctypes.CFUNCTYPE( + ctypes.c_void_p, + *[BArg._ctype for BArg in BArgs], + use_errno=True) + else: + callback_ctype = CTypesFunctionPtr._ctype + self._as_ctype_ptr = callback_ctype(callback) + self._address = ctypes.cast(self._as_ctype_ptr, + ctypes.c_void_p).value + self._own_callback = init + + @staticmethod + def _initialize(ctypes_ptr, value): + if value: + raise NotImplementedError("ctypes backend: not supported: " + "initializers for function pointers") + + def __repr__(self): + c_name = getattr(self, '_name', None) + if c_name: + i = self._reftypename.index('(* &)') + if self._reftypename[i-1] not in ' )*': + c_name = ' ' + c_name + c_name = self._reftypename.replace('(* &)', c_name) + return CTypesData.__repr__(self, c_name) + + def _get_own_repr(self): + if getattr(self, '_own_callback', None) is not None: + return 'calling %r' % (self._own_callback,) + return super(CTypesFunctionPtr, self)._get_own_repr() + + def __call__(self, *args): + if has_varargs: + assert len(args) >= len(BArgs) + extraargs = args[len(BArgs):] + args = args[:len(BArgs)] + else: + assert len(args) == len(BArgs) + ctypes_args = [] + for arg, BArg in zip(args, BArgs): + ctypes_args.append(BArg._arg_to_ctypes(arg)) + if has_varargs: + for i, arg in enumerate(extraargs): + if arg is None: + ctypes_args.append(ctypes.c_void_p(0)) # NULL + continue + if not isinstance(arg, CTypesData): + raise TypeError( + "argument %d passed in the variadic part " + "needs to be a cdata object (got %s)" % + (1 + len(BArgs) + i, type(arg).__name__)) + ctypes_args.append(arg._arg_to_ctypes(arg)) + result = self._as_ctype_ptr(*ctypes_args) + return BResult._from_ctypes(result) + # + CTypesFunctionPtr._fix_class() + return CTypesFunctionPtr + + def new_enum_type(self, name, enumerators, enumvalues, 
CTypesInt): + assert isinstance(name, str) + reverse_mapping = dict(zip(reversed(enumvalues), + reversed(enumerators))) + # + class CTypesEnum(CTypesInt): + __slots__ = [] + _reftypename = '%s &' % name + + def _get_own_repr(self): + value = self._value + try: + return '%d: %s' % (value, reverse_mapping[value]) + except KeyError: + return str(value) + + def _to_string(self, maxlen): + value = self._value + try: + return reverse_mapping[value] + except KeyError: + return str(value) + # + CTypesEnum._fix_class() + return CTypesEnum + + def get_errno(self): + return ctypes.get_errno() + + def set_errno(self, value): + ctypes.set_errno(value) + + def string(self, b, maxlen=-1): + return b._to_string(maxlen) + + def buffer(self, bptr, size=-1): + raise NotImplementedError("buffer() with ctypes backend") + + def sizeof(self, cdata_or_BType): + if isinstance(cdata_or_BType, CTypesData): + return cdata_or_BType._get_size_of_instance() + else: + assert issubclass(cdata_or_BType, CTypesData) + return cdata_or_BType._get_size() + + def alignof(self, BType): + assert issubclass(BType, CTypesData) + return BType._alignment() + + def newp(self, BType, source): + if not issubclass(BType, CTypesData): + raise TypeError + return BType._newp(source) + + def cast(self, BType, source): + return BType._cast_from(source) + + def callback(self, BType, source, error, onerror): + assert onerror is None # XXX not implemented + return BType(source, error) + + def gcp(self, cdata, destructor): + BType = self.typeof(cdata) + + if destructor is None: + if not (hasattr(BType, '_gcp_type') and + BType._gcp_type is BType): + raise TypeError("Can remove destructor only on a object " + "previously returned by ffi.gc()") + cdata._destructor = None + return None + + try: + gcp_type = BType._gcp_type + except AttributeError: + class CTypesDataGcp(BType): + __slots__ = ['_orig', '_destructor'] + def __del__(self): + if self._destructor is not None: + self._destructor(self._orig) + gcp_type = 
BType._gcp_type = CTypesDataGcp + new_cdata = self.cast(gcp_type, cdata) + new_cdata._orig = cdata + new_cdata._destructor = destructor + return new_cdata + + typeof = type + + def getcname(self, BType, replace_with): + return BType._get_c_name(replace_with) + + def typeoffsetof(self, BType, fieldname, num=0): + if isinstance(fieldname, str): + if num == 0 and issubclass(BType, CTypesGenericPtr): + BType = BType._BItem + if not issubclass(BType, CTypesBaseStructOrUnion): + raise TypeError("expected a struct or union ctype") + BField = BType._bfield_types[fieldname] + if BField is Ellipsis: + raise TypeError("not supported for bitfields") + return (BField, BType._offsetof(fieldname)) + elif isinstance(fieldname, (int, long)): + if issubclass(BType, CTypesGenericArray): + BType = BType._CTPtr + if not issubclass(BType, CTypesGenericPtr): + raise TypeError("expected an array or ptr ctype") + BItem = BType._BItem + offset = BItem._get_size() * fieldname + if offset > sys.maxsize: + raise OverflowError + return (BItem, offset) + else: + raise TypeError(type(fieldname)) + + def rawaddressof(self, BTypePtr, cdata, offset=None): + if isinstance(cdata, CTypesBaseStructOrUnion): + ptr = ctypes.pointer(type(cdata)._to_ctypes(cdata)) + elif isinstance(cdata, CTypesGenericPtr): + if offset is None or not issubclass(type(cdata)._BItem, + CTypesBaseStructOrUnion): + raise TypeError("unexpected cdata type") + ptr = type(cdata)._to_ctypes(cdata) + elif isinstance(cdata, CTypesGenericArray): + ptr = type(cdata)._to_ctypes(cdata) + else: + raise TypeError("expected a ") + if offset: + ptr = ctypes.cast( + ctypes.c_void_p( + ctypes.cast(ptr, ctypes.c_void_p).value + offset), + type(ptr)) + return BTypePtr._from_ctypes(ptr) + + +class CTypesLibrary(object): + + def __init__(self, backend, cdll): + self.backend = backend + self.cdll = cdll + + def load_function(self, BType, name): + c_func = getattr(self.cdll, name) + funcobj = BType._from_ctypes(c_func) + funcobj._name = name + return 
funcobj + + def read_variable(self, BType, name): + try: + ctypes_obj = BType._ctype.in_dll(self.cdll, name) + except AttributeError as e: + raise NotImplementedError(e) + return BType._from_ctypes(ctypes_obj) + + def write_variable(self, BType, name, value): + new_ctypes_obj = BType._to_ctypes(value) + ctypes_obj = BType._ctype.in_dll(self.cdll, name) + ctypes.memmove(ctypes.addressof(ctypes_obj), + ctypes.addressof(new_ctypes_obj), + ctypes.sizeof(BType._ctype)) diff --git a/testing/cffi0/backend_tests.py b/testing/cffi0/backend_tests.py new file mode 100644 --- /dev/null +++ b/testing/cffi0/backend_tests.py @@ -0,0 +1,1868 @@ +import py +import platform +import sys, ctypes +from cffi import FFI, CDefError, FFIError, VerificationMissing +from testing.support import * + +SIZE_OF_INT = ctypes.sizeof(ctypes.c_int) +SIZE_OF_LONG = ctypes.sizeof(ctypes.c_long) +SIZE_OF_SHORT = ctypes.sizeof(ctypes.c_short) +SIZE_OF_PTR = ctypes.sizeof(ctypes.c_void_p) +SIZE_OF_WCHAR = ctypes.sizeof(ctypes.c_wchar) + + +class BackendTests: + + def test_integer_ranges(self): + ffi = FFI(backend=self.Backend()) + for (c_type, size) in [('char', 1), + ('short', 2), + ('short int', 2), + ('', 4), + ('int', 4), + ('long', SIZE_OF_LONG), + ('long int', SIZE_OF_LONG), + ('long long', 8), + ('long long int', 8), + ]: + for unsigned in [None, False, True]: + c_decl = {None: '', + False: 'signed ', + True: 'unsigned '}[unsigned] + c_type + if c_decl == 'char' or c_decl == '': + continue + self._test_int_type(ffi, c_decl, size, unsigned) + + def test_fixedsize_int(self): + ffi = FFI(backend=self.Backend()) + for size in [1, 2, 4, 8]: + self._test_int_type(ffi, 'int%d_t' % (8*size), size, False) + self._test_int_type(ffi, 'uint%d_t' % (8*size), size, True) + self._test_int_type(ffi, 'intptr_t', SIZE_OF_PTR, False) + self._test_int_type(ffi, 'uintptr_t', SIZE_OF_PTR, True) + self._test_int_type(ffi, 'ptrdiff_t', SIZE_OF_PTR, False) + self._test_int_type(ffi, 'size_t', SIZE_OF_PTR, True) + 
self._test_int_type(ffi, 'ssize_t', SIZE_OF_PTR, False) + + def _test_int_type(self, ffi, c_decl, size, unsigned): + if unsigned: + min = 0 + max = (1 << (8*size)) - 1 + else: + min = -(1 << (8*size-1)) + max = (1 << (8*size-1)) - 1 + min = int(min) + max = int(max) + p = ffi.cast(c_decl, min) + assert p != min # no __eq__(int) + assert bool(p) is bool(min) + assert int(p) == min + p = ffi.cast(c_decl, max) + assert int(p) == max + p = ffi.cast(c_decl, long(max)) + assert int(p) == max + q = ffi.cast(c_decl, min - 1) + assert ffi.typeof(q) is ffi.typeof(p) and int(q) == max + q = ffi.cast(c_decl, long(min - 1)) + assert ffi.typeof(q) is ffi.typeof(p) and int(q) == max + assert q != p + assert int(q) == int(p) + assert hash(q) != hash(p) # unlikely + c_decl_ptr = '%s *' % c_decl + py.test.raises(OverflowError, ffi.new, c_decl_ptr, min - 1) + py.test.raises(OverflowError, ffi.new, c_decl_ptr, max + 1) + py.test.raises(OverflowError, ffi.new, c_decl_ptr, long(min - 1)) + py.test.raises(OverflowError, ffi.new, c_decl_ptr, long(max + 1)) + assert ffi.new(c_decl_ptr, min)[0] == min + assert ffi.new(c_decl_ptr, max)[0] == max + assert ffi.new(c_decl_ptr, long(min))[0] == min + assert ffi.new(c_decl_ptr, long(max))[0] == max + + def test_new_unsupported_type(self): + ffi = FFI(backend=self.Backend()) + e = py.test.raises(TypeError, ffi.new, "int") + assert str(e.value) == "expected a pointer or array ctype, got 'int'" + + def test_new_single_integer(self): + ffi = FFI(backend=self.Backend()) + p = ffi.new("int *") # similar to ffi.new("int[1]") + assert p[0] == 0 + p[0] = -123 + assert p[0] == -123 + p = ffi.new("int *", -42) + assert p[0] == -42 + assert repr(p) == "" % SIZE_OF_INT + + def test_new_array_no_arg(self): + ffi = FFI(backend=self.Backend()) + p = ffi.new("int[10]") + # the object was zero-initialized: + for i in range(10): + assert p[i] == 0 + + def test_array_indexing(self): + ffi = FFI(backend=self.Backend()) + p = ffi.new("int[10]") + p[0] = 42 + p[9] = 43 
+ assert p[0] == 42 + assert p[9] == 43 + py.test.raises(IndexError, "p[10]") + py.test.raises(IndexError, "p[10] = 44") + py.test.raises(IndexError, "p[-1]") + py.test.raises(IndexError, "p[-1] = 44") + + def test_new_array_args(self): + ffi = FFI(backend=self.Backend()) + # this tries to be closer to C: where we say "int x[5] = {10, 20, ..}" + # then here we must enclose the items in a list + p = ffi.new("int[5]", [10, 20, 30, 40, 50]) + assert p[0] == 10 + assert p[1] == 20 + assert p[2] == 30 + assert p[3] == 40 + assert p[4] == 50 + p = ffi.new("int[4]", [25]) + assert p[0] == 25 + assert p[1] == 0 # follow C convention rather than LuaJIT's + assert p[2] == 0 + assert p[3] == 0 + p = ffi.new("int[4]", [ffi.cast("int", -5)]) + assert p[0] == -5 + assert repr(p) == "" % (4*SIZE_OF_INT) + + def test_new_array_varsize(self): + ffi = FFI(backend=self.Backend()) + p = ffi.new("int[]", 10) # a single integer is the length + assert p[9] == 0 + py.test.raises(IndexError, "p[10]") + # + py.test.raises(TypeError, ffi.new, "int[]") + # + p = ffi.new("int[]", [-6, -7]) # a list is all the items, like C + assert p[0] == -6 + assert p[1] == -7 + py.test.raises(IndexError, "p[2]") + assert repr(p) == "" % (2*SIZE_OF_INT) + # + p = ffi.new("int[]", 0) + py.test.raises(IndexError, "p[0]") + py.test.raises(ValueError, ffi.new, "int[]", -1) + assert repr(p) == "" + + def test_pointer_init(self): + ffi = FFI(backend=self.Backend()) + n = ffi.new("int *", 24) + a = ffi.new("int *[10]", [ffi.NULL, ffi.NULL, n, n, ffi.NULL]) + for i in range(10): + if i not in (2, 3): + assert a[i] == ffi.NULL + assert a[2] == a[3] == n + + def test_cannot_cast(self): + ffi = FFI(backend=self.Backend()) + a = ffi.new("short int[10]") + e = py.test.raises(TypeError, ffi.new, "long int **", a) + msg = str(e.value) + assert "'short[10]'" in msg and "'long *'" in msg + + def test_new_pointer_to_array(self): + ffi = FFI(backend=self.Backend()) + a = ffi.new("int[4]", [100, 102, 104, 106]) + p = 
ffi.new("int **", a) + assert p[0] == ffi.cast("int *", a) + assert p[0][2] == 104 + p = ffi.cast("int *", a) + assert p[0] == 100 + assert p[1] == 102 + assert p[2] == 104 + assert p[3] == 106 + # keepalive: a + + def test_pointer_direct(self): + ffi = FFI(backend=self.Backend()) + p = ffi.cast("int*", 0) + assert p is not None + assert bool(p) is False + assert p == ffi.cast("int*", 0) + assert p != None + assert repr(p) == "" + a = ffi.new("int[]", [123, 456]) + p = ffi.cast("int*", a) + assert bool(p) is True + assert p == ffi.cast("int*", a) + assert p != ffi.cast("int*", 0) + assert p[0] == 123 + assert p[1] == 456 + + def test_repr(self): + typerepr = self.TypeRepr + ffi = FFI(backend=self.Backend()) + ffi.cdef("struct foo { short a, b, c; };") + p = ffi.cast("short unsigned int", 0) + assert repr(p) == "" + assert repr(ffi.typeof(p)) == typerepr % "unsigned short" + p = ffi.cast("unsigned short int", 0) + assert repr(p) == "" + assert repr(ffi.typeof(p)) == typerepr % "unsigned short" + p = ffi.cast("int*", 0) + assert repr(p) == "" + assert repr(ffi.typeof(p)) == typerepr % "int *" + # + p = ffi.new("int*") + assert repr(p) == "" % SIZE_OF_INT + assert repr(ffi.typeof(p)) == typerepr % "int *" + p = ffi.new("int**") + assert repr(p) == "" % SIZE_OF_PTR + assert repr(ffi.typeof(p)) == typerepr % "int * *" + p = ffi.new("int [2]") + assert repr(p) == "" % (2*SIZE_OF_INT) + assert repr(ffi.typeof(p)) == typerepr % "int[2]" + p = ffi.new("int*[2][3]") + assert repr(p) == "" % ( + 6*SIZE_OF_PTR) + assert repr(ffi.typeof(p)) == typerepr % "int *[2][3]" + p = ffi.new("struct foo *") + assert repr(p) == "" % ( + 3*SIZE_OF_SHORT) + assert repr(ffi.typeof(p)) == typerepr % "struct foo *" + # + q = ffi.cast("short", -123) + assert repr(q) == "" + assert repr(ffi.typeof(q)) == typerepr % "short" + p = ffi.new("int*") + q = ffi.cast("short*", p) + assert repr(q).startswith(" 2: + assert ffi.new("wchar_t*", u+'\U00012345')[0] == u+'\U00012345' + else: + 
py.test.raises(TypeError, ffi.new, "wchar_t*", u+'\U00012345') + assert ffi.new("wchar_t*")[0] == u+'\x00' + assert int(ffi.cast("wchar_t", 300)) == 300 + assert not bool(ffi.cast("wchar_t", 0)) + assert bool(ffi.cast("wchar_t", 1)) + assert bool(ffi.cast("wchar_t", 65535)) + if SIZE_OF_WCHAR > 2: + assert bool(ffi.cast("wchar_t", 65536)) + py.test.raises(TypeError, ffi.new, "wchar_t*", 32) + py.test.raises(TypeError, ffi.new, "wchar_t*", "foo") + # + p = ffi.new("wchar_t[]", [u+'a', u+'b', u+'\u1234']) + assert len(p) == 3 + assert p[0] == u+'a' + assert p[1] == u+'b' and type(p[1]) is unicode + assert p[2] == u+'\u1234' + p[0] = u+'x' + assert p[0] == u+'x' and type(p[0]) is unicode + p[1] = u+'\u1357' + assert p[1] == u+'\u1357' + p = ffi.new("wchar_t[]", u+"abcd") + assert len(p) == 5 + assert p[4] == u+'\x00' + p = ffi.new("wchar_t[]", u+"a\u1234b") + assert len(p) == 4 + assert p[1] == u+'\u1234' + # + p = ffi.new("wchar_t[]", u+'\U00023456') + if SIZE_OF_WCHAR == 2: + assert sys.maxunicode == 0xffff + assert len(p) == 3 + assert p[0] == u+'\ud84d' + assert p[1] == u+'\udc56' + assert p[2] == u+'\x00' + else: + assert len(p) == 2 + assert p[0] == u+'\U00023456' + assert p[1] == u+'\x00' + # + p = ffi.new("wchar_t[4]", u+"ab") + assert len(p) == 4 + assert [p[i] for i in range(4)] == [u+'a', u+'b', u+'\x00', u+'\x00'] + p = ffi.new("wchar_t[2]", u+"ab") + assert len(p) == 2 + assert [p[i] for i in range(2)] == [u+'a', u+'b'] + py.test.raises(IndexError, ffi.new, "wchar_t[2]", u+"abc") + + def test_none_as_null_doesnt_work(self): + ffi = FFI(backend=self.Backend()) + p = ffi.new("int*[1]") + assert p[0] is not None + assert p[0] != None + assert p[0] == ffi.NULL + assert repr(p[0]) == "" + # + n = ffi.new("int*", 99) + p = ffi.new("int*[]", [n]) + assert p[0][0] == 99 + py.test.raises(TypeError, "p[0] = None") + p[0] = ffi.NULL + assert p[0] == ffi.NULL + + def test_float(self): + ffi = FFI(backend=self.Backend()) + p = ffi.new("float[]", [-2, -2.5]) + assert 
p[0] == -2.0 + assert p[1] == -2.5 + p[1] += 17.75 + assert p[1] == 15.25 + # + p = ffi.new("float*", 15.75) + assert p[0] == 15.75 + py.test.raises(TypeError, int, p) + py.test.raises(TypeError, float, p) + p[0] = 0.0 + assert bool(p) is True + # + p = ffi.new("float*", 1.1) + f = p[0] + assert f != 1.1 # because of rounding effect + assert abs(f - 1.1) < 1E-7 + # + INF = 1E200 * 1E200 + assert 1E200 != INF + p[0] = 1E200 + assert p[0] == INF # infinite, not enough precision + + def test_struct_simple(self): + ffi = FFI(backend=self.Backend()) + ffi.cdef("struct foo { int a; short b, c; };") + s = ffi.new("struct foo*") + assert s.a == s.b == s.c == 0 + s.b = -23 + assert s.b == -23 + py.test.raises(OverflowError, "s.b = 32768") + # + s = ffi.new("struct foo*", [-2, -3]) + assert s.a == -2 + assert s.b == -3 + assert s.c == 0 + py.test.raises((AttributeError, TypeError), "del s.a") + assert repr(s) == "" % ( + SIZE_OF_INT + 2 * SIZE_OF_SHORT) + # + py.test.raises(ValueError, ffi.new, "struct foo*", [1, 2, 3, 4]) + + def test_constructor_struct_from_dict(self): + ffi = FFI(backend=self.Backend()) + ffi.cdef("struct foo { int a; short b, c; };") + s = ffi.new("struct foo*", {'b': 123, 'c': 456}) + assert s.a == 0 + assert s.b == 123 + assert s.c == 456 + py.test.raises(KeyError, ffi.new, "struct foo*", {'d': 456}) + + def test_struct_pointer(self): + ffi = FFI(backend=self.Backend()) + ffi.cdef("struct foo { int a; short b, c; };") + s = ffi.new("struct foo*") + assert s[0].a == s[0].b == s[0].c == 0 + s[0].b = -23 + assert s[0].b == s.b == -23 + py.test.raises(OverflowError, "s[0].b = -32769") + py.test.raises(IndexError, "s[1]") + + def test_struct_opaque(self): + ffi = FFI(backend=self.Backend()) + py.test.raises(TypeError, ffi.new, "struct baz*") + p = ffi.new("struct baz **") # this works + assert p[0] == ffi.NULL + + def test_pointer_to_struct(self): + ffi = FFI(backend=self.Backend()) + ffi.cdef("struct foo { int a; short b, c; };") + s = ffi.new("struct foo 
*") + s.a = -42 + assert s[0].a == -42 + p = ffi.new("struct foo **", s) + assert p[0].a == -42 + assert p[0][0].a == -42 + p[0].a = -43 + assert s.a == -43 + assert s[0].a == -43 + p[0][0].a = -44 + assert s.a == -44 + assert s[0].a == -44 + s.a = -45 + assert p[0].a == -45 + assert p[0][0].a == -45 + s[0].a = -46 + assert p[0].a == -46 + assert p[0][0].a == -46 + + def test_constructor_struct_of_array(self): + ffi = FFI(backend=self.Backend()) + ffi.cdef("struct foo { int a[2]; char b[3]; };") + s = ffi.new("struct foo *", [[10, 11], [b'a', b'b', b'c']]) + assert s.a[1] == 11 + assert s.b[2] == b'c' + s.b[1] = b'X' + assert s.b[0] == b'a' + assert s.b[1] == b'X' + assert s.b[2] == b'c' + + def test_recursive_struct(self): + ffi = FFI(backend=self.Backend()) + ffi.cdef("struct foo { int value; struct foo *next; };") + s = ffi.new("struct foo*") + t = ffi.new("struct foo*") + s.value = 123 + s.next = t + t.value = 456 + assert s.value == 123 + assert s.next.value == 456 + + def test_union_simple(self): + ffi = FFI(backend=self.Backend()) + ffi.cdef("union foo { int a; short b, c; };") + u = ffi.new("union foo*") + assert u.a == u.b == u.c == 0 + u.b = -23 + assert u.b == -23 + assert u.a != 0 + py.test.raises(OverflowError, "u.b = 32768") + # + u = ffi.new("union foo*", [-2]) + assert u.a == -2 + py.test.raises((AttributeError, TypeError), "del u.a") + assert repr(u) == "" % SIZE_OF_INT + + def test_union_opaque(self): + ffi = FFI(backend=self.Backend()) + py.test.raises(TypeError, ffi.new, "union baz *") + u = ffi.new("union baz **") # this works + assert u[0] == ffi.NULL + + def test_union_initializer(self): + ffi = FFI(backend=self.Backend()) + ffi.cdef("union foo { char a; int b; };") + py.test.raises(TypeError, ffi.new, "union foo*", b'A') + py.test.raises(TypeError, ffi.new, "union foo*", 5) + py.test.raises(ValueError, ffi.new, "union foo*", [b'A', 5]) + u = ffi.new("union foo*", [b'A']) + assert u.a == b'A' + py.test.raises(TypeError, ffi.new, "union foo*", 
[1005]) + u = ffi.new("union foo*", {'b': 12345}) + assert u.b == 12345 + u = ffi.new("union foo*", []) + assert u.a == b'\x00' + assert u.b == 0 + + def test_sizeof_type(self): + ffi = FFI(backend=self.Backend()) + ffi.cdef(""" + struct foo { int a; short b, c, d; }; + union foo { int a; short b, c, d; }; + """) + for c_type, expected_size in [ + ('char', 1), + ('unsigned int', 4), + ('char *', SIZE_OF_PTR), + ('int[5]', 20), + ('struct foo', 12), + ('union foo', 4), + ]: + size = ffi.sizeof(c_type) + assert size == expected_size, (size, expected_size, ctype) + + def test_sizeof_cdata(self): + ffi = FFI(backend=self.Backend()) + assert ffi.sizeof(ffi.new("short*")) == SIZE_OF_PTR + assert ffi.sizeof(ffi.cast("short", 123)) == SIZE_OF_SHORT + # + a = ffi.new("int[]", [10, 11, 12, 13, 14]) + assert len(a) == 5 + assert ffi.sizeof(a) == 5 * SIZE_OF_INT + + def test_string_from_char_pointer(self): + ffi = FFI(backend=self.Backend()) + x = ffi.new("char*", b"x") + assert str(x) == repr(x) + assert ffi.string(x) == b"x" + assert ffi.string(ffi.new("char*", b"\x00")) == b"" + py.test.raises(TypeError, ffi.new, "char*", unicode("foo")) + + def test_unicode_from_wchar_pointer(self): + ffi = FFI(backend=self.Backend()) + self.check_wchar_t(ffi) + x = ffi.new("wchar_t*", u+"x") + assert unicode(x) == unicode(repr(x)) + assert ffi.string(x) == u+"x" + assert ffi.string(ffi.new("wchar_t*", u+"\x00")) == u+"" + + def test_string_from_char_array(self): + ffi = FFI(backend=self.Backend()) + p = ffi.new("char[]", b"hello.") + p[5] = b'!' + assert ffi.string(p) == b"hello!" + p[6] = b'?' + assert ffi.string(p) == b"hello!?" 
+ p[3] = b'\x00' + assert ffi.string(p) == b"hel" + assert ffi.string(p, 2) == b"he" + py.test.raises(IndexError, "p[7] = b'X'") + # + a = ffi.new("char[]", b"hello\x00world") + assert len(a) == 12 + p = ffi.cast("char *", a) + assert ffi.string(p) == b'hello' + + def test_string_from_wchar_array(self): + ffi = FFI(backend=self.Backend()) + self.check_wchar_t(ffi) + assert ffi.string(ffi.cast("wchar_t", "x")) == u+"x" + assert ffi.string(ffi.cast("wchar_t", u+"x")) == u+"x" + x = ffi.cast("wchar_t", "x") + assert str(x) == repr(x) + assert ffi.string(x) == u+"x" + # + p = ffi.new("wchar_t[]", u+"hello.") + p[5] = u+'!' + assert ffi.string(p) == u+"hello!" + p[6] = u+'\u04d2' + assert ffi.string(p) == u+"hello!\u04d2" + p[3] = u+'\x00' + assert ffi.string(p) == u+"hel" + assert ffi.string(p, 123) == u+"hel" + py.test.raises(IndexError, "p[7] = u+'X'") + # + a = ffi.new("wchar_t[]", u+"hello\x00world") + assert len(a) == 12 + p = ffi.cast("wchar_t *", a) + assert ffi.string(p) == u+'hello' + assert ffi.string(p, 123) == u+'hello' + assert ffi.string(p, 5) == u+'hello' + assert ffi.string(p, 2) == u+'he' + + def test_fetch_const_char_p_field(self): + # 'const' is ignored so far + ffi = FFI(backend=self.Backend()) + ffi.cdef("struct foo { const char *name; };") + t = ffi.new("const char[]", b"testing") + s = ffi.new("struct foo*", [t]) + assert type(s.name) not in (bytes, str, unicode) + assert ffi.string(s.name) == b"testing" + py.test.raises(TypeError, "s.name = None") + s.name = ffi.NULL + assert s.name == ffi.NULL + + def test_fetch_const_wchar_p_field(self): + # 'const' is ignored so far + ffi = FFI(backend=self.Backend()) + self.check_wchar_t(ffi) + ffi.cdef("struct foo { const wchar_t *name; };") + t = ffi.new("const wchar_t[]", u+"testing") + s = ffi.new("struct foo*", [t]) + assert type(s.name) not in (bytes, str, unicode) + assert ffi.string(s.name) == u+"testing" + s.name = ffi.NULL + assert s.name == ffi.NULL + + def test_voidp(self): + ffi = 
FFI(backend=self.Backend()) + py.test.raises(TypeError, ffi.new, "void*") + p = ffi.new("void **") + assert p[0] == ffi.NULL + a = ffi.new("int[]", [10, 11, 12]) + p = ffi.new("void **", a) + vp = p[0] + py.test.raises(TypeError, "vp[0]") + py.test.raises(TypeError, ffi.new, "short **", a) + # + ffi.cdef("struct foo { void *p; int *q; short *r; };") + s = ffi.new("struct foo *") + s.p = a # works + s.q = a # works + py.test.raises(TypeError, "s.r = a") # fails + b = ffi.cast("int *", a) + s.p = b # works + s.q = b # works + py.test.raises(TypeError, "s.r = b") # fails + + def test_functionptr_simple(self): + ffi = FFI(backend=self.Backend()) + py.test.raises(TypeError, ffi.callback, "int(*)(int)", 0) + def cb(n): + return n + 1 + cb.__qualname__ = 'cb' + p = ffi.callback("int(*)(int)", cb) + res = p(41) # calling an 'int(*)(int)', i.e. a function pointer + assert res == 42 and type(res) is int + res = p(ffi.cast("int", -41)) + assert res == -40 and type(res) is int + assert repr(p).startswith( + "" % ( + SIZE_OF_PTR) + py.test.raises(TypeError, "q(43)") + res = q[0](43) + assert res == 44 + q = ffi.cast("int(*)(int)", p) + assert repr(q).startswith(" Author: Armin Rigo Branch: Changeset: r2756:f959e7d3af34 Date: 2016-09-03 19:30 +0200 http://bitbucket.org/cffi/cffi/changeset/f959e7d3af34/ Log: Un-document the removal of the ctypes backend diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst --- a/doc/source/whatsnew.rst +++ b/doc/source/whatsnew.rst @@ -13,10 +13,6 @@ name, you can rename it manually to ``NAME.abi3.so``, or use the very recent setuptools 26. -* Removed the ctypes backend. If ``_cffi_backend`` was not compiled, - you could ask (using an undocumented interface) for ``backend_ctypes`` - instead. That was never fully functional and long deprecated. - * Added ``ffi.compile(debug=...)``, similar to ``python setup.py build --debug`` but defaulting to True if we are running a debugging version of Python itself. 
From pypy.commits at gmail.com Sat Sep 3 14:04:43 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 03 Sep 2016 11:04:43 -0700 (PDT) Subject: [pypy-commit] cffi default: Issue #282: probable test and fix Message-ID: <57cb10bb.229ec20a.918a5.91a1@mx.google.com> Author: Armin Rigo Branch: Changeset: r2757:5fa1d8697d3e Date: 2016-09-03 20:04 +0200 http://bitbucket.org/cffi/cffi/changeset/5fa1d8697d3e/ Log: Issue #282: probable test and fix diff --git a/cffi/backend_ctypes.py b/cffi/backend_ctypes.py --- a/cffi/backend_ctypes.py +++ b/cffi/backend_ctypes.py @@ -997,29 +997,35 @@ assert onerror is None # XXX not implemented return BType(source, error) + _weakref_cache_ref = None + def gcp(self, cdata, destructor): - BType = self.typeof(cdata) + if self._weakref_cache_ref is None: + import weakref + class MyRef(weakref.ref): + def __eq__(self, other): + return self() is other() + def __ne__(self, other): + return self() is not other() + self._weakref_cache_ref = {}, MyRef + weak_cache, MyRef = self._weakref_cache_ref if destructor is None: - if not (hasattr(BType, '_gcp_type') and - BType._gcp_type is BType): + try: + del weak_cache[MyRef(cdata)] + except KeyError: raise TypeError("Can remove destructor only on a object " "previously returned by ffi.gc()") - cdata._destructor = None return None - try: - gcp_type = BType._gcp_type - except AttributeError: - class CTypesDataGcp(BType): - __slots__ = ['_orig', '_destructor'] - def __del__(self): - if self._destructor is not None: - self._destructor(self._orig) - gcp_type = BType._gcp_type = CTypesDataGcp - new_cdata = self.cast(gcp_type, cdata) - new_cdata._orig = cdata - new_cdata._destructor = destructor + def remove(k): + cdata, destructor = weak_cache.pop(k, (None, None)) + if destructor is not None: + destructor(cdata) + + new_cdata = self.cast(self.typeof(cdata), cdata) + assert new_cdata is not cdata + weak_cache[MyRef(new_cdata, remove)] = (cdata, destructor) return new_cdata typeof = type diff --git 
a/testing/cffi0/backend_tests.py b/testing/cffi0/backend_tests.py --- a/testing/cffi0/backend_tests.py +++ b/testing/cffi0/backend_tests.py @@ -1478,6 +1478,7 @@ assert p1[0] == 123 seen.append(1) q = ffi.gc(p, destructor) + assert ffi.typeof(q) is ffi.typeof(p) import gc; gc.collect() assert seen == [] del q From pypy.commits at gmail.com Sat Sep 3 14:10:55 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 03 Sep 2016 11:10:55 -0700 (PDT) Subject: [pypy-commit] cffi default: fix Message-ID: <57cb122f.436ec20a.434cf.8c51@mx.google.com> Author: Armin Rigo Branch: Changeset: r2758:b81ca61b6de6 Date: 2016-09-03 20:10 +0200 http://bitbucket.org/cffi/cffi/changeset/b81ca61b6de6/ Log: fix diff --git a/cffi/backend_ctypes.py b/cffi/backend_ctypes.py --- a/cffi/backend_ctypes.py +++ b/cffi/backend_ctypes.py @@ -1004,9 +1004,11 @@ import weakref class MyRef(weakref.ref): def __eq__(self, other): - return self() is other() + myref = self() + return self is other or ( + myref is not None and myref is other()) def __ne__(self, other): - return self() is not other() + return not (self == other) self._weakref_cache_ref = {}, MyRef weak_cache, MyRef = self._weakref_cache_ref From pypy.commits at gmail.com Sat Sep 3 14:16:00 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 03 Sep 2016 11:16:00 -0700 (PDT) Subject: [pypy-commit] pypy default: import cffi/b81ca61b6de6 Message-ID: <57cb1360.c398c20a.9932f.9d3b@mx.google.com> Author: Armin Rigo Branch: Changeset: r86854:fdd46325fe3d Date: 2016-09-03 20:14 +0200 http://bitbucket.org/pypy/pypy/changeset/fdd46325fe3d/ Log: import cffi/b81ca61b6de6 diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO --- a/lib_pypy/cffi.egg-info/PKG-INFO +++ b/lib_pypy/cffi.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cffi -Version: 1.8.0 +Version: 1.8.1 Summary: Foreign Function Interface for Python calling C code. 
Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.8.0" -__version_info__ = (1, 8, 0) +__version__ = "1.8.1" +__version_info__ = (1, 8, 1) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. It may be older than __version__ diff --git a/lib_pypy/cffi/_cffi_include.h b/lib_pypy/cffi/_cffi_include.h --- a/lib_pypy/cffi/_cffi_include.h +++ b/lib_pypy/cffi/_cffi_include.h @@ -1,4 +1,20 @@ #define _CFFI_ + +/* We try to define Py_LIMITED_API before including Python.h. + + Mess: we can only define it if Py_DEBUG, Py_TRACE_REFS and + Py_REF_DEBUG are not defined. This is a best-effort approximation: + we can learn about Py_DEBUG from pyconfig.h, but it is unclear if + the same works for the other two macros. Py_DEBUG implies them, + but not the other way around. 
+*/ +#ifndef _CFFI_USE_EMBEDDING +# include +# if !defined(Py_DEBUG) && !defined(Py_TRACE_REFS) && !defined(Py_REF_DEBUG) +# define Py_LIMITED_API +# endif +#endif + #include #ifdef __cplusplus extern "C" { diff --git a/lib_pypy/cffi/_embedding.h b/lib_pypy/cffi/_embedding.h --- a/lib_pypy/cffi/_embedding.h +++ b/lib_pypy/cffi/_embedding.h @@ -233,7 +233,7 @@ f = PySys_GetObject((char *)"stderr"); if (f != NULL && f != Py_None) { PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME - "\ncompiled with cffi version: 1.8.0" + "\ncompiled with cffi version: 1.8.1" "\n_cffi_backend module: ", f); modules = PyImport_GetModuleDict(); mod = PyDict_GetItemString(modules, "_cffi_backend"); diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -652,7 +652,7 @@ recompile(self, module_name, source, c_file=filename, call_c_compiler=False, **kwds) - def compile(self, tmpdir='.', verbose=0, target=None): + def compile(self, tmpdir='.', verbose=0, target=None, debug=None): """The 'target' argument gives the final file name of the compiled DLL. Use '*' to force distutils' choice, suitable for regular CPython C API modules. 
Use a file name ending in '.*' @@ -669,7 +669,7 @@ module_name, source, source_extension, kwds = self._assigned_source return recompile(self, module_name, source, tmpdir=tmpdir, target=target, source_extension=source_extension, - compiler_verbose=verbose, **kwds) + compiler_verbose=verbose, debug=debug, **kwds) def init_once(self, func, tag): # Read _init_once_cache[tag], which is either (False, lock) if diff --git a/lib_pypy/cffi/backend_ctypes.py b/lib_pypy/cffi/backend_ctypes.py --- a/lib_pypy/cffi/backend_ctypes.py +++ b/lib_pypy/cffi/backend_ctypes.py @@ -997,29 +997,37 @@ assert onerror is None # XXX not implemented return BType(source, error) + _weakref_cache_ref = None + def gcp(self, cdata, destructor): - BType = self.typeof(cdata) + if self._weakref_cache_ref is None: + import weakref + class MyRef(weakref.ref): + def __eq__(self, other): + myref = self() + return self is other or ( + myref is not None and myref is other()) + def __ne__(self, other): + return not (self == other) + self._weakref_cache_ref = {}, MyRef + weak_cache, MyRef = self._weakref_cache_ref if destructor is None: - if not (hasattr(BType, '_gcp_type') and - BType._gcp_type is BType): + try: + del weak_cache[MyRef(cdata)] + except KeyError: raise TypeError("Can remove destructor only on a object " "previously returned by ffi.gc()") - cdata._destructor = None return None - try: - gcp_type = BType._gcp_type - except AttributeError: - class CTypesDataGcp(BType): - __slots__ = ['_orig', '_destructor'] - def __del__(self): - if self._destructor is not None: - self._destructor(self._orig) - gcp_type = BType._gcp_type = CTypesDataGcp - new_cdata = self.cast(gcp_type, cdata) - new_cdata._orig = cdata - new_cdata._destructor = destructor + def remove(k): + cdata, destructor = weak_cache.pop(k, (None, None)) + if destructor is not None: + destructor(cdata) + + new_cdata = self.cast(self.typeof(cdata), cdata) + assert new_cdata is not cdata + weak_cache[MyRef(new_cdata, remove)] = (cdata, 
destructor) return new_cdata typeof = type diff --git a/lib_pypy/cffi/ffiplatform.py b/lib_pypy/cffi/ffiplatform.py --- a/lib_pypy/cffi/ffiplatform.py +++ b/lib_pypy/cffi/ffiplatform.py @@ -21,12 +21,12 @@ allsources.append(os.path.normpath(src)) return Extension(name=modname, sources=allsources, **kwds) -def compile(tmpdir, ext, compiler_verbose=0): +def compile(tmpdir, ext, compiler_verbose=0, debug=None): """Compile a C extension module using distutils.""" saved_environ = os.environ.copy() try: - outputfilename = _build(tmpdir, ext, compiler_verbose) + outputfilename = _build(tmpdir, ext, compiler_verbose, debug) outputfilename = os.path.abspath(outputfilename) finally: # workaround for a distutils bugs where some env vars can @@ -36,7 +36,7 @@ os.environ[key] = value return outputfilename -def _build(tmpdir, ext, compiler_verbose=0): +def _build(tmpdir, ext, compiler_verbose=0, debug=None): # XXX compact but horrible :-( from distutils.core import Distribution import distutils.errors, distutils.log @@ -44,6 +44,9 @@ dist = Distribution({'ext_modules': [ext]}) dist.parse_config_files() options = dist.get_option_dict('build_ext') + if debug is None: + debug = sys.flags.debug + options['debug'] = ('ffiplatform', debug) options['force'] = ('ffiplatform', True) options['build_lib'] = ('ffiplatform', tmpdir) options['build_temp'] = ('ffiplatform', tmpdir) diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py --- a/lib_pypy/cffi/recompiler.py +++ b/lib_pypy/cffi/recompiler.py @@ -275,8 +275,8 @@ def write_c_source_to_f(self, f, preamble): self._f = f prnt = self._prnt - if self.ffi._embedding is None: - prnt('#define Py_LIMITED_API') + if self.ffi._embedding is not None: + prnt('#define _CFFI_USE_EMBEDDING') # # first the '#include' (actually done by inlining the file's content) lines = self._rel_readlines('_cffi_include.h') @@ -1431,7 +1431,7 @@ def recompile(ffi, module_name, preamble, tmpdir='.', call_c_compiler=True, c_file=None, 
source_extension='.c', extradir=None, - compiler_verbose=1, target=None, **kwds): + compiler_verbose=1, target=None, debug=None, **kwds): if not isinstance(module_name, str): module_name = module_name.encode('ascii') if ffi._windows_unicode: @@ -1467,7 +1467,8 @@ if target != '*': _patch_for_target(patchlist, target) os.chdir(tmpdir) - outputfilename = ffiplatform.compile('.', ext, compiler_verbose) + outputfilename = ffiplatform.compile('.', ext, + compiler_verbose, debug) finally: os.chdir(cwd) _unpatch_meths(patchlist) diff --git a/lib_pypy/cffi/setuptools_ext.py b/lib_pypy/cffi/setuptools_ext.py --- a/lib_pypy/cffi/setuptools_ext.py +++ b/lib_pypy/cffi/setuptools_ext.py @@ -69,16 +69,36 @@ else: _add_c_module(dist, ffi, module_name, source, source_extension, kwds) +def _set_py_limited_api(Extension, kwds): + """ + Add py_limited_api to kwds if setuptools >= 26 is in use. + Do not alter the setting if it already exists. + Setuptools takes care of ignoring the flag on Python 2 and PyPy. + """ + if 'py_limited_api' not in kwds: + import setuptools + try: + setuptools_major_version = int(setuptools.__version__.partition('.')[0]) + if setuptools_major_version >= 26: + kwds['py_limited_api'] = True + except ValueError: # certain development versions of setuptools + # If we don't know the version number of setuptools, we + # try to set 'py_limited_api' anyway. At worst, we get a + # warning. + kwds['py_limited_api'] = True + return kwds def _add_c_module(dist, ffi, module_name, source, source_extension, kwds): from distutils.core import Extension - from distutils.command.build_ext import build_ext + # We are a setuptools extension. Need this build_ext for py_limited_api. 
+ from setuptools.command.build_ext import build_ext from distutils.dir_util import mkpath from distutils import log from cffi import recompiler allsources = ['$PLACEHOLDER'] allsources.extend(kwds.pop('sources', [])) + kwds = _set_py_limited_api(Extension, kwds) ext = Extension(name=module_name, sources=allsources, **kwds) def make_mod(tmpdir, pre_run=None): diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -3,7 +3,7 @@ from rpython.rlib import rdynload, clibffi, entrypoint from rpython.rtyper.lltypesystem import rffi -VERSION = "1.8.0" +VERSION = "1.8.1" FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI try: diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1,7 +1,7 @@ # ____________________________________________________________ import sys -assert __version__ == "1.8.0", ("This test_c.py file is for testing a version" +assert __version__ == "1.8.1", ("This test_c.py file is for testing a version" " of cffi that differs from the one that we" " get from 'import _cffi_backend'") if sys.version_info < (3,): diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py @@ -1479,6 +1479,7 @@ assert p1[0] == 123 seen.append(1) q = ffi.gc(p, destructor) + assert ffi.typeof(q) is ffi.typeof(p) import gc; gc.collect() assert seen == [] del q diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py +++ 
b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py @@ -149,3 +149,25 @@ p = snip_setuptools_verify2.C.getpwuid(0) assert snip_setuptools_verify2.ffi.string(p.pw_name) == b"root" ''') + + def test_set_py_limited_api(self): + from cffi.setuptools_ext import _set_py_limited_api + try: + import setuptools + orig_version = setuptools.__version__ + setuptools.__version__ = '26.0.0' + from setuptools import Extension + + kwds = _set_py_limited_api(Extension, {}) + assert kwds['py_limited_api'] == True + + setuptools.__version__ = '25.0' + kwds = _set_py_limited_api(Extension, {}) + assert not kwds + + setuptools.__version__ = 'development' + kwds = _set_py_limited_api(Extension, {}) + assert kwds['py_limited_api'] == True + + finally: + setuptools.__version__ = orig_version diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py @@ -1975,9 +1975,9 @@ def test_function_returns_partial_struct(): ffi = FFI() - ffi.cdef("struct a { int a; ...; }; struct a f1(int);") + ffi.cdef("struct aaa { int a; ...; }; struct aaa f1(int);") lib = verify(ffi, "test_function_returns_partial_struct", """ - struct a { int b, a, c; }; - static struct a f1(int x) { struct a s = {0}; s.a = x; return s; } + struct aaa { int b, a, c; }; + static struct aaa f1(int x) { struct aaa s = {0}; s.a = x; return s; } """) assert lib.f1(52).a == 52 From pypy.commits at gmail.com Sat Sep 3 15:07:59 2016 From: pypy.commits at gmail.com (pjenvey) Date: Sat, 03 Sep 2016 12:07:59 -0700 (PDT) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <57cb1f8f.ca11c30a.ff1fd.ad59@mx.google.com> Author: Philip Jenvey Branch: py3k Changeset: r86855:83f439eeb461 Date: 2016-09-03 12:07 -0700 http://bitbucket.org/pypy/pypy/changeset/83f439eeb461/ Log: merge default diff --git 
a/_pytest/python.py b/_pytest/python.py --- a/_pytest/python.py +++ b/_pytest/python.py @@ -498,7 +498,10 @@ """ Collector for test methods. """ def collect(self): if hasinit(self.obj): - pytest.skip("class %s.%s with __init__ won't get collected" % ( + # XXX used to be skip(), but silently skipping classes + # XXX just because they have been written long ago is + # XXX imho a very, very, very bad idea + pytest.fail("class %s.%s with __init__ won't get collected" % ( self.obj.__module__, self.obj.__name__, )) diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO --- a/lib_pypy/cffi.egg-info/PKG-INFO +++ b/lib_pypy/cffi.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cffi -Version: 1.8.0 +Version: 1.8.1 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.8.0" -__version_info__ = (1, 8, 0) +__version__ = "1.8.1" +__version_info__ = (1, 8, 1) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. It may be older than __version__ diff --git a/lib_pypy/cffi/_cffi_include.h b/lib_pypy/cffi/_cffi_include.h --- a/lib_pypy/cffi/_cffi_include.h +++ b/lib_pypy/cffi/_cffi_include.h @@ -1,4 +1,20 @@ #define _CFFI_ + +/* We try to define Py_LIMITED_API before including Python.h. + + Mess: we can only define it if Py_DEBUG, Py_TRACE_REFS and + Py_REF_DEBUG are not defined. This is a best-effort approximation: + we can learn about Py_DEBUG from pyconfig.h, but it is unclear if + the same works for the other two macros. Py_DEBUG implies them, + but not the other way around. 
+*/ +#ifndef _CFFI_USE_EMBEDDING +# include +# if !defined(Py_DEBUG) && !defined(Py_TRACE_REFS) && !defined(Py_REF_DEBUG) +# define Py_LIMITED_API +# endif +#endif + #include #ifdef __cplusplus extern "C" { diff --git a/lib_pypy/cffi/_embedding.h b/lib_pypy/cffi/_embedding.h --- a/lib_pypy/cffi/_embedding.h +++ b/lib_pypy/cffi/_embedding.h @@ -233,7 +233,7 @@ f = PySys_GetObject((char *)"stderr"); if (f != NULL && f != Py_None) { PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME - "\ncompiled with cffi version: 1.8.0" + "\ncompiled with cffi version: 1.8.1" "\n_cffi_backend module: ", f); modules = PyImport_GetModuleDict(); mod = PyDict_GetItemString(modules, "_cffi_backend"); diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -652,7 +652,7 @@ recompile(self, module_name, source, c_file=filename, call_c_compiler=False, **kwds) - def compile(self, tmpdir='.', verbose=0, target=None): + def compile(self, tmpdir='.', verbose=0, target=None, debug=None): """The 'target' argument gives the final file name of the compiled DLL. Use '*' to force distutils' choice, suitable for regular CPython C API modules. 
Use a file name ending in '.*' @@ -669,7 +669,7 @@ module_name, source, source_extension, kwds = self._assigned_source return recompile(self, module_name, source, tmpdir=tmpdir, target=target, source_extension=source_extension, - compiler_verbose=verbose, **kwds) + compiler_verbose=verbose, debug=debug, **kwds) def init_once(self, func, tag): # Read _init_once_cache[tag], which is either (False, lock) if diff --git a/lib_pypy/cffi/backend_ctypes.py b/lib_pypy/cffi/backend_ctypes.py --- a/lib_pypy/cffi/backend_ctypes.py +++ b/lib_pypy/cffi/backend_ctypes.py @@ -997,29 +997,37 @@ assert onerror is None # XXX not implemented return BType(source, error) + _weakref_cache_ref = None + def gcp(self, cdata, destructor): - BType = self.typeof(cdata) + if self._weakref_cache_ref is None: + import weakref + class MyRef(weakref.ref): + def __eq__(self, other): + myref = self() + return self is other or ( + myref is not None and myref is other()) + def __ne__(self, other): + return not (self == other) + self._weakref_cache_ref = {}, MyRef + weak_cache, MyRef = self._weakref_cache_ref if destructor is None: - if not (hasattr(BType, '_gcp_type') and - BType._gcp_type is BType): + try: + del weak_cache[MyRef(cdata)] + except KeyError: raise TypeError("Can remove destructor only on a object " "previously returned by ffi.gc()") - cdata._destructor = None return None - try: - gcp_type = BType._gcp_type - except AttributeError: - class CTypesDataGcp(BType): - __slots__ = ['_orig', '_destructor'] - def __del__(self): - if self._destructor is not None: - self._destructor(self._orig) - gcp_type = BType._gcp_type = CTypesDataGcp - new_cdata = self.cast(gcp_type, cdata) - new_cdata._orig = cdata - new_cdata._destructor = destructor + def remove(k): + cdata, destructor = weak_cache.pop(k, (None, None)) + if destructor is not None: + destructor(cdata) + + new_cdata = self.cast(self.typeof(cdata), cdata) + assert new_cdata is not cdata + weak_cache[MyRef(new_cdata, remove)] = (cdata, 
destructor) return new_cdata typeof = type diff --git a/lib_pypy/cffi/ffiplatform.py b/lib_pypy/cffi/ffiplatform.py --- a/lib_pypy/cffi/ffiplatform.py +++ b/lib_pypy/cffi/ffiplatform.py @@ -21,12 +21,12 @@ allsources.append(os.path.normpath(src)) return Extension(name=modname, sources=allsources, **kwds) -def compile(tmpdir, ext, compiler_verbose=0): +def compile(tmpdir, ext, compiler_verbose=0, debug=None): """Compile a C extension module using distutils.""" saved_environ = os.environ.copy() try: - outputfilename = _build(tmpdir, ext, compiler_verbose) + outputfilename = _build(tmpdir, ext, compiler_verbose, debug) outputfilename = os.path.abspath(outputfilename) finally: # workaround for a distutils bugs where some env vars can @@ -36,7 +36,7 @@ os.environ[key] = value return outputfilename -def _build(tmpdir, ext, compiler_verbose=0): +def _build(tmpdir, ext, compiler_verbose=0, debug=None): # XXX compact but horrible :-( from distutils.core import Distribution import distutils.errors, distutils.log @@ -44,6 +44,9 @@ dist = Distribution({'ext_modules': [ext]}) dist.parse_config_files() options = dist.get_option_dict('build_ext') + if debug is None: + debug = sys.flags.debug + options['debug'] = ('ffiplatform', debug) options['force'] = ('ffiplatform', True) options['build_lib'] = ('ffiplatform', tmpdir) options['build_temp'] = ('ffiplatform', tmpdir) diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py --- a/lib_pypy/cffi/recompiler.py +++ b/lib_pypy/cffi/recompiler.py @@ -275,8 +275,8 @@ def write_c_source_to_f(self, f, preamble): self._f = f prnt = self._prnt - if self.ffi._embedding is None: - prnt('#define Py_LIMITED_API') + if self.ffi._embedding is not None: + prnt('#define _CFFI_USE_EMBEDDING') # # first the '#include' (actually done by inlining the file's content) lines = self._rel_readlines('_cffi_include.h') @@ -1431,7 +1431,7 @@ def recompile(ffi, module_name, preamble, tmpdir='.', call_c_compiler=True, c_file=None, 
source_extension='.c', extradir=None, - compiler_verbose=1, target=None, **kwds): + compiler_verbose=1, target=None, debug=None, **kwds): if not isinstance(module_name, str): module_name = module_name.encode('ascii') if ffi._windows_unicode: @@ -1467,7 +1467,8 @@ if target != '*': _patch_for_target(patchlist, target) os.chdir(tmpdir) - outputfilename = ffiplatform.compile('.', ext, compiler_verbose) + outputfilename = ffiplatform.compile('.', ext, + compiler_verbose, debug) finally: os.chdir(cwd) _unpatch_meths(patchlist) diff --git a/lib_pypy/cffi/setuptools_ext.py b/lib_pypy/cffi/setuptools_ext.py --- a/lib_pypy/cffi/setuptools_ext.py +++ b/lib_pypy/cffi/setuptools_ext.py @@ -69,16 +69,36 @@ else: _add_c_module(dist, ffi, module_name, source, source_extension, kwds) +def _set_py_limited_api(Extension, kwds): + """ + Add py_limited_api to kwds if setuptools >= 26 is in use. + Do not alter the setting if it already exists. + Setuptools takes care of ignoring the flag on Python 2 and PyPy. + """ + if 'py_limited_api' not in kwds: + import setuptools + try: + setuptools_major_version = int(setuptools.__version__.partition('.')[0]) + if setuptools_major_version >= 26: + kwds['py_limited_api'] = True + except ValueError: # certain development versions of setuptools + # If we don't know the version number of setuptools, we + # try to set 'py_limited_api' anyway. At worst, we get a + # warning. + kwds['py_limited_api'] = True + return kwds def _add_c_module(dist, ffi, module_name, source, source_extension, kwds): from distutils.core import Extension - from distutils.command.build_ext import build_ext + # We are a setuptools extension. Need this build_ext for py_limited_api. 
+ from setuptools.command.build_ext import build_ext from distutils.dir_util import mkpath from distutils import log from cffi import recompiler allsources = ['$PLACEHOLDER'] allsources.extend(kwds.pop('sources', [])) + kwds = _set_py_limited_api(Extension, kwds) ext = Extension(name=module_name, sources=allsources, **kwds) def make_mod(tmpdir, pre_run=None): diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -3,7 +3,7 @@ from rpython.rlib import rdynload, clibffi, entrypoint from rpython.rtyper.lltypesystem import rffi -VERSION = "1.8.0" +VERSION = "1.8.1" FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI try: diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1,7 +1,7 @@ # ____________________________________________________________ import sys -assert __version__ == "1.8.0", ("This test_c.py file is for testing a version" +assert __version__ == "1.8.1", ("This test_c.py file is for testing a version" " of cffi that differs from the one that we" " get from 'import _cffi_backend'") if sys.version_info < (3,): diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py @@ -187,6 +187,43 @@ """) + def test_oldstyle_methcall(self): + def main(): + def g(): pass + class A: + def f(self): + return self.x + 1 + class I(A): + pass + class J(I): + pass + + + class B(J): + def __init__(self, x): + self.x = x + + i = 0 + b = B(1) + while i < 1000: + g() + v = b.f() # ID: meth + i += v + return i + + log = self.run(main, [], threshold=80) + loop, = log.loops_by_filename(self.filepath, is_entry_bridge=True) + assert 
loop.match_by_id('meth', + ''' + guard_nonnull_class(p18, ..., descr=...) + p52 = getfield_gc_r(p18, descr=...) # read map + guard_value(p52, ConstPtr(ptr53), descr=...) + p54 = getfield_gc_r(p18, descr=...) # read class + guard_value(p54, ConstPtr(ptr55), descr=...) + p56 = force_token() # done + ''') + + def test_oldstyle_newstyle_mix(self): def main(): class A: diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py @@ -1479,6 +1479,7 @@ assert p1[0] == 123 seen.append(1) q = ffi.gc(p, destructor) + assert ffi.typeof(q) is ffi.typeof(p) import gc; gc.collect() assert seen == [] del q diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py @@ -149,3 +149,25 @@ p = snip_setuptools_verify2.C.getpwuid(0) assert snip_setuptools_verify2.ffi.string(p.pw_name) == b"root" ''') + + def test_set_py_limited_api(self): + from cffi.setuptools_ext import _set_py_limited_api + try: + import setuptools + orig_version = setuptools.__version__ + setuptools.__version__ = '26.0.0' + from setuptools import Extension + + kwds = _set_py_limited_api(Extension, {}) + assert kwds['py_limited_api'] == True + + setuptools.__version__ = '25.0' + kwds = _set_py_limited_api(Extension, {}) + assert not kwds + + setuptools.__version__ = 'development' + kwds = _set_py_limited_api(Extension, {}) + assert kwds['py_limited_api'] == True + + finally: + setuptools.__version__ = orig_version diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py --- 
a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py @@ -1975,9 +1975,9 @@ def test_function_returns_partial_struct(): ffi = FFI() - ffi.cdef("struct a { int a; ...; }; struct a f1(int);") + ffi.cdef("struct aaa { int a; ...; }; struct aaa f1(int);") lib = verify(ffi, "test_function_returns_partial_struct", """ - struct a { int b, a, c; }; - static struct a f1(int x) { struct a s = {0}; s.a = x; return s; } + struct aaa { int b, a, c; }; + static struct aaa f1(int x) { struct aaa s = {0}; s.a = x; return s; } """) assert lib.f1(52).a == 52 diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_numbers.py b/pypy/module/test_lib_pypy/ctypes_tests/test_numbers.py --- a/pypy/module/test_lib_pypy/ctypes_tests/test_numbers.py +++ b/pypy/module/test_lib_pypy/ctypes_tests/test_numbers.py @@ -194,6 +194,29 @@ _fields_ = [('t', enum)] assert isinstance(S().t, enum) + def test_no_missing_shape_to_ffi_type(self): + # whitebox test + import sys + if '__pypy__' not in sys.builtin_module_names: + skip("only for pypy's ctypes") + skip("re-enable after adding 'g' to _shape_to_ffi_type.typemap, " + "which I think needs fighting all the way up from " + "rpython.rlib.libffi") + from _ctypes.basics import _shape_to_ffi_type + from _rawffi import Array + for i in range(1, 256): + try: + Array(chr(i)) + except ValueError: + pass + else: + assert chr(i) in _shape_to_ffi_type.typemap + + @py.test.mark.xfail + def test_pointer_to_long_double(self): + import ctypes + ctypes.POINTER(ctypes.c_longdouble) + ## def test_perf(self): ## check_perf() diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -164,8 +164,15 @@ # annotations that are passed in, and don't annotate the old # graph -- it's already low-level operations! 
for a, s_newarg in zip(block.inputargs, cells): - s_oldarg = self.binding(a) - assert annmodel.unionof(s_oldarg, s_newarg) == s_oldarg + s_oldarg = a.annotation + # XXX: Should use s_oldarg.contains(s_newarg) but that breaks + # PyPy translation + if annmodel.unionof(s_oldarg, s_newarg) != s_oldarg: + raise annmodel.AnnotatorError( + "Late-stage annotation is not allowed to modify the " + "existing annotation for variable %s: %s" % + (a, s_oldarg)) + else: assert not self.frozen if block not in self.annotated: diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -17,7 +17,7 @@ from rpython.flowspace.model import Variable, Constant, const from rpython.flowspace.operation import op from rpython.rlib import rarithmetic -from rpython.annotator.model import AnnotatorError +from rpython.annotator.model import AnnotatorError, TLS BINARY_OPERATIONS = set([oper.opname for oper in op.__dict__.values() if oper.dispatch == 2]) @@ -436,6 +436,11 @@ class __extend__(pairtype(SomeFloat, SomeFloat)): def union((flt1, flt2)): + if not TLS.allow_int_to_float: + # in this mode, if one of the two is actually the + # subclass SomeInteger, complain + if isinstance(flt1, SomeInteger) or isinstance(flt2, SomeInteger): + raise UnionError(flt1, flt2) return SomeFloat() add = sub = mul = union diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -44,6 +44,7 @@ # A global attribute :-( Patch it with 'True' to enable checking of # the no_nul attribute... check_str_without_nul = False + allow_int_to_float = True TLS = State() class SomeObject(object): @@ -749,6 +750,7 @@ s1 = pair(s1, s2).union() else: # this is just a performance shortcut + # XXX: This is a lie! Grep for no_side_effects_in_union and weep. 
if s1 != s2: s1 = pair(s1, s2).union() return s1 diff --git a/rpython/rlib/libffi.py b/rpython/rlib/libffi.py --- a/rpython/rlib/libffi.py +++ b/rpython/rlib/libffi.py @@ -47,6 +47,8 @@ cls.ulonglong = clibffi.cast_type_to_ffitype(rffi.ULONGLONG) cls.signed = clibffi.cast_type_to_ffitype(rffi.SIGNED) cls.wchar_t = clibffi.cast_type_to_ffitype(lltype.UniChar) + # XXX long double support: clibffi.ffi_type_longdouble, but then + # XXX fix the whole rest of this file to add a case for long double del cls._import @staticmethod diff --git a/rpython/rlib/rdynload.py b/rpython/rlib/rdynload.py --- a/rpython/rlib/rdynload.py +++ b/rpython/rlib/rdynload.py @@ -98,8 +98,15 @@ try: ctypes.CDLL(name) except OSError as e: + # common case: ctypes fails too, with the real dlerror() + # message in str(e). Return that error message. return str(e) else: + # uncommon case: may happen if 'name' is a linker script + # (which the C-level dlopen() can't handle) and we are + # directly running on pypy (whose implementation of ctypes + # or cffi will resolve linker scripts). In that case, + # unsure what we can do. return ("opening %r with ctypes.CDLL() works, " "but not with c_dlopen()??" % (name,)) @@ -160,7 +167,18 @@ mode = _dlopen_default_mode() elif (mode & (RTLD_LAZY | RTLD_NOW)) == 0: mode |= RTLD_NOW + # + # haaaack for 'pypy py.test -A' if libm.so is a linker script + # (see reason in _dlerror_on_dlopen_untranslated()) + must_free = False + if not we_are_translated() and platform.name == "linux": + if name and rffi.charp2str(name) == 'libm.so': + name = rffi.str2charp('libm.so.6') + must_free = True + # res = c_dlopen(name, rffi.cast(rffi.INT, mode)) + if must_free: + rffi.free_charp(name) if not res: if not we_are_translated(): err = _dlerror_on_dlopen_untranslated(name) diff --git a/rpython/rlib/rmarshal.py b/rpython/rlib/rmarshal.py --- a/rpython/rlib/rmarshal.py +++ b/rpython/rlib/rmarshal.py @@ -346,11 +346,15 @@ # on s_bigger. 
It relies on the fact that s_bigger was created with # an expression like 'annotation([s_item])' which returns a ListDef with # no bookkeeper, on which side-effects are not allowed. + saved = annmodel.TLS.allow_int_to_float try: + annmodel.TLS.allow_int_to_float = False s_union = annmodel.unionof(s_bigger, s_smaller) return s_bigger.contains(s_union) except (annmodel.UnionError, TooLateForChange): return False + finally: + annmodel.TLS.allow_int_to_float = saved class __extend__(pairtype(MTag, annmodel.SomeObject)): diff --git a/rpython/rlib/runicode.py b/rpython/rlib/runicode.py --- a/rpython/rlib/runicode.py +++ b/rpython/rlib/runicode.py @@ -137,7 +137,27 @@ result=result) return result.build(), pos - at specialize.argtype(6) +def _invalid_cont_byte(ordch): + return ordch>>6 != 0x2 # 0b10 + +_invalid_byte_2_of_2 = _invalid_cont_byte +_invalid_byte_3_of_3 = _invalid_cont_byte +_invalid_byte_3_of_4 = _invalid_cont_byte +_invalid_byte_4_of_4 = _invalid_cont_byte + + at specialize.arg(2) +def _invalid_byte_2_of_3(ordch1, ordch2, allow_surrogates): + return (ordch2>>6 != 0x2 or # 0b10 + (ordch1 == 0xe0 and ordch2 < 0xa0) + # surrogates shouldn't be valid UTF-8! 
+ or (not allow_surrogates and ordch1 == 0xed and ordch2 > 0x9f)) + +def _invalid_byte_2_of_4(ordch1, ordch2): + return (ordch2>>6 != 0x2 or # 0b10 + (ordch1 == 0xf0 and ordch2 < 0x90) or + (ordch1 == 0xf4 and ordch2 > 0x8f)) + + at specialize.arg(5) def str_decode_utf_8_impl(s, size, errors, final, errorhandler, allow_surrogates, result): if size == 0: @@ -157,22 +177,23 @@ if pos + n > size: if not final: break + # argh, this obscure block of code is mostly a copy of + # what follows :-( charsleft = size - pos - 1 # either 0, 1, 2 - # note: when we get the 'unexpected end of data' we don't care - # about the pos anymore and we just ignore the value + # note: when we get the 'unexpected end of data' we need + # to care about the pos returned; it can be lower than size, + # in case we need to continue running this loop if not charsleft: # there's only the start byte and nothing else r, pos = errorhandler(errors, 'utf8', 'unexpected end of data', s, pos, pos+1) result.append(r) - break + continue ordch2 = ord(s[pos+1]) if n == 3: # 3-bytes seq with only a continuation byte - if (ordch2>>6 != 0x2 or # 0b10 - (ordch1 == 0xe0 and ordch2 < 0xa0)): - # or (ordch1 == 0xed and ordch2 > 0x9f) + if _invalid_byte_2_of_3(ordch1, ordch2, allow_surrogates): # second byte invalid, take the first and continue r, pos = errorhandler(errors, 'utf8', 'invalid continuation byte', @@ -185,19 +206,17 @@ 'unexpected end of data', s, pos, pos+2) result.append(r) - break + continue elif n == 4: # 4-bytes seq with 1 or 2 continuation bytes - if (ordch2>>6 != 0x2 or # 0b10 - (ordch1 == 0xf0 and ordch2 < 0x90) or - (ordch1 == 0xf4 and ordch2 > 0x8f)): + if _invalid_byte_2_of_4(ordch1, ordch2): # second byte invalid, take the first and continue r, pos = errorhandler(errors, 'utf8', 'invalid continuation byte', s, pos, pos+1) result.append(r) continue - elif charsleft == 2 and ord(s[pos+2])>>6 != 0x2: # 0b10 + elif charsleft == 2 and _invalid_byte_3_of_4(ord(s[pos+2])): # third byte invalid, 
take the first two and continue r, pos = errorhandler(errors, 'utf8', 'invalid continuation byte', @@ -210,7 +229,8 @@ 'unexpected end of data', s, pos, pos+charsleft+1) result.append(r) - break + continue + raise AssertionError("unreachable") if n == 0: r, pos = errorhandler(errors, 'utf8', @@ -223,7 +243,7 @@ elif n == 2: ordch2 = ord(s[pos+1]) - if ordch2>>6 != 0x2: # 0b10 + if _invalid_byte_2_of_2(ordch2): r, pos = errorhandler(errors, 'utf8', 'invalid continuation byte', s, pos, pos+1) @@ -237,17 +257,13 @@ elif n == 3: ordch2 = ord(s[pos+1]) ordch3 = ord(s[pos+2]) - if (ordch2>>6 != 0x2 or # 0b10 - (ordch1 == 0xe0 and ordch2 < 0xa0) - # surrogates shouldn't be valid UTF-8! - or (not allow_surrogates and ordch1 == 0xed and ordch2 > 0x9f) - ): + if _invalid_byte_2_of_3(ordch1, ordch2, allow_surrogates): r, pos = errorhandler(errors, 'utf8', 'invalid continuation byte', s, pos, pos+1) result.append(r) continue - elif ordch3>>6 != 0x2: # 0b10 + elif _invalid_byte_3_of_3(ordch3): r, pos = errorhandler(errors, 'utf8', 'invalid continuation byte', s, pos, pos+2) @@ -263,21 +279,19 @@ ordch2 = ord(s[pos+1]) ordch3 = ord(s[pos+2]) ordch4 = ord(s[pos+3]) - if (ordch2>>6 != 0x2 or # 0b10 - (ordch1 == 0xf0 and ordch2 < 0x90) or - (ordch1 == 0xf4 and ordch2 > 0x8f)): + if _invalid_byte_2_of_4(ordch1, ordch2): r, pos = errorhandler(errors, 'utf8', 'invalid continuation byte', s, pos, pos+1) result.append(r) continue - elif ordch3>>6 != 0x2: # 0b10 + elif _invalid_byte_3_of_4(ordch3): r, pos = errorhandler(errors, 'utf8', 'invalid continuation byte', s, pos, pos+2) result.append(r) continue - elif ordch4>>6 != 0x2: # 0b10 + elif _invalid_byte_4_of_4(ordch4): r, pos = errorhandler(errors, 'utf8', 'invalid continuation byte', s, pos, pos+3) diff --git a/rpython/rlib/test/test_rmarshal.py b/rpython/rlib/test/test_rmarshal.py --- a/rpython/rlib/test/test_rmarshal.py +++ b/rpython/rlib/test/test_rmarshal.py @@ -128,10 +128,12 @@ def test_llinterp_marshal(): from 
rpython.rtyper.test.test_llinterp import interpret - marshaller = get_marshaller([(int, str, float)]) + marshaller1 = get_marshaller([(int, str, float)]) + marshaller2 = get_marshaller([(int, str, int)]) def f(): buf = [] - marshaller(buf, [(5, "hello", -0.5), (7, "world", 1E100)]) + marshaller1(buf, [(5, "hello", -0.5), (7, "world", 1E100)]) + marshaller2(buf, [(5, "hello", 1)]) return ''.join(buf) res = interpret(f, []) res = ''.join(res.chars) @@ -139,14 +141,20 @@ assert res == ('[\x02\x00\x00\x00(\x03\x00\x00\x00i\x05\x00\x00\x00' 's\x05\x00\x00\x00hellof\x04-0.5(\x03\x00\x00\x00' 'i\x07\x00\x00\x00s\x05\x00\x00\x00world' - 'f\x061e+100') + 'f\x061e+100' + '[\x01\x00\x00\x00(\x03\x00\x00\x00i\x05\x00\x00\x00' + 's\x05\x00\x00\x00helloi\x01\x00\x00\x00') else: assert res == ('[\x02\x00\x00\x00(\x03\x00\x00\x00' 'I\x05\x00\x00\x00\x00\x00\x00\x00' 's\x05\x00\x00\x00hellof\x04-0.5(\x03\x00\x00\x00' 'I\x07\x00\x00\x00\x00\x00\x00\x00' 's\x05\x00\x00\x00world' - 'f\x061e+100') + 'f\x061e+100' + '[\x01\x00\x00\x00(\x03\x00\x00\x00' + 'I\x05\x00\x00\x00\x00\x00\x00\x00' + 's\x05\x00\x00\x00hello' + 'I\x01\x00\x00\x00\x00\x00\x00\x00') def test_llinterp_unmarshal(): from rpython.rtyper.test.test_llinterp import interpret diff --git a/rpython/rlib/test/test_runicode.py b/rpython/rlib/test/test_runicode.py --- a/rpython/rlib/test/test_runicode.py +++ b/rpython/rlib/test/test_runicode.py @@ -286,9 +286,15 @@ class TestUTF8Decoding(UnicodeTests): - def __init__(self): + def setup_method(self, meth): self.decoder = self.getdecoder('utf-8') + def custom_replace(self, errors, encoding, msg, s, startingpos, endingpos): + assert errors == 'custom' + # returns FOO, but consumes only one character (not up to endingpos) + FOO = u'\u1234' + return FOO, startingpos + 1 + def to_bytestring(self, bytes): return ''.join(chr(int(c, 16)) for c in bytes.split()) @@ -309,6 +315,7 @@ E.g. <80> is a continuation byte and can appear only after a start byte. 
""" FFFD = u'\ufffd' + FOO = u'\u1234' for byte in '\x80\xA0\x9F\xBF\xC0\xC1\xF5\xFF': py.test.raises(UnicodeDecodeError, self.decoder, byte, 1, None, final=True) self.checkdecodeerror(byte, 'utf-8', 0, 1, addstuff=False, @@ -320,6 +327,11 @@ assert self.decoder(byte, 1, 'ignore', final=True) == (u'', 1) assert (self.decoder('aaaa' + byte + 'bbbb', 9, 'ignore', final=True) == (u'aaaabbbb', 9)) + assert self.decoder(byte, 1, 'custom', final=True, + errorhandler=self.custom_replace) == (FOO, 1) + assert (self.decoder('aaaa' + byte + 'bbbb', 9, 'custom', + final=True, errorhandler=self.custom_replace) == + (u'aaaa'+ FOO + u'bbbb', 9)) def test_unexpected_end_of_data(self): """ @@ -343,6 +355,7 @@ 'F4 80', 'F4 8F', 'F4 80 80', 'F4 80 BF', 'F4 8F 80', 'F4 8F BF' ] FFFD = u'\ufffd' + FOO = u'\u1234' for seq in sequences: seq = self.to_bytestring(seq) py.test.raises(UnicodeDecodeError, self.decoder, seq, len(seq), @@ -358,6 +371,12 @@ ) == (u'', len(seq)) assert (self.decoder('aaaa' + seq + 'bbbb', len(seq) + 8, 'ignore', final=True) == (u'aaaabbbb', len(seq) + 8)) + assert (self.decoder(seq, len(seq), 'custom', final=True, + errorhandler=self.custom_replace) == + (FOO * len(seq), len(seq))) + assert (self.decoder('aaaa' + seq + 'bbbb', len(seq) + 8, 'custom', + final=True, errorhandler=self.custom_replace) == + (u'aaaa'+ FOO * len(seq) + u'bbbb', len(seq) + 8)) def test_invalid_cb_for_2bytes_seq(self): """ diff --git a/rpython/rtyper/lltypesystem/test/test_rffi.py b/rpython/rtyper/lltypesystem/test/test_rffi.py --- a/rpython/rtyper/lltypesystem/test/test_rffi.py +++ b/rpython/rtyper/lltypesystem/test/test_rffi.py @@ -38,6 +38,24 @@ xf = self.compile(f, []) assert xf() == 8+3 + def test_no_float_to_int_conversion(self): + c_source = py.code.Source(""" + int someexternalfunction(int x) + { + return (x + 3); + } + """) + + eci = ExternalCompilationInfo(separate_module_sources=[c_source]) + z = llexternal('someexternalfunction', [Signed], Signed, + compilation_info=eci) + + 
def f(): + return z(8.2) + + py.test.raises(TypeError, f) + py.test.raises(TypeError, self.compile, f, []) + def test_hashdefine(self): h_source = """ #define X(i) (i+3) diff --git a/rpython/rtyper/rint.py b/rpython/rtyper/rint.py --- a/rpython/rtyper/rint.py +++ b/rpython/rtyper/rint.py @@ -540,7 +540,7 @@ def ll_ullong_py_mod_zer(x, y): if y == 0: raise ZeroDivisionError - return llop.ullong_mod(UnsignedLongLong, x, y) + return ll_ullong_py_mod(x, y) @jit.dont_look_inside def ll_lllong_py_mod(x, y): diff --git a/rpython/translator/backendopt/inline.py b/rpython/translator/backendopt/inline.py --- a/rpython/translator/backendopt/inline.py +++ b/rpython/translator/backendopt/inline.py @@ -532,8 +532,7 @@ return sys.maxint else: res = Solution[blockmap[graph.startblock]] - assert res >= 0 - return res + return max(res, 0.0) def static_instruction_count(graph): count = 0 diff --git a/rpython/translator/c/test/test_exception.py b/rpython/translator/c/test/test_exception.py --- a/rpython/translator/c/test/test_exception.py +++ b/rpython/translator/c/test/test_exception.py @@ -9,7 +9,7 @@ getcompiledopt = test_backendoptimized.TestTypedOptimizedTestCase().getcompiled -class TestException(Exception): +class InTestException(Exception): pass class MyException(Exception): @@ -18,7 +18,7 @@ def test_simple1(): def raise_(i): if i == 0: - raise TestException() + raise InTestException() elif i == 1: raise MyException() else: @@ -29,7 +29,7 @@ b = raise_(i) + 12 c = raise_(i) + 13 return a+b+c - except TestException: + except InTestException: return 7 except MyException: return 123 diff --git a/rpython/translator/goal/translate.py b/rpython/translator/goal/translate.py --- a/rpython/translator/goal/translate.py +++ b/rpython/translator/goal/translate.py @@ -213,6 +213,7 @@ log.WARNING(warning) def main(): + sys.setrecursionlimit(2000) # PyPy can't translate within cpython's 1k limit targetspec_dic, translateconfig, config, args = parse_options_and_load_target() from 
rpython.translator import translator from rpython.translator import driver diff --git a/rpython/translator/sandbox/test/test_sandbox.py b/rpython/translator/sandbox/test/test_sandbox.py --- a/rpython/translator/sandbox/test/test_sandbox.py +++ b/rpython/translator/sandbox/test/test_sandbox.py @@ -29,6 +29,7 @@ assert msg == fnname msg = read_message(f) assert msg == args + assert [type(x) for x in msg] == [type(x) for x in args] if isinstance(result, Exception): write_exception(g, result) else: From pypy.commits at gmail.com Sat Sep 3 17:43:08 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 03 Sep 2016 14:43:08 -0700 (PDT) Subject: [pypy-commit] cffi default: Python 3 fix Message-ID: <57cb43ec.a3a3c20a.2eacd.9364@mx.google.com> Author: Armin Rigo Branch: Changeset: r2759:d04043b6e7bc Date: 2016-09-03 23:42 +0200 http://bitbucket.org/cffi/cffi/changeset/d04043b6e7bc/ Log: Python 3 fix diff --git a/cffi/backend_ctypes.py b/cffi/backend_ctypes.py --- a/cffi/backend_ctypes.py +++ b/cffi/backend_ctypes.py @@ -1009,6 +1009,12 @@ myref is not None and myref is other()) def __ne__(self, other): return not (self == other) + def __hash__(self): + try: + return self._hash + except AttributeError: + self._hash = hash(self()) + return self._hash self._weakref_cache_ref = {}, MyRef weak_cache, MyRef = self._weakref_cache_ref From pypy.commits at gmail.com Sun Sep 4 01:29:29 2016 From: pypy.commits at gmail.com (mattip) Date: Sat, 03 Sep 2016 22:29:29 -0700 (PDT) Subject: [pypy-commit] pypy release-5.x: update version numbers, make test run untranslated Message-ID: <57cbb139.ca11c30a.ff1fd.43cd@mx.google.com> Author: Matti Picus Branch: release-5.x Changeset: r86856:b0cef2e770b9 Date: 2016-09-04 08:26 +0300 http://bitbucket.org/pypy/pypy/changeset/b0cef2e770b9/ Log: update version numbers, make test run untranslated diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ 
b/pypy/module/cpyext/include/patchlevel.h @@ -29,8 +29,8 @@ #define PY_VERSION "2.7.10" /* PyPy version as a string */ -#define PYPY_VERSION "5.4.0" -#define PYPY_VERSION_NUM 0x05040000 +#define PYPY_VERSION "5.4.1" +#define PYPY_VERSION_NUM 0x05040100 /* Defined to mean a PyPy where cpyext holds more regular references to PyObjects, e.g. staying alive as long as the internal PyPy object diff --git a/pypy/module/cpyext/test/test_version.py b/pypy/module/cpyext/test/test_version.py --- a/pypy/module/cpyext/test/test_version.py +++ b/pypy/module/cpyext/test/test_version.py @@ -32,9 +32,11 @@ assert module.py_minor_version == sys.version_info.minor assert module.py_micro_version == sys.version_info.micro - @pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='pypy only test') + #@pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='pypy only test') def test_pypy_versions(self): import sys + if '__pypy__' not in sys.builtin_module_names: + py.test.skip("pypy only test") init = """ if (Py_IsInitialized()) { PyObject *m = Py_InitModule("foo", NULL); diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -10,7 +10,7 @@ #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h -PYPY_VERSION = (5, 4, 0, "final", 0) #XXX # sync patchlevel.h +PYPY_VERSION = (5, 4, 1, "final", 0) #XXX # sync patchlevel.h import pypy From pypy.commits at gmail.com Sun Sep 4 01:29:31 2016 From: pypy.commits at gmail.com (mattip) Date: Sat, 03 Sep 2016 22:29:31 -0700 (PDT) Subject: [pypy-commit] pypy default: update version minor number, make test run untranslated Message-ID: <57cbb13b.21b0c20a.5d2be.a57b@mx.google.com> Author: Matti Picus Branch: Changeset: r86857:3bbe61b5ea1c Date: 2016-09-04 08:28 +0300 http://bitbucket.org/pypy/pypy/changeset/3bbe61b5ea1c/ Log: update version minor number, make test run 
untranslated diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -29,8 +29,8 @@ #define PY_VERSION "2.7.10" /* PyPy version as a string */ -#define PYPY_VERSION "5.4.1-alpha0" -#define PYPY_VERSION_NUM 0x05040100 +#define PYPY_VERSION "5.5.0-alpha0" +#define PYPY_VERSION_NUM 0x05050000 /* Defined to mean a PyPy where cpyext holds more regular references to PyObjects, e.g. staying alive as long as the internal PyPy object diff --git a/pypy/module/cpyext/test/test_version.py b/pypy/module/cpyext/test/test_version.py --- a/pypy/module/cpyext/test/test_version.py +++ b/pypy/module/cpyext/test/test_version.py @@ -32,9 +32,11 @@ assert module.py_minor_version == sys.version_info.minor assert module.py_micro_version == sys.version_info.micro - @pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='pypy only test') + #@pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='pypy only test') def test_pypy_versions(self): import sys + if '__pypy__' not in sys.builtin_module_names: + py.test.skip("pypy only test") init = """ if (Py_IsInitialized()) { PyObject *m = Py_InitModule("foo", NULL); diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -10,7 +10,7 @@ #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h -PYPY_VERSION = (5, 4, 1, "alpha", 0) #XXX # sync patchlevel.h +PYPY_VERSION = (5, 5, 0, "alpha", 0) #XXX # sync patchlevel.h import pypy From pypy.commits at gmail.com Sun Sep 4 07:37:14 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 04 Sep 2016 04:37:14 -0700 (PDT) Subject: [pypy-commit] cffi default: Skip test if setuptools cannot be imported Message-ID: <57cc076a.54bc1c0a.bc6d2.4187@mx.google.com> Author: Armin Rigo Branch: Changeset: 
r2760:cb6147abe7af Date: 2016-09-04 13:37 +0200 http://bitbucket.org/cffi/cffi/changeset/cb6147abe7af/ Log: Skip test if setuptools cannot be imported diff --git a/testing/cffi0/test_zintegration.py b/testing/cffi0/test_zintegration.py --- a/testing/cffi0/test_zintegration.py +++ b/testing/cffi0/test_zintegration.py @@ -153,7 +153,10 @@ from cffi.setuptools_ext import _set_py_limited_api try: import setuptools - orig_version = setuptools.__version__ + except ImportError as e: + py.test.skip(str(e)) + orig_version = setuptools.__version__ + try: setuptools.__version__ = '26.0.0' from setuptools import Extension From pypy.commits at gmail.com Sun Sep 4 07:38:33 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 04 Sep 2016 04:38:33 -0700 (PDT) Subject: [pypy-commit] pypy default: import cffi/cb6147abe7af Message-ID: <57cc07b9.81a2c20a.d8de6.aee3@mx.google.com> Author: Armin Rigo Branch: Changeset: r86858:c224171fc33e Date: 2016-09-04 13:37 +0200 http://bitbucket.org/pypy/pypy/changeset/c224171fc33e/ Log: import cffi/cb6147abe7af diff --git a/lib_pypy/cffi/backend_ctypes.py b/lib_pypy/cffi/backend_ctypes.py --- a/lib_pypy/cffi/backend_ctypes.py +++ b/lib_pypy/cffi/backend_ctypes.py @@ -1009,6 +1009,12 @@ myref is not None and myref is other()) def __ne__(self, other): return not (self == other) + def __hash__(self): + try: + return self._hash + except AttributeError: + self._hash = hash(self()) + return self._hash self._weakref_cache_ref = {}, MyRef weak_cache, MyRef = self._weakref_cache_ref diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py @@ -154,7 +154,10 @@ from cffi.setuptools_ext import _set_py_limited_api try: import setuptools - orig_version = setuptools.__version__ + except ImportError as e: + py.test.skip(str(e)) + 
orig_version = setuptools.__version__ + try: setuptools.__version__ = '26.0.0' from setuptools import Extension From pypy.commits at gmail.com Sun Sep 4 07:48:24 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 04 Sep 2016 04:48:24 -0700 (PDT) Subject: [pypy-commit] pypy default: These specialize.arg() are not valid, because callers inside runicode Message-ID: <57cc0a08.68adc20a.966c2.a0e8@mx.google.com> Author: Armin Rigo Branch: Changeset: r86859:1f8c8d955c04 Date: 2016-09-04 13:47 +0200 http://bitbucket.org/pypy/pypy/changeset/1f8c8d955c04/ Log: These specialize.arg() are not valid, because callers inside runicode may not give a constant at all. See updated test. diff --git a/rpython/rlib/runicode.py b/rpython/rlib/runicode.py --- a/rpython/rlib/runicode.py +++ b/rpython/rlib/runicode.py @@ -145,7 +145,6 @@ _invalid_byte_3_of_4 = _invalid_cont_byte _invalid_byte_4_of_4 = _invalid_cont_byte - at specialize.arg(2) def _invalid_byte_2_of_3(ordch1, ordch2, allow_surrogates): return (ordch2>>6 != 0x2 or # 0b10 (ordch1 == 0xe0 and ordch2 < 0xa0) @@ -157,7 +156,6 @@ (ordch1 == 0xf0 and ordch2 < 0x90) or (ordch1 == 0xf4 and ordch2 > 0x8f)) - at specialize.arg(5) def str_decode_utf_8_impl(s, size, errors, final, errorhandler, allow_surrogates, result): if size == 0: diff --git a/rpython/rlib/test/test_runicode.py b/rpython/rlib/test/test_runicode.py --- a/rpython/rlib/test/test_runicode.py +++ b/rpython/rlib/test/test_runicode.py @@ -823,9 +823,15 @@ def f(x): s1 = "".join(["\xd7\x90\xd6\x96\xeb\x96\x95\xf0\x90\x91\x93"] * x) - u, consumed = runicode.str_decode_utf_8(s1, len(s1), True) - s2 = runicode.unicode_encode_utf_8(u, len(u), True) - return s1 == s2 + u, consumed = runicode.str_decode_utf_8(s1, len(s1), 'strict', + allow_surrogates=True) + s2 = runicode.unicode_encode_utf_8(u, len(u), 'strict', + allow_surrogates=True) + u3, consumed3 = runicode.str_decode_utf_8(s1, len(s1), 'strict', + allow_surrogates=False) + s3 = runicode.unicode_encode_utf_8(u3, 
len(u3), 'strict', + allow_surrogates=False) + return s1 == s2 == s3 res = interpret(f, [2]) assert res From pypy.commits at gmail.com Sun Sep 4 08:25:20 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 04 Sep 2016 05:25:20 -0700 (PDT) Subject: [pypy-commit] pypy buffer-interface: Fix for test_zjit by explicitly disabling the W_MemoryView case here Message-ID: <57cc12b0.04e21c0a.17845.7d95@mx.google.com> Author: Armin Rigo Branch: buffer-interface Changeset: r86860:73e7b767d5f7 Date: 2016-09-04 14:19 +0200 http://bitbucket.org/pypy/pypy/changeset/73e7b767d5f7/ Log: Fix for test_zjit by explicitly disabling the W_MemoryView case here diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -97,7 +97,16 @@ print "ERROR: did not implement return type for interpreter" raise TypeError(w_res) - if self.graph is None: + if self.graph is not None: + return + + from pypy.module.micronumpy import ctors + def unimplemented(*args): + "Only for W_MemoryView objects, which are not compiled in" + raise NotImplementedError + prev_3118 = ctors._array_from_buffer_3118 + ctors._array_from_buffer_3118 = unimplemented + try: interp, graph = self.meta_interp(f, [0], listops=True, listcomp=True, @@ -107,6 +116,8 @@ vec=True) self.__class__.interp = interp self.__class__.graph = graph + finally: + ctors._array_from_buffer_3118 = prev_3118 def check_vectorized(self, expected_tried, expected_success): profiler = get_profiler() From pypy.commits at gmail.com Sun Sep 4 08:50:21 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 04 Sep 2016 05:50:21 -0700 (PDT) Subject: [pypy-commit] pypy default: Why final=False here? Message-ID: <57cc188d.041f1c0a.fbb56.5e44@mx.google.com> Author: Armin Rigo Branch: Changeset: r86861:50051cb90662 Date: 2016-09-04 14:39 +0200 http://bitbucket.org/pypy/pypy/changeset/50051cb90662/ Log: Why final=False here? 
diff --git a/rpython/rtyper/rstr.py b/rpython/rtyper/rstr.py --- a/rpython/rtyper/rstr.py +++ b/rpython/rtyper/rstr.py @@ -30,7 +30,7 @@ assert value is not None result = UnicodeBuilder(len(value)) self.rstr_decode_utf_8( - value, len(value), 'strict', final=False, + value, len(value), 'strict', final=True, errorhandler=self.ll_raise_unicode_exception_decode, allow_surrogates=False, result=result) return self.ll.llunicode(result.build()) diff --git a/rpython/rtyper/test/test_runicode.py b/rpython/rtyper/test/test_runicode.py --- a/rpython/rtyper/test/test_runicode.py +++ b/rpython/rtyper/test/test_runicode.py @@ -162,6 +162,18 @@ assert self.ll_to_string(self.interpret(f, [0])) == f(0) + def test_unicode_decode_final(self): + strings = ['\xc3', ''] + def f(n): + try: + strings[n].decode('utf-8') + except UnicodeDecodeError: + return True + return False + + assert f(0) + assert self.interpret(f, [0]) + def test_utf_8_decoding_annotation(self): from rpython.rlib.runicode import str_decode_utf_8 def errorhandler(errors, encoding, msg, s, From pypy.commits at gmail.com Sun Sep 4 08:50:25 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 04 Sep 2016 05:50:25 -0700 (PDT) Subject: [pypy-commit] pypy default: Another attempt at fixing the original problem Message-ID: <57cc1891.94071c0a.8c4bb.558a@mx.google.com> Author: Armin Rigo Branch: Changeset: r86862:74b4b27aaa7b Date: 2016-09-04 14:49 +0200 http://bitbucket.org/pypy/pypy/changeset/74b4b27aaa7b/ Log: Another attempt at fixing the original problem diff --git a/rpython/rlib/runicode.py b/rpython/rlib/runicode.py --- a/rpython/rlib/runicode.py +++ b/rpython/rlib/runicode.py @@ -1,5 +1,5 @@ import sys -from rpython.rlib.objectmodel import specialize, we_are_translated +from rpython.rlib.objectmodel import specialize, we_are_translated, enforceargs from rpython.rlib.rstring import StringBuilder, UnicodeBuilder from rpython.rlib.rarithmetic import r_uint, intmask, widen from rpython.rlib.unicodedata import unicodedb 
@@ -145,17 +145,21 @@ _invalid_byte_3_of_4 = _invalid_cont_byte _invalid_byte_4_of_4 = _invalid_cont_byte + at enforceargs(allow_surrogates=bool) def _invalid_byte_2_of_3(ordch1, ordch2, allow_surrogates): return (ordch2>>6 != 0x2 or # 0b10 (ordch1 == 0xe0 and ordch2 < 0xa0) # surrogates shouldn't be valid UTF-8! - or (not allow_surrogates and ordch1 == 0xed and ordch2 > 0x9f)) + or (ordch1 == 0xed and ordch2 > 0x9f and not allow_surrogates)) def _invalid_byte_2_of_4(ordch1, ordch2): return (ordch2>>6 != 0x2 or # 0b10 (ordch1 == 0xf0 and ordch2 < 0x90) or (ordch1 == 0xf4 and ordch2 > 0x8f)) +# note: this specialize() is here for rtyper/rstr.py, which calls this +# function too but with its own fixed errorhandler + at specialize.arg_or_var(4) def str_decode_utf_8_impl(s, size, errors, final, errorhandler, allow_surrogates, result): if size == 0: @@ -328,6 +332,9 @@ return unicode_encode_utf_8_impl(s, size, errors, errorhandler, allow_surrogates=allow_surrogates) +# note: this specialize() is here for rtyper/rstr.py, which calls this +# function too but with its own fixed errorhandler + at specialize.arg_or_var(3) def unicode_encode_utf_8_impl(s, size, errors, errorhandler, allow_surrogates=False): assert(size >= 0) diff --git a/rpython/rlib/test/test_runicode.py b/rpython/rlib/test/test_runicode.py --- a/rpython/rlib/test/test_runicode.py +++ b/rpython/rlib/test/test_runicode.py @@ -55,7 +55,7 @@ s = s.encode(encoding) except LookupError as e: py.test.skip(e) - result, consumed = decoder(s, len(s), True) + result, consumed = decoder(s, len(s), 'strict', final=True) assert consumed == len(s) self.typeequals(trueresult, result) @@ -69,7 +69,7 @@ s = s.decode(encoding) except LookupError as e: py.test.skip(e) - result = encoder(s, len(s), True) + result = encoder(s, len(s), 'strict') self.typeequals(trueresult, result) def checkencodeerror(self, s, encoding, start, stop): diff --git a/rpython/rtyper/rstr.py b/rpython/rtyper/rstr.py --- a/rpython/rtyper/rstr.py +++ 
b/rpython/rtyper/rstr.py @@ -35,7 +35,8 @@ allow_surrogates=False, result=result) return self.ll.llunicode(result.build()) - def ll_raise_unicode_exception_decode(self, errors, encoding, msg, s, + @staticmethod + def ll_raise_unicode_exception_decode(errors, encoding, msg, s, startingpos, endingpos): raise UnicodeDecodeError(encoding, s, startingpos, endingpos, msg) @@ -411,7 +412,8 @@ allow_surrogates=False) return self.ll.llstr(bytes) - def ll_raise_unicode_exception_encode(self, errors, encoding, msg, u, + @staticmethod + def ll_raise_unicode_exception_encode(errors, encoding, msg, u, startingpos, endingpos): raise UnicodeEncodeError(encoding, u, startingpos, endingpos, msg) From pypy.commits at gmail.com Sun Sep 4 11:00:27 2016 From: pypy.commits at gmail.com (mattip) Date: Sun, 04 Sep 2016 08:00:27 -0700 (PDT) Subject: [pypy-commit] pypy release-5.x: Issue #2388: the problem is obscure interaction with a different call (I Message-ID: <57cc370b.a6a5c20a.90c9b.e2d3@mx.google.com> Author: Matti Picus Branch: release-5.x Changeset: r86863:145c1bd68950 Date: 2016-09-04 17:55 +0300 http://bitbucket.org/pypy/pypy/changeset/145c1bd68950/ Log: Issue #2388: the problem is obscure interaction with a different call (I don't know which one) with the signature (string, float), which was considered as more general than the signature (string, int) of os.access(). 
(grafted from 7d6c66b1477085a45a3a64056a010adf3e018924) diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -17,7 +17,7 @@ from rpython.flowspace.model import Variable, Constant, const from rpython.flowspace.operation import op from rpython.rlib import rarithmetic -from rpython.annotator.model import AnnotatorError +from rpython.annotator.model import AnnotatorError, TLS BINARY_OPERATIONS = set([oper.opname for oper in op.__dict__.values() if oper.dispatch == 2]) @@ -436,6 +436,11 @@ class __extend__(pairtype(SomeFloat, SomeFloat)): def union((flt1, flt2)): + if not TLS.allow_int_to_float: + # in this mode, if one of the two is actually the + # subclass SomeInteger, complain + if isinstance(flt1, SomeInteger) or isinstance(flt2, SomeInteger): + raise UnionError(flt1, flt2) return SomeFloat() add = sub = mul = union diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -44,6 +44,7 @@ # A global attribute :-( Patch it with 'True' to enable checking of # the no_nul attribute... check_str_without_nul = False + allow_int_to_float = True TLS = State() class SomeObject(object): diff --git a/rpython/rlib/rmarshal.py b/rpython/rlib/rmarshal.py --- a/rpython/rlib/rmarshal.py +++ b/rpython/rlib/rmarshal.py @@ -346,11 +346,15 @@ # on s_bigger. It relies on the fact that s_bigger was created with # an expression like 'annotation([s_item])' which returns a ListDef with # no bookkeeper, on which side-effects are not allowed. 
+ saved = annmodel.TLS.allow_int_to_float try: + annmodel.TLS.allow_int_to_float = False s_union = annmodel.unionof(s_bigger, s_smaller) return s_bigger.contains(s_union) except (annmodel.UnionError, TooLateForChange): return False + finally: + annmodel.TLS.allow_int_to_float = saved class __extend__(pairtype(MTag, annmodel.SomeObject)): diff --git a/rpython/rlib/test/test_rmarshal.py b/rpython/rlib/test/test_rmarshal.py --- a/rpython/rlib/test/test_rmarshal.py +++ b/rpython/rlib/test/test_rmarshal.py @@ -128,10 +128,12 @@ def test_llinterp_marshal(): from rpython.rtyper.test.test_llinterp import interpret - marshaller = get_marshaller([(int, str, float)]) + marshaller1 = get_marshaller([(int, str, float)]) + marshaller2 = get_marshaller([(int, str, int)]) def f(): buf = [] - marshaller(buf, [(5, "hello", -0.5), (7, "world", 1E100)]) + marshaller1(buf, [(5, "hello", -0.5), (7, "world", 1E100)]) + marshaller2(buf, [(5, "hello", 1)]) return ''.join(buf) res = interpret(f, []) res = ''.join(res.chars) @@ -139,14 +141,20 @@ assert res == ('[\x02\x00\x00\x00(\x03\x00\x00\x00i\x05\x00\x00\x00' 's\x05\x00\x00\x00hellof\x04-0.5(\x03\x00\x00\x00' 'i\x07\x00\x00\x00s\x05\x00\x00\x00world' - 'f\x061e+100') + 'f\x061e+100' + '[\x01\x00\x00\x00(\x03\x00\x00\x00i\x05\x00\x00\x00' + 's\x05\x00\x00\x00helloi\x01\x00\x00\x00') else: assert res == ('[\x02\x00\x00\x00(\x03\x00\x00\x00' 'I\x05\x00\x00\x00\x00\x00\x00\x00' 's\x05\x00\x00\x00hellof\x04-0.5(\x03\x00\x00\x00' 'I\x07\x00\x00\x00\x00\x00\x00\x00' 's\x05\x00\x00\x00world' - 'f\x061e+100') + 'f\x061e+100' + '[\x01\x00\x00\x00(\x03\x00\x00\x00' + 'I\x05\x00\x00\x00\x00\x00\x00\x00' + 's\x05\x00\x00\x00hello' + 'I\x01\x00\x00\x00\x00\x00\x00\x00') def test_llinterp_unmarshal(): from rpython.rtyper.test.test_llinterp import interpret From pypy.commits at gmail.com Sun Sep 4 11:00:29 2016 From: pypy.commits at gmail.com (mattip) Date: Sun, 04 Sep 2016 08:00:29 -0700 (PDT) Subject: [pypy-commit] pypy release-5.x: Issue #2389: the 
custom error handler may return a 'pos' that is smaller Message-ID: <57cc370d.45c8c20a.4d79b.ed10@mx.google.com> Author: Matti Picus Branch: release-5.x Changeset: r86864:110c3a4539c8 Date: 2016-09-04 17:55 +0300 http://bitbucket.org/pypy/pypy/changeset/110c3a4539c8/ Log: Issue #2389: the custom error handler may return a 'pos' that is smaller than 'size', in which case we need to continue looping (grafted from e9dd5882eed653226e8108e4182949b3c58c5de7) diff --git a/rpython/rlib/runicode.py b/rpython/rlib/runicode.py --- a/rpython/rlib/runicode.py +++ b/rpython/rlib/runicode.py @@ -157,22 +157,26 @@ if pos + n > size: if not final: break + # argh, this obscure block of code is mostly a copy of + # what follows :-( charsleft = size - pos - 1 # either 0, 1, 2 - # note: when we get the 'unexpected end of data' we don't care - # about the pos anymore and we just ignore the value + # note: when we get the 'unexpected end of data' we need + # to care about the pos returned; it can be lower than size, + # in case we need to continue running this loop if not charsleft: # there's only the start byte and nothing else r, pos = errorhandler(errors, 'utf8', 'unexpected end of data', s, pos, pos+1) result.append(r) - break + continue ordch2 = ord(s[pos+1]) if n == 3: # 3-bytes seq with only a continuation byte if (ordch2>>6 != 0x2 or # 0b10 - (ordch1 == 0xe0 and ordch2 < 0xa0)): - # or (ordch1 == 0xed and ordch2 > 0x9f) + (ordch1 == 0xe0 and ordch2 < 0xa0) + or (not allow_surrogates and ordch1 == 0xed and ordch2 > 0x9f) + ): # second byte invalid, take the first and continue r, pos = errorhandler(errors, 'utf8', 'invalid continuation byte', @@ -185,7 +189,7 @@ 'unexpected end of data', s, pos, pos+2) result.append(r) - break + continue elif n == 4: # 4-bytes seq with 1 or 2 continuation bytes if (ordch2>>6 != 0x2 or # 0b10 @@ -210,7 +214,8 @@ 'unexpected end of data', s, pos, pos+charsleft+1) result.append(r) - break + continue + raise AssertionError("unreachable") if n == 0: r, 
pos = errorhandler(errors, 'utf8', diff --git a/rpython/rlib/test/test_runicode.py b/rpython/rlib/test/test_runicode.py --- a/rpython/rlib/test/test_runicode.py +++ b/rpython/rlib/test/test_runicode.py @@ -289,6 +289,12 @@ def __init__(self): self.decoder = self.getdecoder('utf-8') + def custom_replace(self, errors, encoding, msg, s, startingpos, endingpos): + assert errors == 'custom' + # returns FOO, but consumes only one character (not up to endingpos) + FOO = u'\u1234' + return FOO, startingpos + 1 + def to_bytestring(self, bytes): return ''.join(chr(int(c, 16)) for c in bytes.split()) @@ -309,6 +315,7 @@ E.g. <80> is a continuation byte and can appear only after a start byte. """ FFFD = u'\ufffd' + FOO = u'\u1234' for byte in '\x80\xA0\x9F\xBF\xC0\xC1\xF5\xFF': py.test.raises(UnicodeDecodeError, self.decoder, byte, 1, None, final=True) self.checkdecodeerror(byte, 'utf-8', 0, 1, addstuff=False, @@ -320,6 +327,11 @@ assert self.decoder(byte, 1, 'ignore', final=True) == (u'', 1) assert (self.decoder('aaaa' + byte + 'bbbb', 9, 'ignore', final=True) == (u'aaaabbbb', 9)) + assert self.decoder(byte, 1, 'custom', final=True, + errorhandler=self.custom_replace) == (FOO, 1) + assert (self.decoder('aaaa' + byte + 'bbbb', 9, 'custom', + final=True, errorhandler=self.custom_replace) == + (u'aaaa'+ FOO + u'bbbb', 9)) def test_unexpected_end_of_data(self): """ @@ -343,6 +355,7 @@ 'F4 80', 'F4 8F', 'F4 80 80', 'F4 80 BF', 'F4 8F 80', 'F4 8F BF' ] FFFD = u'\ufffd' + FOO = u'\u1234' for seq in sequences: seq = self.to_bytestring(seq) py.test.raises(UnicodeDecodeError, self.decoder, seq, len(seq), @@ -358,6 +371,12 @@ ) == (u'', len(seq)) assert (self.decoder('aaaa' + seq + 'bbbb', len(seq) + 8, 'ignore', final=True) == (u'aaaabbbb', len(seq) + 8)) + assert (self.decoder(seq, len(seq), 'custom', final=True, + errorhandler=self.custom_replace) == + (FOO * len(seq), len(seq))) + assert (self.decoder('aaaa' + seq + 'bbbb', len(seq) + 8, 'custom', + final=True, 
errorhandler=self.custom_replace) == + (u'aaaa'+ FOO * len(seq) + u'bbbb', len(seq) + 8)) def test_invalid_cb_for_2bytes_seq(self): """ From pypy.commits at gmail.com Sun Sep 4 11:00:31 2016 From: pypy.commits at gmail.com (mattip) Date: Sun, 04 Sep 2016 08:00:31 -0700 (PDT) Subject: [pypy-commit] pypy release-5.x: Move the bit checking inside helpers, share it from the two places Message-ID: <57cc370f.8bd51c0a.5659f.6d55@mx.google.com> Author: Matti Picus Branch: release-5.x Changeset: r86865:dfb434142f63 Date: 2016-09-04 17:55 +0300 http://bitbucket.org/pypy/pypy/changeset/dfb434142f63/ Log: Move the bit checking inside helpers, share it from the two places (grafted from ee3a2fbec01afa109be9414e105ea7250a7e1b24) diff --git a/rpython/rlib/runicode.py b/rpython/rlib/runicode.py --- a/rpython/rlib/runicode.py +++ b/rpython/rlib/runicode.py @@ -137,6 +137,25 @@ result=result) return result.build(), pos +def _invalid_cont_byte(ordch): + return ordch>>6 != 0x2 # 0b10 + +_invalid_byte_2_of_2 = _invalid_cont_byte +_invalid_byte_3_of_3 = _invalid_cont_byte +_invalid_byte_3_of_4 = _invalid_cont_byte +_invalid_byte_4_of_4 = _invalid_cont_byte + +def _invalid_byte_2_of_3(ordch1, ordch2, allow_surrogates): + return (ordch2>>6 != 0x2 or # 0b10 + (ordch1 == 0xe0 and ordch2 < 0xa0) + # surrogates shouldn't be valid UTF-8! 
+ or (not allow_surrogates and ordch1 == 0xed and ordch2 > 0x9f)) + +def _invalid_byte_2_of_4(ordch1, ordch2): + return (ordch2>>6 != 0x2 or # 0b10 + (ordch1 == 0xf0 and ordch2 < 0x90) or + (ordch1 == 0xf4 and ordch2 > 0x8f)) + @specialize.argtype(6) def str_decode_utf_8_impl(s, size, errors, final, errorhandler, allow_surrogates, result): @@ -173,10 +192,7 @@ ordch2 = ord(s[pos+1]) if n == 3: # 3-bytes seq with only a continuation byte - if (ordch2>>6 != 0x2 or # 0b10 - (ordch1 == 0xe0 and ordch2 < 0xa0) - or (not allow_surrogates and ordch1 == 0xed and ordch2 > 0x9f) - ): + if _invalid_byte_2_of_3(ordch1, ordch2, allow_surrogates): # second byte invalid, take the first and continue r, pos = errorhandler(errors, 'utf8', 'invalid continuation byte', @@ -192,16 +208,14 @@ continue elif n == 4: # 4-bytes seq with 1 or 2 continuation bytes - if (ordch2>>6 != 0x2 or # 0b10 - (ordch1 == 0xf0 and ordch2 < 0x90) or - (ordch1 == 0xf4 and ordch2 > 0x8f)): + if _invalid_byte_2_of_4(ordch1, ordch2): # second byte invalid, take the first and continue r, pos = errorhandler(errors, 'utf8', 'invalid continuation byte', s, pos, pos+1) result.append(r) continue - elif charsleft == 2 and ord(s[pos+2])>>6 != 0x2: # 0b10 + elif charsleft == 2 and _invalid_byte_3_of_4(ord(s[pos+2])): # third byte invalid, take the first two and continue r, pos = errorhandler(errors, 'utf8', 'invalid continuation byte', @@ -228,7 +242,7 @@ elif n == 2: ordch2 = ord(s[pos+1]) - if ordch2>>6 != 0x2: # 0b10 + if _invalid_byte_2_of_2(ordch2): r, pos = errorhandler(errors, 'utf8', 'invalid continuation byte', s, pos, pos+1) @@ -242,17 +256,13 @@ elif n == 3: ordch2 = ord(s[pos+1]) ordch3 = ord(s[pos+2]) - if (ordch2>>6 != 0x2 or # 0b10 - (ordch1 == 0xe0 and ordch2 < 0xa0) - # surrogates shouldn't be valid UTF-8! 
- or (not allow_surrogates and ordch1 == 0xed and ordch2 > 0x9f) - ): + if _invalid_byte_2_of_3(ordch1, ordch2, allow_surrogates): r, pos = errorhandler(errors, 'utf8', 'invalid continuation byte', s, pos, pos+1) result.append(r) continue - elif ordch3>>6 != 0x2: # 0b10 + elif _invalid_byte_3_of_3(ordch3): r, pos = errorhandler(errors, 'utf8', 'invalid continuation byte', s, pos, pos+2) @@ -268,21 +278,19 @@ ordch2 = ord(s[pos+1]) ordch3 = ord(s[pos+2]) ordch4 = ord(s[pos+3]) - if (ordch2>>6 != 0x2 or # 0b10 - (ordch1 == 0xf0 and ordch2 < 0x90) or - (ordch1 == 0xf4 and ordch2 > 0x8f)): + if _invalid_byte_2_of_4(ordch1, ordch2): r, pos = errorhandler(errors, 'utf8', 'invalid continuation byte', s, pos, pos+1) result.append(r) continue - elif ordch3>>6 != 0x2: # 0b10 + elif _invalid_byte_3_of_4(ordch3): r, pos = errorhandler(errors, 'utf8', 'invalid continuation byte', s, pos, pos+2) result.append(r) continue - elif ordch4>>6 != 0x2: # 0b10 + elif _invalid_byte_4_of_4(ordch4): r, pos = errorhandler(errors, 'utf8', 'invalid continuation byte', s, pos, pos+3) From pypy.commits at gmail.com Sun Sep 4 11:00:33 2016 From: pypy.commits at gmail.com (mattip) Date: Sun, 04 Sep 2016 08:00:33 -0700 (PDT) Subject: [pypy-commit] pypy release-5.x: import cffi/b81ca61b6de6 Message-ID: <57cc3711.68adc20a.966c2.df9a@mx.google.com> Author: Matti Picus Branch: release-5.x Changeset: r86866:b29dd462f6ed Date: 2016-09-04 17:55 +0300 http://bitbucket.org/pypy/pypy/changeset/b29dd462f6ed/ Log: import cffi/b81ca61b6de6 (grafted from fdd46325fe3d2c1d29496931411c453c0af576a2) diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO --- a/lib_pypy/cffi.egg-info/PKG-INFO +++ b/lib_pypy/cffi.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cffi -Version: 1.8.0 +Version: 1.8.1 Summary: Foreign Function Interface for Python calling C code. 
Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.8.0" -__version_info__ = (1, 8, 0) +__version__ = "1.8.1" +__version_info__ = (1, 8, 1) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. It may be older than __version__ diff --git a/lib_pypy/cffi/_cffi_include.h b/lib_pypy/cffi/_cffi_include.h --- a/lib_pypy/cffi/_cffi_include.h +++ b/lib_pypy/cffi/_cffi_include.h @@ -1,4 +1,20 @@ #define _CFFI_ + +/* We try to define Py_LIMITED_API before including Python.h. + + Mess: we can only define it if Py_DEBUG, Py_TRACE_REFS and + Py_REF_DEBUG are not defined. This is a best-effort approximation: + we can learn about Py_DEBUG from pyconfig.h, but it is unclear if + the same works for the other two macros. Py_DEBUG implies them, + but not the other way around. 
+*/ +#ifndef _CFFI_USE_EMBEDDING +# include +# if !defined(Py_DEBUG) && !defined(Py_TRACE_REFS) && !defined(Py_REF_DEBUG) +# define Py_LIMITED_API +# endif +#endif + #include #ifdef __cplusplus extern "C" { diff --git a/lib_pypy/cffi/_embedding.h b/lib_pypy/cffi/_embedding.h --- a/lib_pypy/cffi/_embedding.h +++ b/lib_pypy/cffi/_embedding.h @@ -233,7 +233,7 @@ f = PySys_GetObject((char *)"stderr"); if (f != NULL && f != Py_None) { PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME - "\ncompiled with cffi version: 1.8.0" + "\ncompiled with cffi version: 1.8.1" "\n_cffi_backend module: ", f); modules = PyImport_GetModuleDict(); mod = PyDict_GetItemString(modules, "_cffi_backend"); diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -652,7 +652,7 @@ recompile(self, module_name, source, c_file=filename, call_c_compiler=False, **kwds) - def compile(self, tmpdir='.', verbose=0, target=None): + def compile(self, tmpdir='.', verbose=0, target=None, debug=None): """The 'target' argument gives the final file name of the compiled DLL. Use '*' to force distutils' choice, suitable for regular CPython C API modules. 
Use a file name ending in '.*' @@ -669,7 +669,7 @@ module_name, source, source_extension, kwds = self._assigned_source return recompile(self, module_name, source, tmpdir=tmpdir, target=target, source_extension=source_extension, - compiler_verbose=verbose, **kwds) + compiler_verbose=verbose, debug=debug, **kwds) def init_once(self, func, tag): # Read _init_once_cache[tag], which is either (False, lock) if diff --git a/lib_pypy/cffi/backend_ctypes.py b/lib_pypy/cffi/backend_ctypes.py --- a/lib_pypy/cffi/backend_ctypes.py +++ b/lib_pypy/cffi/backend_ctypes.py @@ -997,29 +997,37 @@ assert onerror is None # XXX not implemented return BType(source, error) + _weakref_cache_ref = None + def gcp(self, cdata, destructor): - BType = self.typeof(cdata) + if self._weakref_cache_ref is None: + import weakref + class MyRef(weakref.ref): + def __eq__(self, other): + myref = self() + return self is other or ( + myref is not None and myref is other()) + def __ne__(self, other): + return not (self == other) + self._weakref_cache_ref = {}, MyRef + weak_cache, MyRef = self._weakref_cache_ref if destructor is None: - if not (hasattr(BType, '_gcp_type') and - BType._gcp_type is BType): + try: + del weak_cache[MyRef(cdata)] + except KeyError: raise TypeError("Can remove destructor only on a object " "previously returned by ffi.gc()") - cdata._destructor = None return None - try: - gcp_type = BType._gcp_type - except AttributeError: - class CTypesDataGcp(BType): - __slots__ = ['_orig', '_destructor'] - def __del__(self): - if self._destructor is not None: - self._destructor(self._orig) - gcp_type = BType._gcp_type = CTypesDataGcp - new_cdata = self.cast(gcp_type, cdata) - new_cdata._orig = cdata - new_cdata._destructor = destructor + def remove(k): + cdata, destructor = weak_cache.pop(k, (None, None)) + if destructor is not None: + destructor(cdata) + + new_cdata = self.cast(self.typeof(cdata), cdata) + assert new_cdata is not cdata + weak_cache[MyRef(new_cdata, remove)] = (cdata, 
destructor) return new_cdata typeof = type diff --git a/lib_pypy/cffi/ffiplatform.py b/lib_pypy/cffi/ffiplatform.py --- a/lib_pypy/cffi/ffiplatform.py +++ b/lib_pypy/cffi/ffiplatform.py @@ -21,12 +21,12 @@ allsources.append(os.path.normpath(src)) return Extension(name=modname, sources=allsources, **kwds) -def compile(tmpdir, ext, compiler_verbose=0): +def compile(tmpdir, ext, compiler_verbose=0, debug=None): """Compile a C extension module using distutils.""" saved_environ = os.environ.copy() try: - outputfilename = _build(tmpdir, ext, compiler_verbose) + outputfilename = _build(tmpdir, ext, compiler_verbose, debug) outputfilename = os.path.abspath(outputfilename) finally: # workaround for a distutils bugs where some env vars can @@ -36,7 +36,7 @@ os.environ[key] = value return outputfilename -def _build(tmpdir, ext, compiler_verbose=0): +def _build(tmpdir, ext, compiler_verbose=0, debug=None): # XXX compact but horrible :-( from distutils.core import Distribution import distutils.errors, distutils.log @@ -44,6 +44,9 @@ dist = Distribution({'ext_modules': [ext]}) dist.parse_config_files() options = dist.get_option_dict('build_ext') + if debug is None: + debug = sys.flags.debug + options['debug'] = ('ffiplatform', debug) options['force'] = ('ffiplatform', True) options['build_lib'] = ('ffiplatform', tmpdir) options['build_temp'] = ('ffiplatform', tmpdir) diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py --- a/lib_pypy/cffi/recompiler.py +++ b/lib_pypy/cffi/recompiler.py @@ -275,8 +275,8 @@ def write_c_source_to_f(self, f, preamble): self._f = f prnt = self._prnt - if self.ffi._embedding is None: - prnt('#define Py_LIMITED_API') + if self.ffi._embedding is not None: + prnt('#define _CFFI_USE_EMBEDDING') # # first the '#include' (actually done by inlining the file's content) lines = self._rel_readlines('_cffi_include.h') @@ -1431,7 +1431,7 @@ def recompile(ffi, module_name, preamble, tmpdir='.', call_c_compiler=True, c_file=None, 
source_extension='.c', extradir=None, - compiler_verbose=1, target=None, **kwds): + compiler_verbose=1, target=None, debug=None, **kwds): if not isinstance(module_name, str): module_name = module_name.encode('ascii') if ffi._windows_unicode: @@ -1467,7 +1467,8 @@ if target != '*': _patch_for_target(patchlist, target) os.chdir(tmpdir) - outputfilename = ffiplatform.compile('.', ext, compiler_verbose) + outputfilename = ffiplatform.compile('.', ext, + compiler_verbose, debug) finally: os.chdir(cwd) _unpatch_meths(patchlist) diff --git a/lib_pypy/cffi/setuptools_ext.py b/lib_pypy/cffi/setuptools_ext.py --- a/lib_pypy/cffi/setuptools_ext.py +++ b/lib_pypy/cffi/setuptools_ext.py @@ -69,16 +69,36 @@ else: _add_c_module(dist, ffi, module_name, source, source_extension, kwds) +def _set_py_limited_api(Extension, kwds): + """ + Add py_limited_api to kwds if setuptools >= 26 is in use. + Do not alter the setting if it already exists. + Setuptools takes care of ignoring the flag on Python 2 and PyPy. + """ + if 'py_limited_api' not in kwds: + import setuptools + try: + setuptools_major_version = int(setuptools.__version__.partition('.')[0]) + if setuptools_major_version >= 26: + kwds['py_limited_api'] = True + except ValueError: # certain development versions of setuptools + # If we don't know the version number of setuptools, we + # try to set 'py_limited_api' anyway. At worst, we get a + # warning. + kwds['py_limited_api'] = True + return kwds def _add_c_module(dist, ffi, module_name, source, source_extension, kwds): from distutils.core import Extension - from distutils.command.build_ext import build_ext + # We are a setuptools extension. Need this build_ext for py_limited_api. 
+ from setuptools.command.build_ext import build_ext from distutils.dir_util import mkpath from distutils import log from cffi import recompiler allsources = ['$PLACEHOLDER'] allsources.extend(kwds.pop('sources', [])) + kwds = _set_py_limited_api(Extension, kwds) ext = Extension(name=module_name, sources=allsources, **kwds) def make_mod(tmpdir, pre_run=None): diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -3,7 +3,7 @@ from rpython.rlib import rdynload, clibffi, entrypoint from rpython.rtyper.lltypesystem import rffi -VERSION = "1.8.0" +VERSION = "1.8.1" FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI try: diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1,7 +1,7 @@ # ____________________________________________________________ import sys -assert __version__ == "1.8.0", ("This test_c.py file is for testing a version" +assert __version__ == "1.8.1", ("This test_c.py file is for testing a version" " of cffi that differs from the one that we" " get from 'import _cffi_backend'") if sys.version_info < (3,): diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py @@ -1479,6 +1479,7 @@ assert p1[0] == 123 seen.append(1) q = ffi.gc(p, destructor) + assert ffi.typeof(q) is ffi.typeof(p) import gc; gc.collect() assert seen == [] del q diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py +++ 
b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py @@ -149,3 +149,25 @@ p = snip_setuptools_verify2.C.getpwuid(0) assert snip_setuptools_verify2.ffi.string(p.pw_name) == b"root" ''') + + def test_set_py_limited_api(self): + from cffi.setuptools_ext import _set_py_limited_api + try: + import setuptools + orig_version = setuptools.__version__ + setuptools.__version__ = '26.0.0' + from setuptools import Extension + + kwds = _set_py_limited_api(Extension, {}) + assert kwds['py_limited_api'] == True + + setuptools.__version__ = '25.0' + kwds = _set_py_limited_api(Extension, {}) + assert not kwds + + setuptools.__version__ = 'development' + kwds = _set_py_limited_api(Extension, {}) + assert kwds['py_limited_api'] == True + + finally: + setuptools.__version__ = orig_version diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py @@ -1975,9 +1975,9 @@ def test_function_returns_partial_struct(): ffi = FFI() - ffi.cdef("struct a { int a; ...; }; struct a f1(int);") + ffi.cdef("struct aaa { int a; ...; }; struct aaa f1(int);") lib = verify(ffi, "test_function_returns_partial_struct", """ - struct a { int b, a, c; }; - static struct a f1(int x) { struct a s = {0}; s.a = x; return s; } + struct aaa { int b, a, c; }; + static struct aaa f1(int x) { struct aaa s = {0}; s.a = x; return s; } """) assert lib.f1(52).a == 52 From pypy.commits at gmail.com Sun Sep 4 11:00:34 2016 From: pypy.commits at gmail.com (mattip) Date: Sun, 04 Sep 2016 08:00:34 -0700 (PDT) Subject: [pypy-commit] pypy release-5.x: import cffi/cb6147abe7af Message-ID: <57cc3712.8628c20a.f6b0b.e4be@mx.google.com> Author: Matti Picus Branch: release-5.x Changeset: r86867:dfa482ad3f4c Date: 2016-09-04 17:55 +0300 http://bitbucket.org/pypy/pypy/changeset/dfa482ad3f4c/ Log: import 
cffi/cb6147abe7af (grafted from c224171fc33e53442553735d23896c4cd2d51b34) diff --git a/lib_pypy/cffi/backend_ctypes.py b/lib_pypy/cffi/backend_ctypes.py --- a/lib_pypy/cffi/backend_ctypes.py +++ b/lib_pypy/cffi/backend_ctypes.py @@ -1009,6 +1009,12 @@ myref is not None and myref is other()) def __ne__(self, other): return not (self == other) + def __hash__(self): + try: + return self._hash + except AttributeError: + self._hash = hash(self()) + return self._hash self._weakref_cache_ref = {}, MyRef weak_cache, MyRef = self._weakref_cache_ref diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py @@ -154,7 +154,10 @@ from cffi.setuptools_ext import _set_py_limited_api try: import setuptools - orig_version = setuptools.__version__ + except ImportError as e: + py.test.skip(str(e)) + orig_version = setuptools.__version__ + try: setuptools.__version__ = '26.0.0' from setuptools import Extension From pypy.commits at gmail.com Sun Sep 4 11:28:41 2016 From: pypy.commits at gmail.com (rlamy) Date: Sun, 04 Sep 2016 08:28:41 -0700 (PDT) Subject: [pypy-commit] pypy union-side-effects: Disallow unions of char and unichar, since they make the annotator inconsistent Message-ID: <57cc3da9.8fc51c0a.4b372.9990@mx.google.com> Author: Ronan Lamy Branch: union-side-effects Changeset: r86868:ec11787ac32c Date: 2016-09-04 16:27 +0100 http://bitbucket.org/pypy/pypy/changeset/ec11787ac32c/ Log: Disallow unions of char and unichar, since they make the annotator inconsistent diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -378,11 +378,6 @@ return SomeChar(no_nul=no_nul) -class __extend__(pairtype(SomeChar, SomeUnicodeCodePoint), - pairtype(SomeUnicodeCodePoint, 
SomeChar)): - def union((uchr1, uchr2)): - return SomeUnicodeCodePoint() - class __extend__(pairtype(SomeUnicodeCodePoint, SomeUnicodeCodePoint)): def union((uchr1, uchr2)): no_nul = uchr1.no_nul and uchr2.no_nul diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -3453,17 +3453,6 @@ assert isinstance(s, annmodel.SomeUnicodeString) assert s.no_nul - def test_unicode_char(self): - def f(x, i): - for c in x: - if c == i: - return c - return 'x' - - a = self.RPythonAnnotator() - s = a.build_types(f, [unicode, str]) - assert isinstance(s, annmodel.SomeUnicodeCodePoint) - def test_strformatting_unicode(self): def f(x): return '%s' % unichr(x) diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -727,6 +727,11 @@ count = 0 n = end - start m = len(s2.chars) + tp = typeOf(s1) + if tp == string_repr.lowleveltype or tp == Char: + NUL = '\0' + else: + NUL = u'\0' if m == 0: if mode == FAST_COUNT: @@ -771,7 +776,7 @@ if i + m < len(s1.chars): c = s1.chars[i + m] else: - c = '\0' + c = NUL if not bloom(mask, c): i += m else: @@ -780,7 +785,7 @@ if i + m < len(s1.chars): c = s1.chars[i + m] else: - c = '\0' + c = NUL if not bloom(mask, c): i += m else: diff --git a/rpython/rtyper/rstr.py b/rpython/rtyper/rstr.py --- a/rpython/rtyper/rstr.py +++ b/rpython/rtyper/rstr.py @@ -715,9 +715,7 @@ return hop.genop('cast_unichar_to_int', vlist, resulttype=Signed) -class __extend__(pairtype(AbstractUniCharRepr, AbstractUniCharRepr), - pairtype(AbstractCharRepr, AbstractUniCharRepr), - pairtype(AbstractUniCharRepr, AbstractCharRepr)): +class __extend__(pairtype(AbstractUniCharRepr, AbstractUniCharRepr)): def rtype_eq(_, hop): return _rtype_unchr_compare_template(hop, 'eq') def rtype_ne(_, hop): return 
_rtype_unchr_compare_template(hop, 'ne') def rtype_lt(_, hop): return _rtype_unchr_compare_template_ord(hop, 'lt') @@ -737,12 +735,7 @@ vlist = hop.inputargs(*hop.args_r) vlist2 = [] for v in vlist: - if v.concretetype == lltype.Char: - v = hop.genop('cast_char_to_int', [v], resulttype=lltype.Signed) - elif v.concretetype == lltype.UniChar: - v = hop.genop('cast_unichar_to_int', [v], resulttype=lltype.Signed) - else: - assert 0, v.concretetype + v = hop.genop('cast_unichar_to_int', [v], resulttype=lltype.Signed) vlist2.append(v) return hop.genop('int_' + func, vlist2, resulttype=Bool) @@ -767,11 +760,6 @@ return llops.gendirectcall(r_from.ll.ll_stritem_nonneg, v, c_zero) return NotImplemented -class __extend__(pairtype(AbstractCharRepr, AbstractUniCharRepr)): - def convert_from_to((r_from, r_to), v, llops): - v2 = llops.genop('cast_char_to_int', [v], resulttype=Signed) - return llops.genop('cast_int_to_unichar', [v2], resulttype=UniChar) - # ____________________________________________________________ # # Iteration. 
diff --git a/rpython/rtyper/test/test_rlist.py b/rpython/rtyper/test/test_rlist.py --- a/rpython/rtyper/test/test_rlist.py +++ b/rpython/rtyper/test/test_rlist.py @@ -1321,7 +1321,7 @@ def test_unicharlist_extension_1(self): def f(n): - s = 'hello%d' % n + s = u'hello%d' % n l = [u'a', u'b'] l += s return ''.join([chr(ord(c)) for c in l]) @@ -1348,7 +1348,7 @@ def test_unicharlist_extension_2(self): def f(n, i): - s = 'hello%d' % n + s = u'hello%d' % n assert 0 <= i <= len(s) l = [u'a', u'b'] l += s[i:] @@ -1377,7 +1377,7 @@ def test_unicharlist_extension_3(self): def f(n, i, j): - s = 'hello%d' % n + s = u'hello%d' % n assert 0 <= i <= j <= len(s) l = [u'a', u'b'] l += s[i:j] @@ -1396,7 +1396,7 @@ def test_unicharlist_extension_4(self): def f(n): - s = 'hello%d' % n + s = u'hello%d' % n l = [u'a', u'b'] l += s[:-1] return ''.join([chr(ord(c)) for c in l]) @@ -1416,7 +1416,7 @@ def test_unicharlist_extension_5(self): def f(count): l = [u'a', u'b'] - l += '.' * count # NON-UNICODE-char * count + l += u'.' * count return ''.join([chr(ord(c)) for c in l]) res = self.interpret(f, [7]) assert self.ll_to_string(res) == 'ab.......' 
diff --git a/rpython/rtyper/test/test_rstr.py b/rpython/rtyper/test/test_rstr.py --- a/rpython/rtyper/test/test_rstr.py +++ b/rpython/rtyper/test/test_rstr.py @@ -1120,7 +1120,7 @@ for i, x in enumerate(s): if i == n: return x - return 'x' + return const('x') res = self.interpret(fn, [2]) assert res == 'c' diff --git a/rpython/rtyper/test/test_runicode.py b/rpython/rtyper/test/test_runicode.py --- a/rpython/rtyper/test/test_runicode.py +++ b/rpython/rtyper/test/test_runicode.py @@ -216,33 +216,6 @@ return d[c] assert self.interpret(fn, [u'\u03b1']) == 42 - def test_convert_char_to_unichar(self): - def g(c): - return ord(c) - def fn(n): - if n < 0: - c = unichr(-n) - else: - c = chr(n) - return g(c) - assert self.interpret(fn, [65]) == 65 - assert self.interpret(fn, [-5555]) == 5555 - - def test_char_unichar_eq(self): - def fn(c1, c2): - return c1 == c2 - assert self.interpret(fn, [u'(', '(']) == True - assert self.interpret(fn, [u'\u1028', '(']) == False - assert self.interpret(fn, ['(', u'(']) == True - assert self.interpret(fn, ['(', u'\u1028']) == False - - def test_char_unichar_eq_2(self): - def fn(c1): - return c1 == 'X' - assert self.interpret(fn, [u'(']) == False - assert self.interpret(fn, [u'\u1058']) == False - assert self.interpret(fn, [u'X']) == True - def test_strformat_unicode_arg(self): const = self.const def percentS(s, i): From pypy.commits at gmail.com Sun Sep 4 11:48:45 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 04 Sep 2016 08:48:45 -0700 (PDT) Subject: [pypy-commit] pypy default: Explicitly detect that we found OpenSSL 1.1, and abort with a message Message-ID: <57cc425d.87941c0a.801f7.8e9c@mx.google.com> Author: Armin Rigo Branch: Changeset: r86869:48ca2469fc30 Date: 2016-09-04 17:48 +0200 http://bitbucket.org/pypy/pypy/changeset/48ca2469fc30/ Log: Explicitly detect that we found OpenSSL 1.1, and abort with a message as explicit as possible, hopefully better than an obscure crash soon diff --git a/rpython/rlib/ropenssl.py 
b/rpython/rlib/ropenssl.py --- a/rpython/rlib/ropenssl.py +++ b/rpython/rlib/ropenssl.py @@ -97,6 +97,21 @@ OPENSSL_VERSION_NUMBER = cconfig["OPENSSL_VERSION_NUMBER"] HAVE_TLSv1_2 = OPENSSL_VERSION_NUMBER >= 0x10001000 +if OPENSSL_VERSION_NUMBER >= 0x10100000: + eci.pre_include_bits = () + eci.post_include_bits = () + raise Exception("""OpenSSL version >= 1.1 not supported yet. + + This program requires OpenSSL version 1.0.x, and may also + work with LibreSSL or OpenSSL 0.9.x. OpenSSL 1.1 is quite + some work to update to; contributions are welcome. Sorry, + you need to install an older version of OpenSSL for now. + Make sure this older version is the one picked up by this + program when it runs the compiler. + + This is the configuration used: %r""" % (eci,)) + + class CConfig: _compilation_info_ = eci From pypy.commits at gmail.com Sun Sep 4 14:11:46 2016 From: pypy.commits at gmail.com (mattip) Date: Sun, 04 Sep 2016 11:11:46 -0700 (PDT) Subject: [pypy-commit] pypy release-5.x: Explicitly detect that we found OpenSSL 1.1, and abort with a message Message-ID: <57cc63e2.21b0c20a.5d2be.8e11@mx.google.com> Author: Matti Picus Branch: release-5.x Changeset: r86870:050d84dd7899 Date: 2016-09-04 21:10 +0300 http://bitbucket.org/pypy/pypy/changeset/050d84dd7899/ Log: Explicitly detect that we found OpenSSL 1.1, and abort with a message as explicit as possible, hopefully better than an obscure crash soon (grafted from 48ca2469fc30053c39d92b8d4ccef6e82800fb57) diff --git a/rpython/rlib/ropenssl.py b/rpython/rlib/ropenssl.py --- a/rpython/rlib/ropenssl.py +++ b/rpython/rlib/ropenssl.py @@ -97,6 +97,21 @@ OPENSSL_VERSION_NUMBER = cconfig["OPENSSL_VERSION_NUMBER"] HAVE_TLSv1_2 = OPENSSL_VERSION_NUMBER >= 0x10001000 +if OPENSSL_VERSION_NUMBER >= 0x10100000: + eci.pre_include_bits = () + eci.post_include_bits = () + raise Exception("""OpenSSL version >= 1.1 not supported yet. + + This program requires OpenSSL version 1.0.x, and may also + work with LibreSSL or OpenSSL 0.9.x. 
OpenSSL 1.1 is quite + some work to update to; contributions are welcome. Sorry, + you need to install an older version of OpenSSL for now. + Make sure this older version is the one picked up by this + program when it runs the compiler. + + This is the configuration used: %r""" % (eci,)) + + class CConfig: _compilation_info_ = eci From pypy.commits at gmail.com Sun Sep 4 14:31:55 2016 From: pypy.commits at gmail.com (rlamy) Date: Sun, 04 Sep 2016 11:31:55 -0700 (PDT) Subject: [pypy-commit] pypy union-side-effects: Add test for constants Message-ID: <57cc689b.434bc20a.b8022.3185@mx.google.com> Author: Ronan Lamy Branch: union-side-effects Changeset: r86871:6cee69266cee Date: 2016-09-04 19:31 +0100 http://bitbucket.org/pypy/pypy/changeset/6cee69266cee/ Log: Add test for constants diff --git a/rpython/annotator/test/test_model.py b/rpython/annotator/test/test_model.py --- a/rpython/annotator/test/test_model.py +++ b/rpython/annotator/test/test_model.py @@ -1,6 +1,6 @@ import pytest -from hypothesis import given +from hypothesis import given, assume, example from hypothesis import strategies as st from rpython.flowspace.model import Variable @@ -143,16 +143,21 @@ def compatible_pair(pair_s): return compatible(*pair_s) -st_float = st.just(SomeFloat()) | st.builds(const_float, st.floats()) -st_int = st.one_of(st.builds(SomeInteger, st.booleans(), st.booleans()), - st.builds(const_int, st.integers())) +st_const_float = st.builds(const_float, st.floats()) +st_float = st.just(SomeFloat()) | st_const_float +st_const_int = st.builds(const_int, st.integers()) +st_int = st.builds(SomeInteger, st.booleans(), st.booleans()) | st_const_int +st_const_bool = st.sampled_from([s_True, s_False]) st_bool = st.sampled_from([s_Bool, s_True, s_False]) st_numeric = st.one_of(st_float, st_int, st_bool) -st_str = (st.builds(SomeString, st.booleans(), st.booleans()) - | st.builds(const_str, st.binary())) +st_const_str = st.builds(const_str, st.binary()) +st_str = st.builds(SomeString, 
st.booleans(), st.booleans()) | st_const_str +st_const_unicode = st.builds(const_unicode, st.text()) st_unicode = (st.builds(SomeUnicodeString, st.booleans(), st.booleans()) - | st.builds(const_unicode, st.text())) + | st_const_unicode) st_simple = st.one_of(st_numeric, st_str, st_unicode, st.just(s_ImpossibleValue), st.just(s_None)) +st_const = st.one_of(st_const_float, st_const_int, st_const_bool, + st_const_str, st_const_unicode, st.just(s_None)) def valid_unions(st_ann): """From a strategy generating annotations, create a strategy returning @@ -188,6 +193,11 @@ s1, s2, s3 = t assert union(union(s1, s2), s3) == union(s1, union(s2, s3)) + at given(s_const=st_const, s_obj=st_annotation) +def test_constants_are_atoms(s_const, s_obj): + assume(s_const.contains(s_obj)) + assert s_const == s_obj or s_obj == s_ImpossibleValue + def compile_function(function, annotation=[]): t = TranslationContext() From pypy.commits at gmail.com Sun Sep 4 14:34:59 2016 From: pypy.commits at gmail.com (rlamy) Date: Sun, 04 Sep 2016 11:34:59 -0700 (PDT) Subject: [pypy-commit] pypy union-side-effects: hg merge default Message-ID: <57cc6953.45c8c20a.4d79b.2fc4@mx.google.com> Author: Ronan Lamy Branch: union-side-effects Changeset: r86872:be656798450c Date: 2016-09-04 19:34 +0100 http://bitbucket.org/pypy/pypy/changeset/be656798450c/ Log: hg merge default diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO --- a/lib_pypy/cffi.egg-info/PKG-INFO +++ b/lib_pypy/cffi.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cffi -Version: 1.8.0 +Version: 1.8.1 Summary: Foreign Function Interface for Python calling C code. 
Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.8.0" -__version_info__ = (1, 8, 0) +__version__ = "1.8.1" +__version_info__ = (1, 8, 1) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. It may be older than __version__ diff --git a/lib_pypy/cffi/_cffi_include.h b/lib_pypy/cffi/_cffi_include.h --- a/lib_pypy/cffi/_cffi_include.h +++ b/lib_pypy/cffi/_cffi_include.h @@ -1,4 +1,20 @@ #define _CFFI_ + +/* We try to define Py_LIMITED_API before including Python.h. + + Mess: we can only define it if Py_DEBUG, Py_TRACE_REFS and + Py_REF_DEBUG are not defined. This is a best-effort approximation: + we can learn about Py_DEBUG from pyconfig.h, but it is unclear if + the same works for the other two macros. Py_DEBUG implies them, + but not the other way around. 
+*/ +#ifndef _CFFI_USE_EMBEDDING +# include +# if !defined(Py_DEBUG) && !defined(Py_TRACE_REFS) && !defined(Py_REF_DEBUG) +# define Py_LIMITED_API +# endif +#endif + #include #ifdef __cplusplus extern "C" { diff --git a/lib_pypy/cffi/_embedding.h b/lib_pypy/cffi/_embedding.h --- a/lib_pypy/cffi/_embedding.h +++ b/lib_pypy/cffi/_embedding.h @@ -233,7 +233,7 @@ f = PySys_GetObject((char *)"stderr"); if (f != NULL && f != Py_None) { PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME - "\ncompiled with cffi version: 1.8.0" + "\ncompiled with cffi version: 1.8.1" "\n_cffi_backend module: ", f); modules = PyImport_GetModuleDict(); mod = PyDict_GetItemString(modules, "_cffi_backend"); diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -652,7 +652,7 @@ recompile(self, module_name, source, c_file=filename, call_c_compiler=False, **kwds) - def compile(self, tmpdir='.', verbose=0, target=None): + def compile(self, tmpdir='.', verbose=0, target=None, debug=None): """The 'target' argument gives the final file name of the compiled DLL. Use '*' to force distutils' choice, suitable for regular CPython C API modules. 
Use a file name ending in '.*' @@ -669,7 +669,7 @@ module_name, source, source_extension, kwds = self._assigned_source return recompile(self, module_name, source, tmpdir=tmpdir, target=target, source_extension=source_extension, - compiler_verbose=verbose, **kwds) + compiler_verbose=verbose, debug=debug, **kwds) def init_once(self, func, tag): # Read _init_once_cache[tag], which is either (False, lock) if diff --git a/lib_pypy/cffi/backend_ctypes.py b/lib_pypy/cffi/backend_ctypes.py --- a/lib_pypy/cffi/backend_ctypes.py +++ b/lib_pypy/cffi/backend_ctypes.py @@ -997,29 +997,43 @@ assert onerror is None # XXX not implemented return BType(source, error) + _weakref_cache_ref = None + def gcp(self, cdata, destructor): - BType = self.typeof(cdata) + if self._weakref_cache_ref is None: + import weakref + class MyRef(weakref.ref): + def __eq__(self, other): + myref = self() + return self is other or ( + myref is not None and myref is other()) + def __ne__(self, other): + return not (self == other) + def __hash__(self): + try: + return self._hash + except AttributeError: + self._hash = hash(self()) + return self._hash + self._weakref_cache_ref = {}, MyRef + weak_cache, MyRef = self._weakref_cache_ref if destructor is None: - if not (hasattr(BType, '_gcp_type') and - BType._gcp_type is BType): + try: + del weak_cache[MyRef(cdata)] + except KeyError: raise TypeError("Can remove destructor only on a object " "previously returned by ffi.gc()") - cdata._destructor = None return None - try: - gcp_type = BType._gcp_type - except AttributeError: - class CTypesDataGcp(BType): - __slots__ = ['_orig', '_destructor'] - def __del__(self): - if self._destructor is not None: - self._destructor(self._orig) - gcp_type = BType._gcp_type = CTypesDataGcp - new_cdata = self.cast(gcp_type, cdata) - new_cdata._orig = cdata - new_cdata._destructor = destructor + def remove(k): + cdata, destructor = weak_cache.pop(k, (None, None)) + if destructor is not None: + destructor(cdata) + + new_cdata = 
self.cast(self.typeof(cdata), cdata) + assert new_cdata is not cdata + weak_cache[MyRef(new_cdata, remove)] = (cdata, destructor) return new_cdata typeof = type diff --git a/lib_pypy/cffi/ffiplatform.py b/lib_pypy/cffi/ffiplatform.py --- a/lib_pypy/cffi/ffiplatform.py +++ b/lib_pypy/cffi/ffiplatform.py @@ -21,12 +21,12 @@ allsources.append(os.path.normpath(src)) return Extension(name=modname, sources=allsources, **kwds) -def compile(tmpdir, ext, compiler_verbose=0): +def compile(tmpdir, ext, compiler_verbose=0, debug=None): """Compile a C extension module using distutils.""" saved_environ = os.environ.copy() try: - outputfilename = _build(tmpdir, ext, compiler_verbose) + outputfilename = _build(tmpdir, ext, compiler_verbose, debug) outputfilename = os.path.abspath(outputfilename) finally: # workaround for a distutils bugs where some env vars can @@ -36,7 +36,7 @@ os.environ[key] = value return outputfilename -def _build(tmpdir, ext, compiler_verbose=0): +def _build(tmpdir, ext, compiler_verbose=0, debug=None): # XXX compact but horrible :-( from distutils.core import Distribution import distutils.errors, distutils.log @@ -44,6 +44,9 @@ dist = Distribution({'ext_modules': [ext]}) dist.parse_config_files() options = dist.get_option_dict('build_ext') + if debug is None: + debug = sys.flags.debug + options['debug'] = ('ffiplatform', debug) options['force'] = ('ffiplatform', True) options['build_lib'] = ('ffiplatform', tmpdir) options['build_temp'] = ('ffiplatform', tmpdir) diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py --- a/lib_pypy/cffi/recompiler.py +++ b/lib_pypy/cffi/recompiler.py @@ -275,8 +275,8 @@ def write_c_source_to_f(self, f, preamble): self._f = f prnt = self._prnt - if self.ffi._embedding is None: - prnt('#define Py_LIMITED_API') + if self.ffi._embedding is not None: + prnt('#define _CFFI_USE_EMBEDDING') # # first the '#include' (actually done by inlining the file's content) lines = self._rel_readlines('_cffi_include.h') @@ -1431,7 
+1431,7 @@ def recompile(ffi, module_name, preamble, tmpdir='.', call_c_compiler=True, c_file=None, source_extension='.c', extradir=None, - compiler_verbose=1, target=None, **kwds): + compiler_verbose=1, target=None, debug=None, **kwds): if not isinstance(module_name, str): module_name = module_name.encode('ascii') if ffi._windows_unicode: @@ -1467,7 +1467,8 @@ if target != '*': _patch_for_target(patchlist, target) os.chdir(tmpdir) - outputfilename = ffiplatform.compile('.', ext, compiler_verbose) + outputfilename = ffiplatform.compile('.', ext, + compiler_verbose, debug) finally: os.chdir(cwd) _unpatch_meths(patchlist) diff --git a/lib_pypy/cffi/setuptools_ext.py b/lib_pypy/cffi/setuptools_ext.py --- a/lib_pypy/cffi/setuptools_ext.py +++ b/lib_pypy/cffi/setuptools_ext.py @@ -69,16 +69,36 @@ else: _add_c_module(dist, ffi, module_name, source, source_extension, kwds) +def _set_py_limited_api(Extension, kwds): + """ + Add py_limited_api to kwds if setuptools >= 26 is in use. + Do not alter the setting if it already exists. + Setuptools takes care of ignoring the flag on Python 2 and PyPy. + """ + if 'py_limited_api' not in kwds: + import setuptools + try: + setuptools_major_version = int(setuptools.__version__.partition('.')[0]) + if setuptools_major_version >= 26: + kwds['py_limited_api'] = True + except ValueError: # certain development versions of setuptools + # If we don't know the version number of setuptools, we + # try to set 'py_limited_api' anyway. At worst, we get a + # warning. + kwds['py_limited_api'] = True + return kwds def _add_c_module(dist, ffi, module_name, source, source_extension, kwds): from distutils.core import Extension - from distutils.command.build_ext import build_ext + # We are a setuptools extension. Need this build_ext for py_limited_api. 
+ from setuptools.command.build_ext import build_ext from distutils.dir_util import mkpath from distutils import log from cffi import recompiler allsources = ['$PLACEHOLDER'] allsources.extend(kwds.pop('sources', [])) + kwds = _set_py_limited_api(Extension, kwds) ext = Extension(name=module_name, sources=allsources, **kwds) def make_mod(tmpdir, pre_run=None): diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -3,7 +3,7 @@ from rpython.rlib import rdynload, clibffi, entrypoint from rpython.rtyper.lltypesystem import rffi -VERSION = "1.8.0" +VERSION = "1.8.1" FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI try: diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1,7 +1,7 @@ # ____________________________________________________________ import sys -assert __version__ == "1.8.0", ("This test_c.py file is for testing a version" +assert __version__ == "1.8.1", ("This test_c.py file is for testing a version" " of cffi that differs from the one that we" " get from 'import _cffi_backend'") if sys.version_info < (3,): diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -29,8 +29,8 @@ #define PY_VERSION "2.7.10" /* PyPy version as a string */ -#define PYPY_VERSION "5.4.1-alpha0" -#define PYPY_VERSION_NUM 0x05040100 +#define PYPY_VERSION "5.5.0-alpha0" +#define PYPY_VERSION_NUM 0x05050000 /* Defined to mean a PyPy where cpyext holds more regular references to PyObjects, e.g. 
staying alive as long as the internal PyPy object diff --git a/pypy/module/cpyext/test/test_version.py b/pypy/module/cpyext/test/test_version.py --- a/pypy/module/cpyext/test/test_version.py +++ b/pypy/module/cpyext/test/test_version.py @@ -32,9 +32,11 @@ assert module.py_minor_version == sys.version_info.minor assert module.py_micro_version == sys.version_info.micro - @pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='pypy only test') + #@pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='pypy only test') def test_pypy_versions(self): import sys + if '__pypy__' not in sys.builtin_module_names: + py.test.skip("pypy only test") init = """ if (Py_IsInitialized()) { PyObject *m = Py_InitModule("foo", NULL); diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -10,7 +10,7 @@ #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h -PYPY_VERSION = (5, 4, 1, "alpha", 0) #XXX # sync patchlevel.h +PYPY_VERSION = (5, 5, 0, "alpha", 0) #XXX # sync patchlevel.h import pypy diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py @@ -1479,6 +1479,7 @@ assert p1[0] == 123 seen.append(1) q = ffi.gc(p, destructor) + assert ffi.typeof(q) is ffi.typeof(p) import gc; gc.collect() assert seen == [] del q diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py @@ -149,3 +149,28 @@ p = snip_setuptools_verify2.C.getpwuid(0) assert 
snip_setuptools_verify2.ffi.string(p.pw_name) == b"root" ''') + + def test_set_py_limited_api(self): + from cffi.setuptools_ext import _set_py_limited_api + try: + import setuptools + except ImportError as e: + py.test.skip(str(e)) + orig_version = setuptools.__version__ + try: + setuptools.__version__ = '26.0.0' + from setuptools import Extension + + kwds = _set_py_limited_api(Extension, {}) + assert kwds['py_limited_api'] == True + + setuptools.__version__ = '25.0' + kwds = _set_py_limited_api(Extension, {}) + assert not kwds + + setuptools.__version__ = 'development' + kwds = _set_py_limited_api(Extension, {}) + assert kwds['py_limited_api'] == True + + finally: + setuptools.__version__ = orig_version diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py @@ -1975,9 +1975,9 @@ def test_function_returns_partial_struct(): ffi = FFI() - ffi.cdef("struct a { int a; ...; }; struct a f1(int);") + ffi.cdef("struct aaa { int a; ...; }; struct aaa f1(int);") lib = verify(ffi, "test_function_returns_partial_struct", """ - struct a { int b, a, c; }; - static struct a f1(int x) { struct a s = {0}; s.a = x; return s; } + struct aaa { int b, a, c; }; + static struct aaa f1(int x) { struct aaa s = {0}; s.a = x; return s; } """) assert lib.f1(52).a == 52 diff --git a/rpython/rlib/ropenssl.py b/rpython/rlib/ropenssl.py --- a/rpython/rlib/ropenssl.py +++ b/rpython/rlib/ropenssl.py @@ -97,6 +97,21 @@ OPENSSL_VERSION_NUMBER = cconfig["OPENSSL_VERSION_NUMBER"] HAVE_TLSv1_2 = OPENSSL_VERSION_NUMBER >= 0x10001000 +if OPENSSL_VERSION_NUMBER >= 0x10100000: + eci.pre_include_bits = () + eci.post_include_bits = () + raise Exception("""OpenSSL version >= 1.1 not supported yet. 
+ + This program requires OpenSSL version 1.0.x, and may also + work with LibreSSL or OpenSSL 0.9.x. OpenSSL 1.1 is quite + some work to update to; contributions are welcome. Sorry, + you need to install an older version of OpenSSL for now. + Make sure this older version is the one picked up by this + program when it runs the compiler. + + This is the configuration used: %r""" % (eci,)) + + class CConfig: _compilation_info_ = eci diff --git a/rpython/rlib/runicode.py b/rpython/rlib/runicode.py --- a/rpython/rlib/runicode.py +++ b/rpython/rlib/runicode.py @@ -1,5 +1,5 @@ import sys -from rpython.rlib.objectmodel import specialize, we_are_translated +from rpython.rlib.objectmodel import specialize, we_are_translated, enforceargs from rpython.rlib.rstring import StringBuilder, UnicodeBuilder from rpython.rlib.rarithmetic import r_uint, intmask, widen from rpython.rlib.unicodedata import unicodedb @@ -145,19 +145,21 @@ _invalid_byte_3_of_4 = _invalid_cont_byte _invalid_byte_4_of_4 = _invalid_cont_byte - at specialize.arg(2) + at enforceargs(allow_surrogates=bool) def _invalid_byte_2_of_3(ordch1, ordch2, allow_surrogates): return (ordch2>>6 != 0x2 or # 0b10 (ordch1 == 0xe0 and ordch2 < 0xa0) # surrogates shouldn't be valid UTF-8! 
- or (not allow_surrogates and ordch1 == 0xed and ordch2 > 0x9f)) + or (ordch1 == 0xed and ordch2 > 0x9f and not allow_surrogates)) def _invalid_byte_2_of_4(ordch1, ordch2): return (ordch2>>6 != 0x2 or # 0b10 (ordch1 == 0xf0 and ordch2 < 0x90) or (ordch1 == 0xf4 and ordch2 > 0x8f)) - at specialize.arg(5) +# note: this specialize() is here for rtyper/rstr.py, which calls this +# function too but with its own fixed errorhandler + at specialize.arg_or_var(4) def str_decode_utf_8_impl(s, size, errors, final, errorhandler, allow_surrogates, result): if size == 0: @@ -330,6 +332,9 @@ return unicode_encode_utf_8_impl(s, size, errors, errorhandler, allow_surrogates=allow_surrogates) +# note: this specialize() is here for rtyper/rstr.py, which calls this +# function too but with its own fixed errorhandler + at specialize.arg_or_var(3) def unicode_encode_utf_8_impl(s, size, errors, errorhandler, allow_surrogates=False): assert(size >= 0) diff --git a/rpython/rlib/test/test_runicode.py b/rpython/rlib/test/test_runicode.py --- a/rpython/rlib/test/test_runicode.py +++ b/rpython/rlib/test/test_runicode.py @@ -55,7 +55,7 @@ s = s.encode(encoding) except LookupError as e: py.test.skip(e) - result, consumed = decoder(s, len(s), True) + result, consumed = decoder(s, len(s), 'strict', final=True) assert consumed == len(s) self.typeequals(trueresult, result) @@ -69,7 +69,7 @@ s = s.decode(encoding) except LookupError as e: py.test.skip(e) - result = encoder(s, len(s), True) + result = encoder(s, len(s), 'strict') self.typeequals(trueresult, result) def checkencodeerror(self, s, encoding, start, stop): @@ -823,9 +823,15 @@ def f(x): s1 = "".join(["\xd7\x90\xd6\x96\xeb\x96\x95\xf0\x90\x91\x93"] * x) - u, consumed = runicode.str_decode_utf_8(s1, len(s1), True) - s2 = runicode.unicode_encode_utf_8(u, len(u), True) - return s1 == s2 + u, consumed = runicode.str_decode_utf_8(s1, len(s1), 'strict', + allow_surrogates=True) + s2 = runicode.unicode_encode_utf_8(u, len(u), 'strict', + 
allow_surrogates=True) + u3, consumed3 = runicode.str_decode_utf_8(s1, len(s1), 'strict', + allow_surrogates=False) + s3 = runicode.unicode_encode_utf_8(u3, len(u3), 'strict', + allow_surrogates=False) + return s1 == s2 == s3 res = interpret(f, [2]) assert res diff --git a/rpython/rtyper/rint.py b/rpython/rtyper/rint.py --- a/rpython/rtyper/rint.py +++ b/rpython/rtyper/rint.py @@ -540,7 +540,7 @@ def ll_ullong_py_mod_zer(x, y): if y == 0: raise ZeroDivisionError - return llop.ullong_mod(UnsignedLongLong, x, y) + return ll_ullong_py_mod(x, y) @jit.dont_look_inside def ll_lllong_py_mod(x, y): diff --git a/rpython/rtyper/rstr.py b/rpython/rtyper/rstr.py --- a/rpython/rtyper/rstr.py +++ b/rpython/rtyper/rstr.py @@ -30,12 +30,13 @@ assert value is not None result = UnicodeBuilder(len(value)) self.rstr_decode_utf_8( - value, len(value), 'strict', final=False, + value, len(value), 'strict', final=True, errorhandler=self.ll_raise_unicode_exception_decode, allow_surrogates=False, result=result) return self.ll.llunicode(result.build()) - def ll_raise_unicode_exception_decode(self, errors, encoding, msg, s, + @staticmethod + def ll_raise_unicode_exception_decode(errors, encoding, msg, s, startingpos, endingpos): raise UnicodeDecodeError(encoding, s, startingpos, endingpos, msg) @@ -411,7 +412,8 @@ allow_surrogates=False) return self.ll.llstr(bytes) - def ll_raise_unicode_exception_encode(self, errors, encoding, msg, u, + @staticmethod + def ll_raise_unicode_exception_encode(errors, encoding, msg, u, startingpos, endingpos): raise UnicodeEncodeError(encoding, u, startingpos, endingpos, msg) diff --git a/rpython/rtyper/test/test_runicode.py b/rpython/rtyper/test/test_runicode.py --- a/rpython/rtyper/test/test_runicode.py +++ b/rpython/rtyper/test/test_runicode.py @@ -162,6 +162,18 @@ assert self.ll_to_string(self.interpret(f, [0])) == f(0) + def test_unicode_decode_final(self): + strings = ['\xc3', ''] + def f(n): + try: + strings[n].decode('utf-8') + except UnicodeDecodeError: 
+ return True + return False + + assert f(0) + assert self.interpret(f, [0]) + def test_utf_8_decoding_annotation(self): from rpython.rlib.runicode import str_decode_utf_8 def errorhandler(errors, encoding, msg, s, From pypy.commits at gmail.com Sun Sep 4 19:17:10 2016 From: pypy.commits at gmail.com (rlamy) Date: Sun, 04 Sep 2016 16:17:10 -0700 (PDT) Subject: [pypy-commit] pypy union-side-effects: Explicitly forbid comparisons between str and unicode Message-ID: <57ccab76.2146c20a.7f80b.6e31@mx.google.com> Author: Ronan Lamy Branch: union-side-effects Changeset: r86873:e1f64f1f25cc Date: 2016-09-05 00:16 +0100 http://bitbucket.org/pypy/pypy/changeset/e1f64f1f25cc/ Log: Explicitly forbid comparisons between str and unicode diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -641,6 +641,20 @@ result.const = str1.const + str2.const return result +for cmp_op in [op.lt, op.le, op.eq, op.ne, op.gt, op.ge]: + @cmp_op.register(SomeUnicodeString, SomeString) + @cmp_op.register(SomeUnicodeString, SomeChar) + @cmp_op.register(SomeString, SomeUnicodeString) + @cmp_op.register(SomeChar, SomeUnicodeString) + @cmp_op.register(SomeUnicodeCodePoint, SomeString) + @cmp_op.register(SomeUnicodeCodePoint, SomeChar) + @cmp_op.register(SomeString, SomeUnicodeCodePoint) + @cmp_op.register(SomeChar, SomeUnicodeCodePoint) + def cmp_str_unicode(annotator, v1, v2): + raise AnnotatorError( + "Comparing byte strings with unicode strings is not RPython") + + class __extend__(pairtype(SomeInteger, SomeList)): def mul((int1, lst2)): diff --git a/rpython/rlib/runicode.py b/rpython/rlib/runicode.py --- a/rpython/rlib/runicode.py +++ b/rpython/rlib/runicode.py @@ -1367,7 +1367,7 @@ oc = ord(ch) # Escape quotes - if quotes and (oc == quote or ch == '\\'): + if quotes and (oc == quote or ch == u'\\'): result.append(STR('\\')) result.append(CHR(oc)) pos += 1 @@ -1390,13 +1390,13 @@ pos -= 1 # Map special 
whitespace to '\t', \n', '\r' - if ch == '\t': + if ch == u'\t': result.append(STR('\\t')) - elif ch == '\n': + elif ch == u'\n': result.append(STR('\\n')) - elif ch == '\r': + elif ch == u'\r': result.append(STR('\\r')) - elif ch == '\\': + elif ch == u'\\': result.append(STR('\\\\')) # Map non-printable or non-ascii to '\xhh' or '\uhhhh' From pypy.commits at gmail.com Sun Sep 4 22:25:01 2016 From: pypy.commits at gmail.com (rlamy) Date: Sun, 04 Sep 2016 19:25:01 -0700 (PDT) Subject: [pypy-commit] pypy union-side-effects: Fix str vs unicode correctness issues Message-ID: <57ccd77d.c3f0c20a.54624.a917@mx.google.com> Author: Ronan Lamy Branch: union-side-effects Changeset: r86874:350957bdef41 Date: 2016-09-05 03:24 +0100 http://bitbucket.org/pypy/pypy/changeset/350957bdef41/ Log: Fix str vs unicode correctness issues diff --git a/pypy/module/_io/interp_textio.py b/pypy/module/_io/interp_textio.py --- a/pypy/module/_io/interp_textio.py +++ b/pypy/module/_io/interp_textio.py @@ -231,16 +231,16 @@ while True: # Fast path for non-control chars. The loop always ends # since the Py_UNICODE storage is NUL-terminated. 
- while i < size and line[start + i] > '\r': + while i < size and line[start + i] > u'\r': i += 1 if i >= size: return -1, size ch = line[start + i] i += 1 - if ch == '\n': + if ch == u'\n': return i, 0 - if ch == '\r': - if line[start + i] == '\n': + if ch == u'\r': + if line[start + i] == u'\n': return i + 1, 0 else: return i, 0 diff --git a/pypy/objspace/std/formatting.py b/pypy/objspace/std/formatting.py --- a/pypy/objspace/std/formatting.py +++ b/pypy/objspace/std/formatting.py @@ -184,11 +184,11 @@ except IndexError: space = self.space raise oefmt(space.w_ValueError, "incomplete format key") - if c == ')': + if c == const(')'): pcount -= 1 if pcount == 0: break - elif c == '(': + elif c == const('('): pcount += 1 i += 1 self.fmtpos = i + 1 # first character after ')' @@ -203,7 +203,7 @@ return space.getitem(self.w_valuedict, w_key) def parse_fmt(self): - if self.peekchr() == '(': + if self.peekchr() == const('('): w_value = self.getmappingvalue(self.getmappingkey()) else: w_value = None @@ -216,7 +216,7 @@ self.f_ljust = True self.width = -self.width - if self.peekchr() == '.': + if self.peekchr() == const('.'): self.forward() self.prec = self.peel_num('prec', INT_MAX) if self.prec < 0: @@ -225,7 +225,7 @@ self.prec = -1 c = self.peekchr() - if c == 'h' or c == 'l' or c == 'L': + if c == const('h') or c == const('l') or c == const('L'): self.forward() return w_value @@ -240,15 +240,15 @@ self.f_zero = False while True: c = self.peekchr() - if c == '-': + if c == const('-'): self.f_ljust = True - elif c == '+': + elif c == const('+'): self.f_sign = True - elif c == ' ': + elif c == const(' '): self.f_blank = True - elif c == '#': + elif c == const('#'): self.f_alt = True - elif c == '0': + elif c == const('0'): self.f_zero = True else: break @@ -259,7 +259,7 @@ def peel_num(self, name, maxval): space = self.space c = self.peekchr() - if c == '*': + if c == const('*'): self.forward() w_value = self.nextinputvalue() if name == 'width': @@ -293,7 +293,7 @@ fmt = 
self.fmt i = i0 = self.fmtpos while i < len(fmt): - if fmt[i] == '%': + if fmt[i] == const('%'): break i += 1 else: @@ -306,7 +306,7 @@ w_value = self.parse_fmt() c = self.peekchr() self.forward() - if c == '%': + if c == const('%'): self.std_wp(const('%')) continue if w_value is None: @@ -315,7 +315,7 @@ # dispatch on the formatter # (this turns into a switch after translation) for c1 in FORMATTER_CHARS: - if c == c1: + if c == const(c1): # 'c1' is an annotation constant here, # so this getattr() is ok do_fmt = getattr(self, 'fmt_' + c1) diff --git a/pypy/objspace/std/newformat.py b/pypy/objspace/std/newformat.py --- a/pypy/objspace/std/newformat.py +++ b/pypy/objspace/std/newformat.py @@ -45,6 +45,7 @@ def make_template_formatting_class(for_unicode): + STR = unicode if for_unicode else str class TemplateFormatter(object): is_unicode = for_unicode @@ -90,19 +91,19 @@ while i < end: c = s[i] i += 1 - if c == "{" or c == "}": + if c == STR("{") or c == STR("}"): at_end = i == end # Find escaped "{" and "}" markup_follows = True - if c == "}": - if at_end or s[i] != "}": + if c == STR("}"): + if at_end or s[i] != STR("}"): raise oefmt(space.w_ValueError, "Single '}'") i += 1 markup_follows = False - if c == "{": + if c == STR("{"): if at_end: raise oefmt(space.w_ValueError, "Single '{'") - if s[i] == "{": + if s[i] == STR("{"): i += 1 markup_follows = False # Attach literal data, ending with { or } @@ -124,10 +125,10 @@ recursive = False while i < end: c = s[i] - if c == "{": + if c == STR("{"): recursive = True nested += 1 - elif c == "}": + elif c == STR("}"): nested -= 1 if not nested: break @@ -150,9 +151,9 @@ i = start while i < end: c = s[i] - if c == ":" or c == "!": + if c == STR(":") or c == STR("!"): end_name = i - if c == "!": + if c == STR("!"): i += 1 if i == end: raise oefmt(self.space.w_ValueError, @@ -160,7 +161,7 @@ conversion = s[i] i += 1 if i < end: - if s[i] != ':': + if s[i] != STR(':'): raise oefmt(self.space.w_ValueError, "expected ':' after 
format " "specifier") @@ -180,7 +181,7 @@ end = len(name) while i < end: c = name[i] - if c == "[" or c == ".": + if c == STR("[") or c == STR("."): break i += 1 empty = not i @@ -232,12 +233,12 @@ i = start while i < end: c = name[i] - if c == ".": + if c == STR("."): i += 1 start = i while i < end: c = name[i] - if c == "[" or c == ".": + if c == STR("[") or c == STR("."): break i += 1 if start == i: @@ -249,13 +250,13 @@ else: self.parser_list_w.append(space.newtuple([ space.w_True, w_attr])) - elif c == "[": + elif c == STR("["): got_bracket = False i += 1 start = i while i < end: c = name[i] - if c == "]": + if c == STR("]"): got_bracket = True break i += 1 @@ -284,7 +285,7 @@ end = len(name) while i < end: c = name[i] - if c == "[" or c == ".": + if c == STR("[") or c == STR("."): break i += 1 if i == 0: @@ -307,9 +308,9 @@ def _convert(self, w_obj, conversion): space = self.space conv = conversion[0] - if conv == "r": + if conv == STR("r"): return space.repr(w_obj) - elif conv == "s": + elif conv == STR("s"): if self.is_unicode: return space.call_function(space.w_unicode, w_obj) return space.str(w_obj) @@ -400,6 +401,7 @@ LONG_DIGITS = string.digits + string.ascii_lowercase def make_formatting_class(for_unicode): + _lit = unicode if for_unicode else str class Formatter(BaseFormatter): """__format__ implementation for builtin types.""" @@ -412,22 +414,22 @@ self.spec = spec def _is_alignment(self, c): - return (c == "<" or - c == ">" or - c == "=" or - c == "^") + return (c == _lit("<") or + c == _lit(">") or + c == _lit("=") or + c == _lit("^")) def _is_sign(self, c): - return (c == " " or - c == "+" or - c == "-") + return (c == _lit(" ") or + c == _lit("+") or + c == _lit("-")) def _parse_spec(self, default_type, default_align): space = self.space self._fill_char = self._lit(" ")[0] self._align = default_align self._alternate = False - self._sign = "\0" + self._sign = _lit("\0") self._thousands_sep = False self._precision = -1 the_type = default_type @@ 
-451,19 +453,19 @@ if length - i >= 1 and self._is_sign(spec[i]): self._sign = spec[i] i += 1 - if length - i >= 1 and spec[i] == "#": + if length - i >= 1 and spec[i] == _lit("#"): self._alternate = True i += 1 - if not got_fill_char and length - i >= 1 and spec[i] == "0": + if not got_fill_char and length - i >= 1 and spec[i] == _lit("0"): self._fill_char = self._lit("0")[0] if not got_align: - self._align = "=" + self._align = _lit("=") i += 1 self._width, i = _parse_int(self.space, spec, i, length) - if length != i and spec[i] == ",": + if length != i and spec[i] == _lit(","): self._thousands_sep = True i += 1 - if length != i and spec[i] == ".": + if length != i and spec[i] == _lit("."): i += 1 self._precision, i = _parse_int(self.space, spec, i, length) if self._precision == -1: @@ -471,28 +473,20 @@ if length - i > 1: raise oefmt(space.w_ValueError, "invalid format spec") if length - i == 1: - presentation_type = spec[i] - if self.is_unicode: - try: - the_type = spec[i].encode("ascii")[0] - except UnicodeEncodeError: - raise oefmt(space.w_ValueError, - "invalid presentation type") - else: - the_type = presentation_type + the_type = spec[i] i += 1 self._type = the_type if self._thousands_sep: tp = self._type - if (tp == "d" or - tp == "e" or - tp == "f" or - tp == "g" or - tp == "E" or - tp == "G" or - tp == "%" or - tp == "F" or - tp == "\0"): + if (tp == _lit("d") or + tp == _lit("e") or + tp == _lit("f") or + tp == _lit("g") or + tp == _lit("E") or + tp == _lit("G") or + tp == _lit("%") or + tp == _lit("F") or + tp == _lit("\0")): # ok pass else: @@ -506,11 +500,11 @@ else: total = length align = self._align - if align == ">": + if align == _lit(">"): left = total - length - elif align == "^": + elif align == _lit("^"): left = (total - length) / 2 - elif align == "<" or align == "=": + elif align == _lit("<") or align == _lit("="): left = 0 else: raise AssertionError("shouldn't be here") @@ -539,23 +533,24 @@ return rstring.StringBuilder() def 
_unknown_presentation(self, tp): + spec = self._type.encode('ascii') if for_unicode else self._type raise oefmt(self.space.w_ValueError, - "unknown presentation for %s: '%s'", tp, self._type) + "unknown presentation for %s: '%s'", tp, spec) def format_string(self, string): space = self.space - if self._parse_spec("s", "<"): + if self._parse_spec(_lit("s"), _lit("<")): return space.wrap(string) - if self._type != "s": + if self._type != _lit("s"): self._unknown_presentation("string") - if self._sign != "\0": + if self._sign != _lit("\0"): raise oefmt(space.w_ValueError, "Sign not allowed in string format specifier") if self._alternate: raise oefmt(space.w_ValueError, "Alternate form (#) not allowed in string format " "specifier") - if self._align == "=": + if self._align == _lit("="): raise oefmt(space.w_ValueError, "'=' alignment not allowed in string format " "specifier") @@ -760,8 +755,8 @@ "precision not allowed in integer type") sign_char = "\0" tp = self._type - if tp == "c": - if self._sign != "\0": + if tp == _lit("c"): + if self._sign != _lit("\0"): raise oefmt(space.w_ValueError, "sign not allowed with 'c' presentation type") value = space.int_w(w_num) @@ -776,16 +771,16 @@ to_prefix = 0 to_numeric = 0 else: - if tp == "b": + if tp == _lit("b"): base = 2 skip_leading = 2 - elif tp == "o": + elif tp == _lit("o"): base = 8 skip_leading = 2 - elif tp == "x" or tp == "X": + elif tp == _lit("x") or tp == _lit("X"): base = 16 skip_leading = 2 - elif tp == "n" or tp == "d": + elif tp == _lit("n") or tp == _lit("d"): base = 10 skip_leading = 0 else: @@ -808,7 +803,7 @@ spec = self._calc_num_width(n_prefix, sign_char, to_numeric, n_digits, n_remainder, False, result) fill = self._fill_char - upper = self._type == "X" + upper = self._type == _lit("X") return self.space.wrap(self._fill_number(spec, result, to_numeric, to_prefix, fill, to_remainder, upper)) @@ -874,7 +869,7 @@ def format_int_or_long(self, w_num, kind): space = self.space - if self._parse_spec("d", 
">"): + if self._parse_spec(_lit("d"), _lit(">")): if self.is_unicode: return space.call_function(space.w_unicode, w_num) return self.space.str(w_num) @@ -922,15 +917,15 @@ "Alternate form (#) not allowed in float formats") tp = self._type self._get_locale(tp) - if tp == "\0": - tp = "g" + if tp == _lit("\0"): + tp = _lit("g") default_precision = 12 flags |= rfloat.DTSF_ADD_DOT_0 - elif tp == "n": - tp = "g" + elif tp == _lit("n"): + tp = _lit("g") value = space.float_w(w_float) - if tp == "%": - tp = "f" + if tp == _lit("%"): + tp = _lit("f") value *= 100 add_pct = True else: @@ -963,7 +958,7 @@ def format_float(self, w_float): space = self.space - if self._parse_spec("\0", ">"): + if self._parse_spec(_lit("\0"), _lit(">")): if self.is_unicode: return space.call_function(space.w_unicode, w_float) return space.str(w_float) @@ -1002,9 +997,9 @@ "specifier") skip_re = 0 add_parens = 0 - if tp == "\0": + if tp == _lit("\0"): #should mirror str() output - tp = "g" + tp = _lit("g") default_precision = 12 #test if real part is non-zero if (w_complex.realval == 0 and @@ -1013,9 +1008,9 @@ else: add_parens = 1 - if tp == "n": + if tp == _lit("n"): #same as 'g' except for locale, taken care of later - tp = "g" + tp = _lit("g") #check if precision not set if self._precision == -1: @@ -1073,7 +1068,7 @@ #self._grouped_digits will get overwritten in imaginary calc_num_width re_grouped_digits = self._grouped_digits if not skip_re: - self._sign = "+" + self._sign = _lit("+") im_spec = self._calc_num_width(0, im_sign, to_imag_number, n_im_digits, im_n_remainder, im_have_dec, im_num) @@ -1127,7 +1122,7 @@ """return the string representation of a complex number""" space = self.space #parse format specification, set associated variables - if self._parse_spec("\0", ">"): + if self._parse_spec(_lit("\0"), _lit(">")): return space.str(w_complex) tp = self._type if (tp == "\0" or @@ -1148,9 +1143,5 @@ @specialize.arg(2) def run_formatter(space, w_format_spec, meth, *args): - if 
space.isinstance_w(w_format_spec, space.w_unicode): - formatter = unicode_formatter(space, space.unicode_w(w_format_spec)) - return getattr(formatter, meth)(*args) - else: - formatter = str_formatter(space, space.str_w(w_format_spec)) - return getattr(formatter, meth)(*args) + formatter = str_formatter(space, space.str_w(w_format_spec)) + return getattr(formatter, meth)(*args) diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -220,7 +220,7 @@ offset = len(token) while 1: - if token[offset-1] == "\n" or token[offset-1] == "\r": + if token[offset-1] == self._chr("\n") or token[offset-1] == self._chr("\r"): break distance += 1 offset -= 1 @@ -598,7 +598,8 @@ eol = pos pos += 1 # read CRLF as one line break - if pos < length and value[eol] == '\r' and value[pos] == '\n': + if (pos < length and value[eol] == self._chr('\r') + and value[pos] == self._chr('\n')): pos += 1 if keepends: eol = pos @@ -780,7 +781,8 @@ return self._new(selfval) builder = self._builder(width) - if len(selfval) > 0 and (selfval[0] == '+' or selfval[0] == '-'): + if len(selfval) > 0 and ( + selfval[0] == self._chr('+') or selfval[0] == self._chr('-')): # copy sign to first position builder.append(selfval[0]) start = 1 From pypy.commits at gmail.com Mon Sep 5 03:29:26 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 05 Sep 2016 00:29:26 -0700 (PDT) Subject: [pypy-commit] pypy redirect-assembler-jitlog: translation issues Message-ID: <57cd1ed6.68adc20a.966c2.f187@mx.google.com> Author: Richard Plangger Branch: redirect-assembler-jitlog Changeset: r86875:adb14149ca02 Date: 2016-09-05 08:34 +0200 http://bitbucket.org/pypy/pypy/changeset/adb14149ca02/ Log: translation issues diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -1123,8 +1123,8 @@ """ 
jitcell_token = make_jitcell_token(jitdriver_sd) # - logger = jitdriver_sd.metainterp_sd.jitlog - jitcell_token.number = logger.next_id() + #logger = jitdriver_sd.metainterp_sd.jitlog + #jitcell_token.number = logger.next_id() jl.tmp_callback(jitcell_token) # nb_red_args = jitdriver_sd.num_red_args diff --git a/rpython/rlib/rjitlog/rjitlog.py b/rpython/rlib/rjitlog/rjitlog.py --- a/rpython/rlib/rjitlog/rjitlog.py +++ b/rpython/rlib/rjitlog/rjitlog.py @@ -255,12 +255,13 @@ if __name__ == "__main__": print("# generated constants from rpython/rlib/jitlog.py") - print 'MARK_JITLOG_START = struct.pack("b", "%s")' % hex(0x10) + print('import struct') + print('MARK_JITLOG_START = struct.pack("b", %s)' % hex(0x10)) for mark, in marks: nmr = globals()['MARK_' + mark] h = hex(ord(nmr)) - print '%s = struct.pack("b", "%s")' % ('MARK_' + mark, h) - print 'MARK_JITLOG_END = struct.pack("b", "%s")' % hex(start) + print '%s = struct.pack("b", %s)' % ('MARK_' + mark, h) + print 'MARK_JITLOG_END = struct.pack("b", %s)' % hex(start) for key,value in locals().items(): if key.startswith("MP_"): print '%s = (%s,"%s")' % (key, hex(value[0]), value[1]) @@ -323,10 +324,10 @@ def tmp_callback(looptoken): mark_tmp_callback = ''.join([ - jl.MARK_TMP_CALLBACK, + MARK_TMP_CALLBACK, encode_le_addr(compute_unique_id(looptoken)), encode_le_64bit(looptoken.number)]) - jl.jitlog_write_marked(mark_tmp_callback, len(mark_tmp_callback)) + jitlog_write_marked(mark_tmp_callback, len(mark_tmp_callback)) class JitLogger(object): def __init__(self, cpu=None): From pypy.commits at gmail.com Mon Sep 5 03:29:31 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 05 Sep 2016 00:29:31 -0700 (PDT) Subject: [pypy-commit] pypy ppc-vsx-support: merge default Message-ID: <57cd1edb.121a1c0a.10647.88f5@mx.google.com> Author: Richard Plangger Branch: ppc-vsx-support Changeset: r86876:ab4c074e8f5f Date: 2016-09-05 08:35 +0200 http://bitbucket.org/pypy/pypy/changeset/ab4c074e8f5f/ Log: merge default diff too 
long, truncating to 2000 out of 213218 lines diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -27,3 +27,6 @@ 40497617ae91caa1a394d8be6f9cd2de31cb0628 release-pypy3.3-v5.2 c09c19272c990a0611b17569a0085ad1ab00c8ff release-pypy2.7-v5.3 7e8df3df96417c16c2d55b41352ec82c9c69c978 release-pypy2.7-v5.3.1 +68bb3510d8212ae9efb687e12e58c09d29e74f87 release-pypy2.7-v5.4.0 +68bb3510d8212ae9efb687e12e58c09d29e74f87 release-pypy2.7-v5.4.0 +77392ad263504df011ccfcabf6a62e21d04086d0 release-pypy2.7-v5.4.0 diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -74,6 +74,7 @@ Seo Sanghyeon Ronny Pfannschmidt Justin Peel + Raffael Tfirst David Edelsohn Anders Hammarquist Jakub Gustak @@ -117,7 +118,6 @@ Wenzhu Man John Witulski Laurence Tratt - Raffael Tfirst Ivan Sichmann Freitas Greg Price Dario Bertini @@ -141,6 +141,7 @@ tav Taavi Burns Georg Brandl + Nicolas Truessel Bert Freudenberg Stian Andreassen Wanja Saatkamp @@ -211,6 +212,7 @@ Vaibhav Sood Alan McIntyre Alexander Sedov + p_zieschang at yahoo.de Attila Gobi Jasper.Schulz Christopher Pope @@ -221,6 +223,7 @@ Arjun Naik Valentina Mukhamedzhanova Stefano Parmesan + touilleMan Alexis Daboville Jens-Uwe Mager Carl Meyer @@ -229,12 +232,14 @@ Gabriel Lukas Vacek Kunal Grover + Aaron Gallagher Andrew Dalke Sylvain Thenault Jakub Stasiak Nathan Taylor Vladimir Kryachko Omer Katz + Mark Williams Jacek Generowicz Alejandro J. Cura Jacob Oscarson @@ -355,12 +360,15 @@ yasirs Michael Chermside Anna Ravencroft + pizi Andrey Churin Dan Crosta + Eli Stevens Tobias Diaz Julien Phalip Roman Podoliaka Dan Loewenherz + werat Heinrich-Heine University, Germany Open End AB (formerly AB Strakt), Sweden diff --git a/_pytest/python.py b/_pytest/python.py --- a/_pytest/python.py +++ b/_pytest/python.py @@ -498,7 +498,10 @@ """ Collector for test methods. 
""" def collect(self): if hasinit(self.obj): - pytest.skip("class %s.%s with __init__ won't get collected" % ( + # XXX used to be skip(), but silently skipping classes + # XXX just because they have been written long ago is + # XXX imho a very, very, very bad idea + pytest.fail("class %s.%s with __init__ won't get collected" % ( self.obj.__module__, self.obj.__name__, )) diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -122,22 +122,24 @@ """Dummy method to let some easy_install packages that have optional C speedup components. """ + def customize(executable, flags): + command = compiler.executables[executable] + flags + setattr(compiler, executable, command) + if compiler.compiler_type == "unix": compiler.compiler_so.extend(['-O2', '-fPIC', '-Wimplicit']) compiler.shared_lib_extension = get_config_var('SO') if "CPPFLAGS" in os.environ: cppflags = shlex.split(os.environ["CPPFLAGS"]) - compiler.compiler.extend(cppflags) - compiler.compiler_so.extend(cppflags) - compiler.linker_so.extend(cppflags) + for executable in ('compiler', 'compiler_so', 'linker_so'): + customize(executable, cppflags) if "CFLAGS" in os.environ: cflags = shlex.split(os.environ["CFLAGS"]) - compiler.compiler.extend(cflags) - compiler.compiler_so.extend(cflags) - compiler.linker_so.extend(cflags) + for executable in ('compiler', 'compiler_so', 'linker_so'): + customize(executable, cflags) if "LDFLAGS" in os.environ: ldflags = shlex.split(os.environ["LDFLAGS"]) - compiler.linker_so.extend(ldflags) + customize('linker_so', ldflags) from sysconfig_cpython import ( diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py --- a/lib_pypy/_ctypes/basics.py +++ b/lib_pypy/_ctypes/basics.py @@ -167,7 +167,7 @@ else: return self.value - def __buffer__(self): + def __buffer__(self, flags): return buffer(self._buffer) def _get_b_base(self): diff 
--git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -342,7 +342,7 @@ thisarg = cast(thisvalue, POINTER(POINTER(c_void_p))) keepalives, newargs, argtypes, outargs, errcheckargs = ( self._convert_args(argtypes, args[1:], kwargs)) - newargs.insert(0, thisvalue.value) + newargs.insert(0, thisarg) argtypes.insert(0, c_void_p) else: thisarg = None diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO --- a/lib_pypy/cffi.egg-info/PKG-INFO +++ b/lib_pypy/cffi.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cffi -Version: 1.8.0 +Version: 1.8.1 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.8.0" -__version_info__ = (1, 8, 0) +__version__ = "1.8.1" +__version_info__ = (1, 8, 1) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. It may be older than __version__ diff --git a/lib_pypy/cffi/_cffi_include.h b/lib_pypy/cffi/_cffi_include.h --- a/lib_pypy/cffi/_cffi_include.h +++ b/lib_pypy/cffi/_cffi_include.h @@ -1,4 +1,20 @@ #define _CFFI_ + +/* We try to define Py_LIMITED_API before including Python.h. + + Mess: we can only define it if Py_DEBUG, Py_TRACE_REFS and + Py_REF_DEBUG are not defined. This is a best-effort approximation: + we can learn about Py_DEBUG from pyconfig.h, but it is unclear if + the same works for the other two macros. Py_DEBUG implies them, + but not the other way around. 
+*/ +#ifndef _CFFI_USE_EMBEDDING +# include +# if !defined(Py_DEBUG) && !defined(Py_TRACE_REFS) && !defined(Py_REF_DEBUG) +# define Py_LIMITED_API +# endif +#endif + #include #ifdef __cplusplus extern "C" { diff --git a/lib_pypy/cffi/_embedding.h b/lib_pypy/cffi/_embedding.h --- a/lib_pypy/cffi/_embedding.h +++ b/lib_pypy/cffi/_embedding.h @@ -233,7 +233,7 @@ f = PySys_GetObject((char *)"stderr"); if (f != NULL && f != Py_None) { PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME - "\ncompiled with cffi version: 1.8.0" + "\ncompiled with cffi version: 1.8.1" "\n_cffi_backend module: ", f); modules = PyImport_GetModuleDict(); mod = PyDict_GetItemString(modules, "_cffi_backend"); diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -652,7 +652,7 @@ recompile(self, module_name, source, c_file=filename, call_c_compiler=False, **kwds) - def compile(self, tmpdir='.', verbose=0, target=None): + def compile(self, tmpdir='.', verbose=0, target=None, debug=None): """The 'target' argument gives the final file name of the compiled DLL. Use '*' to force distutils' choice, suitable for regular CPython C API modules. 
Use a file name ending in '.*' @@ -669,7 +669,7 @@ module_name, source, source_extension, kwds = self._assigned_source return recompile(self, module_name, source, tmpdir=tmpdir, target=target, source_extension=source_extension, - compiler_verbose=verbose, **kwds) + compiler_verbose=verbose, debug=debug, **kwds) def init_once(self, func, tag): # Read _init_once_cache[tag], which is either (False, lock) if diff --git a/lib_pypy/cffi/backend_ctypes.py b/lib_pypy/cffi/backend_ctypes.py --- a/lib_pypy/cffi/backend_ctypes.py +++ b/lib_pypy/cffi/backend_ctypes.py @@ -997,29 +997,43 @@ assert onerror is None # XXX not implemented return BType(source, error) + _weakref_cache_ref = None + def gcp(self, cdata, destructor): - BType = self.typeof(cdata) + if self._weakref_cache_ref is None: + import weakref + class MyRef(weakref.ref): + def __eq__(self, other): + myref = self() + return self is other or ( + myref is not None and myref is other()) + def __ne__(self, other): + return not (self == other) + def __hash__(self): + try: + return self._hash + except AttributeError: + self._hash = hash(self()) + return self._hash + self._weakref_cache_ref = {}, MyRef + weak_cache, MyRef = self._weakref_cache_ref if destructor is None: - if not (hasattr(BType, '_gcp_type') and - BType._gcp_type is BType): + try: + del weak_cache[MyRef(cdata)] + except KeyError: raise TypeError("Can remove destructor only on a object " "previously returned by ffi.gc()") - cdata._destructor = None return None - try: - gcp_type = BType._gcp_type - except AttributeError: - class CTypesDataGcp(BType): - __slots__ = ['_orig', '_destructor'] - def __del__(self): - if self._destructor is not None: - self._destructor(self._orig) - gcp_type = BType._gcp_type = CTypesDataGcp - new_cdata = self.cast(gcp_type, cdata) - new_cdata._orig = cdata - new_cdata._destructor = destructor + def remove(k): + cdata, destructor = weak_cache.pop(k, (None, None)) + if destructor is not None: + destructor(cdata) + + new_cdata = 
self.cast(self.typeof(cdata), cdata) + assert new_cdata is not cdata + weak_cache[MyRef(new_cdata, remove)] = (cdata, destructor) return new_cdata typeof = type diff --git a/lib_pypy/cffi/ffiplatform.py b/lib_pypy/cffi/ffiplatform.py --- a/lib_pypy/cffi/ffiplatform.py +++ b/lib_pypy/cffi/ffiplatform.py @@ -21,12 +21,12 @@ allsources.append(os.path.normpath(src)) return Extension(name=modname, sources=allsources, **kwds) -def compile(tmpdir, ext, compiler_verbose=0): +def compile(tmpdir, ext, compiler_verbose=0, debug=None): """Compile a C extension module using distutils.""" saved_environ = os.environ.copy() try: - outputfilename = _build(tmpdir, ext, compiler_verbose) + outputfilename = _build(tmpdir, ext, compiler_verbose, debug) outputfilename = os.path.abspath(outputfilename) finally: # workaround for a distutils bugs where some env vars can @@ -36,7 +36,7 @@ os.environ[key] = value return outputfilename -def _build(tmpdir, ext, compiler_verbose=0): +def _build(tmpdir, ext, compiler_verbose=0, debug=None): # XXX compact but horrible :-( from distutils.core import Distribution import distutils.errors, distutils.log @@ -44,6 +44,9 @@ dist = Distribution({'ext_modules': [ext]}) dist.parse_config_files() options = dist.get_option_dict('build_ext') + if debug is None: + debug = sys.flags.debug + options['debug'] = ('ffiplatform', debug) options['force'] = ('ffiplatform', True) options['build_lib'] = ('ffiplatform', tmpdir) options['build_temp'] = ('ffiplatform', tmpdir) diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py --- a/lib_pypy/cffi/recompiler.py +++ b/lib_pypy/cffi/recompiler.py @@ -275,8 +275,8 @@ def write_c_source_to_f(self, f, preamble): self._f = f prnt = self._prnt - if self.ffi._embedding is None: - prnt('#define Py_LIMITED_API') + if self.ffi._embedding is not None: + prnt('#define _CFFI_USE_EMBEDDING') # # first the '#include' (actually done by inlining the file's content) lines = self._rel_readlines('_cffi_include.h') @@ -1431,7 
+1431,7 @@ def recompile(ffi, module_name, preamble, tmpdir='.', call_c_compiler=True, c_file=None, source_extension='.c', extradir=None, - compiler_verbose=1, target=None, **kwds): + compiler_verbose=1, target=None, debug=None, **kwds): if not isinstance(module_name, str): module_name = module_name.encode('ascii') if ffi._windows_unicode: @@ -1467,7 +1467,8 @@ if target != '*': _patch_for_target(patchlist, target) os.chdir(tmpdir) - outputfilename = ffiplatform.compile('.', ext, compiler_verbose) + outputfilename = ffiplatform.compile('.', ext, + compiler_verbose, debug) finally: os.chdir(cwd) _unpatch_meths(patchlist) diff --git a/lib_pypy/cffi/setuptools_ext.py b/lib_pypy/cffi/setuptools_ext.py --- a/lib_pypy/cffi/setuptools_ext.py +++ b/lib_pypy/cffi/setuptools_ext.py @@ -69,16 +69,36 @@ else: _add_c_module(dist, ffi, module_name, source, source_extension, kwds) +def _set_py_limited_api(Extension, kwds): + """ + Add py_limited_api to kwds if setuptools >= 26 is in use. + Do not alter the setting if it already exists. + Setuptools takes care of ignoring the flag on Python 2 and PyPy. + """ + if 'py_limited_api' not in kwds: + import setuptools + try: + setuptools_major_version = int(setuptools.__version__.partition('.')[0]) + if setuptools_major_version >= 26: + kwds['py_limited_api'] = True + except ValueError: # certain development versions of setuptools + # If we don't know the version number of setuptools, we + # try to set 'py_limited_api' anyway. At worst, we get a + # warning. + kwds['py_limited_api'] = True + return kwds def _add_c_module(dist, ffi, module_name, source, source_extension, kwds): from distutils.core import Extension - from distutils.command.build_ext import build_ext + # We are a setuptools extension. Need this build_ext for py_limited_api. 
+ from setuptools.command.build_ext import build_ext from distutils.dir_util import mkpath from distutils import log from cffi import recompiler allsources = ['$PLACEHOLDER'] allsources.extend(kwds.pop('sources', [])) + kwds = _set_py_limited_api(Extension, kwds) ext = Extension(name=module_name, sources=allsources, **kwds) def make_mod(tmpdir, pre_run=None): diff --git a/lib_pypy/gdbm.py b/lib_pypy/gdbm.py --- a/lib_pypy/gdbm.py +++ b/lib_pypy/gdbm.py @@ -137,6 +137,8 @@ lib.gdbm_sync(self.__ll_dbm) def open(filename, flags='r', mode=0666): + if isinstance(filename, unicode): + filename = filename.encode() if flags[0] == 'r': iflags = lib.GDBM_READER elif flags[0] == 'w': diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -58,16 +58,16 @@ # General information about the project. project = u'PyPy' -copyright = u'2015, The PyPy Project' +copyright = u'2016, The PyPy Project' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. -version = '4.0' +version = '5.4' # The full version, including alpha/beta/rc tags. -release = '4.0.0' +release = '5.4.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -44,6 +44,7 @@ Seo Sanghyeon Ronny Pfannschmidt Justin Peel + Raffael Tfirst David Edelsohn Anders Hammarquist Jakub Gustak @@ -87,7 +88,6 @@ Wenzhu Man John Witulski Laurence Tratt - Raffael Tfirst Ivan Sichmann Freitas Greg Price Dario Bertini @@ -111,6 +111,7 @@ tav Taavi Burns Georg Brandl + Nicolas Truessel Bert Freudenberg Stian Andreassen Wanja Saatkamp @@ -181,6 +182,7 @@ Vaibhav Sood Alan McIntyre Alexander Sedov + p_zieschang at yahoo.de Attila Gobi Jasper.Schulz Christopher Pope @@ -191,6 +193,7 @@ Arjun Naik Valentina Mukhamedzhanova Stefano Parmesan + touilleMan Alexis Daboville Jens-Uwe Mager Carl Meyer @@ -199,12 +202,14 @@ Gabriel Lukas Vacek Kunal Grover + Aaron Gallagher Andrew Dalke Sylvain Thenault Jakub Stasiak Nathan Taylor Vladimir Kryachko Omer Katz + Mark Williams Jacek Generowicz Alejandro J. Cura Jacob Oscarson @@ -325,9 +330,12 @@ yasirs Michael Chermside Anna Ravencroft + pizi Andrey Churin Dan Crosta + Eli Stevens Tobias Diaz Julien Phalip Roman Podoliaka Dan Loewenherz + werat diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -6,6 +6,7 @@ .. toctree:: + release-pypy2.7-v5.4.0.rst release-pypy2.7-v5.3.1.rst release-pypy2.7-v5.3.0.rst release-5.1.1.rst diff --git a/pypy/doc/index-of-whatsnew.rst b/pypy/doc/index-of-whatsnew.rst --- a/pypy/doc/index-of-whatsnew.rst +++ b/pypy/doc/index-of-whatsnew.rst @@ -7,6 +7,7 @@ .. toctree:: whatsnew-head.rst + whatsnew-pypy2-5.4.0.rst whatsnew-pypy2-5.3.1.rst whatsnew-pypy2-5.3.0.rst whatsnew-5.1.0.rst diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -57,7 +57,7 @@ -------------- Our cpyext C-API compatiblity layer can now run upstream NumPy unmodified. 
-Release PyPy2.7-v5.3 still fails about 200 of the ~6000 test in the NumPy +Release PyPy2.7-v5.4 still fails about 60 of the ~6000 test in the NumPy test suite. We could use help analyzing the failures and fixing them either as patches to upstream NumPy, or as fixes to PyPy. diff --git a/pypy/doc/release-pypy2.7-v5.4.0.rst b/pypy/doc/release-pypy2.7-v5.4.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-pypy2.7-v5.4.0.rst @@ -0,0 +1,218 @@ +============ +PyPy2.7 v5.4 +============ + +We have released PyPy2.7 v5.4, a little under two months after PyPy2.7 v5.3. +This new PyPy2.7 release includes incremental improvements to our C-API +compatability layer (cpyext), enabling us to pass over 99% of the upstream +numpy `test suite`_. We updated built-in cffi_ support to version 1.8, +which now supports the "limited API" mode for c-extensions on +CPython >=3.2. + +We improved tooling for the PyPy JIT_, and expanded VMProf +support to OpenBSD and Dragon Fly BSD + +As always, this release fixed many issues and bugs raised by the +growing community of PyPy users. We strongly recommend updating. + +You can download the PyPy2.7 v5.4 release here: + + http://pypy.org/download.html + +We would like to thank our donors for the continued support of the PyPy +project. + +We would also like to thank our contributors and +encourage new people to join the project. PyPy has many +layers and we need help with all of them: `PyPy`_ and `RPython`_ documentation +improvements, tweaking popular `modules`_ to run on pypy, or general `help`_ +with making RPython's JIT even better. + +.. _`test suite`: https://bitbucket.org/pypy/pypy/wiki/Adventures%20in%20cpyext%20compatibility +.. _cffi: https://cffi.readthedocs.org +.. _JIT: https://morepypy.blogspot.com.au/2016/08/pypy-tooling-upgrade-jitviewer-and.html +.. _`PyPy`: http://doc.pypy.org +.. _`RPython`: https://rpython.readthedocs.org +.. 
_`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly +.. _`help`: http://doc.pypy.org/en/latest/project-ideas.html + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison) +due to its integrated tracing JIT compiler. + +We also welcome developers of other `dynamic languages`_ to see what RPython +can do for them. + +This release supports: + + * **x86** machines on most common operating systems + (Linux 32/64 bits, Mac OS X 64 bits, Windows 32 bits, OpenBSD, FreeBSD) + + * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux, + + * big- and little-endian variants of **PPC64** running Linux, + + * **s390x** running Linux + +.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org +.. _`dynamic languages`: http://pypyjs.org + +Other Highlights (since 5.3 released in June 2016) +========================================================= + +* New features: + + * Add `sys.{get,set}dlopenflags` + + * Improve CPython compatibility of 'is' for small and empty strings + + * Support for rgc.FinalizerQueue in the Boehm garbage collector + + * (RPython) support spawnv() if it is called in C `_spawnv` on windows + + * Fill in more slots when creating a PyTypeObject from a W_TypeObject, + like `__hex__`, `__sub__`, `__pow__` + + * Copy CPython's logic more closely for `isinstance()` and + `issubclass()` as well as `type.__instancecheck__()` and + `type.__subclasscheck__()` + + * Expose the name of CDLL objects + + * Rewrite the win32 dependencies of `subprocess` to use cffi + instead of ctypes + + * Improve the `JIT logging`_ facitilities + + * (RPython) make int * string work + + * Allocate all RPython strings with one extra byte, normally + unused. 
This now allows `ffi.from_buffer(string)` in CFFI with + no copy + + * Adds a new commandline option `-X track-resources` that will + produce a `ResourceWarning` when the GC closes a file or socket. + The traceback for the place where the file or socket was allocated + is given as well, which aids finding places where `close()` is + missing + + * Add missing `PyObject_Realloc`, `PySequence_GetSlice` + + * `type.__dict__` now returns a `dict_proxy` object, like on CPython. + Previously it returned what looked like a regular dict object (but + it was already read-only) + + * (RPython) add `rposix.{get,set}_inheritable()`, needed by Python 3.5 + + * (RPython) add `rposix_scandir` portably, needed for Python 3.5 + + * Increased but incomplete support for memoryview attributes (format, + itemsize, ...) which also adds support for `PyMemoryView_FromObject` + +* Bug Fixes + + * Reject `mkdir()` in read-only sandbox filesystems + + * Add include guards to pymem.h to enable c++ compilation + + * Fix build breakage on OpenBSD and FreeBSD + + * Support OpenBSD, Dragon Fly BSD in VMProf + + * Fix for `bytearray('').replace('a', 'ab')` for empty strings + + * Sync internal state before calling `PyFile_AsFile()` + + * Allow writing to a char* from `PyString_AsString()` until it is + forced, also refactor `PyStringObject` to look like CPython's + and allow subclassing `PyString_Type` and `PyUnicode_Type` + + * Rpython rffi's socket(2) wrapper did not preserve errno + + * Refactor `PyTupleObject` to look like CPython's and allow + subclassing `PyTuple_Type` + + * Allow c-level assignment to a function pointer in a C-API + user-defined type after calling PyTypeReady by retrieving + a pointer to the function via offsets + rather than storing the function pointer itself + + * Use `madvise(MADV_FREE)`, or if that doesn't exist + `MADV_DONTNEED` on freed arenas to release memory back to the + OS for resource monitoring + + * Fix overflow detection in conversion of float to 64-bit 
integer + in timeout argument to various thread/threading primitives + + * Fix win32 outputting `\r\r\n` in some cases + + * Make `hash(-1)` return -2, as CPython does, and fix all the + ancilary places this matters + + * Fix `PyNumber_Check()` to behave more like CPython + + * (VMProf) Try hard to not miss any Python-level frame in the + captured stacks, even if there is metainterp or blackhole interp + involved. Also fix the stacklet (greenlet) support + + * Fix a critical JIT bug where `raw_malloc` -equivalent functions + lost the additional flags + + * Fix the mapdict cache for subclasses of builtin types that + provide a dict + + * Issues reported with our previous release were resolved_ after + reports from users on our issue tracker at + https://bitbucket.org/pypy/pypy/issues or on IRC at #pypy + +* Performance improvements: + + * Add a before_call()-like equivalent before a few operations like + `malloc_nursery`, to move values from registers into other registers + instead of to the stack. + + * More tightly pack the stack when calling with `release gil` + + * Support `int_floordiv()`, `int_mod()` in the JIT more efficiently + and add `rarithmetic.int_c_div()`, `rarithmetic.int_c_mod()` as + explicit interfaces. Clarify that `int_floordiv()` does python-style + rounding, unlike `llop.int_floordiv()`. + + * Use `ll_assert` (more often) in incminimark + + * (Testing) Simplify handling of interp-level tests and make it + more forward-compatible. 
Don't use interp-level RPython + machinery to test building app-level extensions in cpyext + + * Constant-fold `ffi.offsetof("structname", "fieldname")` in cffi + backend + + * Avoid a case in the JIT, where successive guard failures in + the same Python function end up as successive levels of + RPython functions, eventually exhausting the stack, while at + app-level the traceback is very short + + * Check for NULL returns from calls to the raw-malloc and raise, + rather than a guard + + * Improve `socket.recvfrom()` so that it copies less if possible + + * When generating C code, inline `goto` to blocks with only one + predecessor, generating less lines of code + + * When running the final backend-optimization phase before emitting + C code, constant-fold calls to we_are_jitted to return False. This + makes the generated C code a few percent smaller + + * Refactor the `uid_t/gid_t` handling in `rlib.rposix` and in + `interp_posix.py`, based on the clean-up of CPython 2.7.x + +.. _`JIT logging`: https://morepypy.blogspot.com/2016/08/pypy-tooling-upgrade-jitviewer-and.html +.. _resolved: http://doc.pypy.org/en/latest/whatsnew-5.4.0.html + +Please update, and continue to help us make PyPy better. + +Cheers diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -1,152 +1,9 @@ ========================== -What's new in PyPy2.7 5.3+ +What's new in PyPy2.7 5.4+ ========================== -.. this is a revision shortly after release-pypy2.7-v5.3 -.. startrev: 873218a739f1 +.. this is a revision shortly after release-pypy2.7-v5.4 +.. startrev: 522736f816dc -.. 418b05f95db5 -Improve CPython compatibility for ``is``. Now code like ``if x is ():`` -works the same way as it does on CPython. See http://pypy.readthedocs.io/en/latest/cpython_differences.html#object-identity-of-primitive-values-is-and-id . - -.. pull request #455 -Add sys.{get,set}dlopenflags, for cpyext extensions. - -.. 
branch: fix-gen-dfa - -Resolves an issue with the generator script to build the dfa for Python syntax. - -.. branch: z196-support - -Fixes a critical issue in the register allocator and extends support on s390x. -PyPy runs and translates on the s390x revisions z10 (released February 2008, experimental) -and z196 (released August 2010) in addition to zEC12 and z13. -To target e.g. z196 on a zEC12 machine supply CFLAGS="-march=z196" to your shell environment. - -.. branch: s390x-5.3-catchup - -Implement the backend related changes for s390x. - -.. branch: incminimark-ll_assert -.. branch: vmprof-openbsd - -.. branch: testing-cleanup - -Simplify handling of interp-level tests and make it more forward- -compatible. - -.. branch: pyfile-tell -Sync w_file with the c-level FILE* before returning FILE* in PyFile_AsFile - -.. branch: rw-PyString_AS_STRING -Allow rw access to the char* returned from PyString_AS_STRING, also refactor -PyStringObject to look like cpython's and allow subclassing PyString_Type and -PyUnicode_Type - -.. branch: save_socket_errno - -Bug fix: if ``socket.socket()`` failed, the ``socket.error`` did not show -the errno of the failing system call, but instead some random previous -errno. - -.. branch: PyTuple_Type-subclass - -Refactor PyTupleObject to look like cpython's and allow subclassing -PyTuple_Type - -.. branch: call-via-pyobj - -Use offsets from PyTypeObject to find actual c function to call rather than -fixed functions, allows function override after PyType_Ready is called - -.. branch: issue2335 - -Avoid exhausting the stack in the JIT due to successive guard -failures in the same Python function ending up as successive levels of -RPython functions, while at app-level the traceback is very short - -.. branch: use-madv-free - -Try harder to memory to the OS. See e.g. issue #2336. 
Note that it does -not show up as a reduction of the VIRT column in ``top``, and the RES -column might also not show the reduction, particularly on Linux >= 4.5 or -on OS/X: it uses MADV_FREE, which only marks the pages as returnable to -the OS if the memory is low. - -.. branch: cpyext-slotdefs2 - -Fill in more slots when creating a PyTypeObject from a W_TypeObject -More slots are still TBD, like tp_print and richcmp - -.. branch: json-surrogates - -Align json module decode with the cpython's impl, fixes issue 2345 - -.. branch: issue2343 - -Copy CPython's logic more closely for handling of ``__instancecheck__()`` -and ``__subclasscheck__()``. Fixes issue 2343. - -.. branch: msvcrt-cffi - -Rewrite the Win32 dependencies of 'subprocess' to use cffi instead -of ctypes. This avoids importing ctypes in many small programs and -scripts, which in turn avoids enabling threads (because ctypes -creates callbacks at import time, and callbacks need threads). - -.. branch: new-jit-log - -The new logging facility that integrates with and adds features to vmprof.com. - -.. branch: jitlog-32bit - -Resolve issues to use the new logging facility on a 32bit system - -.. branch: ep2016sprint - -Trying harder to make hash(-1) return -2, like it does on CPython - -.. branch: jitlog-exact-source-lines - -Log exact line positions in debug merge points. - -.. branch: null_byte_after_str - -Allocate all RPython strings with one extra byte, normally unused. -It is used to hold a final zero in case we need some ``char *`` -representation of the string, together with checks like ``not -can_move()`` or object pinning. Main new thing that this allows: -``ffi.from_buffer(string)`` in CFFI. Additionally, and most -importantly, CFFI calls that take directly a string as argument don't -copy the string any more---this is like CFFI on CPython. - -.. 
branch: resource_warning - -Add a new command line option -X track-resources which will produce -ResourceWarnings when the GC closes unclosed files and sockets. - -.. branch: cpyext-realloc - -Implement PyObject_Realloc - -.. branch: inline-blocks - -Improve a little bit the readability of the generated C code - -.. branch: improve-vmprof-testing - -Improved vmprof support: now tries hard to not miss any Python-level -frame in the captured stacks, even if there is the metainterp or -blackhole interp involved. Also fix the stacklet (greenlet) support. - -.. branch: py2-mappingproxy - -``type.__dict__`` now returns a ``dict_proxy`` object, like on CPython. -Previously it returned what looked like a regular dict object (but it -was already read-only). - - -.. branch: const-fold-we-are-jitted - -Reduce the size of the generated C code by constant-folding ``we_are_jitted`` -in non-jitcode. +.. branch: rpython-resync +Backport rpython changes made directly on the py3k and py3.5 branches. diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-pypy2-5.4.0.rst copy from pypy/doc/whatsnew-head.rst copy to pypy/doc/whatsnew-pypy2-5.4.0.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-pypy2-5.4.0.rst @@ -1,6 +1,6 @@ -========================== -What's new in PyPy2.7 5.3+ -========================== +========================= +What's new in PyPy2.7 5.4 +========================= .. this is a revision shortly after release-pypy2.7-v5.3 .. startrev: 873218a739f1 @@ -150,3 +150,16 @@ Reduce the size of the generated C code by constant-folding ``we_are_jitted`` in non-jitcode. + +.. branch: memoryview-attributes + +Support for memoryview attributes (format, itemsize, ...). +Extends the cpyext emulation layer. + +.. branch: redirect-assembler-jitlog + +Log more information to properly rebuild the redirected traces in jitviewer. + +.. 
branch: cpyext-subclass + +Copy Py_TPFLAGS_CHECKTYPES, Py_TPFLAGS_HAVE_INPLACEOPS when inheriting diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -208,7 +208,8 @@ def buffer_w(self, space, flags): w_impl = space.lookup(self, '__buffer__') if w_impl is not None: - w_result = space.get_and_call_function(w_impl, self) + w_result = space.get_and_call_function(w_impl, self, + space.newint(flags)) if space.isinstance_w(w_result, space.w_buffer): return w_result.buffer_w(space, flags) raise BufferInterfaceNotFound @@ -216,7 +217,8 @@ def readbuf_w(self, space): w_impl = space.lookup(self, '__buffer__') if w_impl is not None: - w_result = space.get_and_call_function(w_impl, self) + w_result = space.get_and_call_function(w_impl, self, + space.newint(space.BUF_FULL_RO)) if space.isinstance_w(w_result, space.w_buffer): return w_result.readbuf_w(space) raise BufferInterfaceNotFound @@ -224,7 +226,8 @@ def writebuf_w(self, space): w_impl = space.lookup(self, '__buffer__') if w_impl is not None: - w_result = space.get_and_call_function(w_impl, self) + w_result = space.get_and_call_function(w_impl, self, + space.newint(space.BUF_FULL)) if space.isinstance_w(w_result, space.w_buffer): return w_result.writebuf_w(space) raise BufferInterfaceNotFound @@ -232,7 +235,8 @@ def charbuf_w(self, space): w_impl = space.lookup(self, '__buffer__') if w_impl is not None: - w_result = space.get_and_call_function(w_impl, self) + w_result = space.get_and_call_function(w_impl, self, + space.newint(space.BUF_FULL_RO)) if space.isinstance_w(w_result, space.w_buffer): return w_result.charbuf_w(space) raise BufferInterfaceNotFound diff --git a/pypy/module/__builtin__/descriptor.py b/pypy/module/__builtin__/descriptor.py --- a/pypy/module/__builtin__/descriptor.py +++ b/pypy/module/__builtin__/descriptor.py @@ -23,6 +23,14 @@ self.w_objtype = w_type self.w_self = w_obj_or_type + def 
descr_repr(self, space): + if self.w_objtype is not None: + objtype_name = "<%s object>" % self.w_objtype.getname(space) + else: + objtype_name = 'NULL' + return space.wrap(", %s>" % ( + self.w_starttype.getname(space), objtype_name)) + def get(self, space, w_obj, w_type=None): if self.w_self is None or space.is_w(w_obj, space.w_None): return self @@ -84,7 +92,10 @@ 'super', __new__ = generic_new_descr(W_Super), __init__ = interp2app(W_Super.descr_init), + __repr__ = interp2app(W_Super.descr_repr), __thisclass__ = interp_attrproperty_w("w_starttype", W_Super), + __self__ = interp_attrproperty_w("w_self", W_Super), + __self_class__ = interp_attrproperty_w("w_objtype", W_Super), __getattribute__ = interp2app(W_Super.getattribute), __get__ = interp2app(W_Super.get), __doc__ = """\ diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py --- a/pypy/module/__builtin__/interp_classobj.py +++ b/pypy/module/__builtin__/interp_classobj.py @@ -38,6 +38,8 @@ class W_ClassObject(W_Root): + _immutable_fields_ = ['bases_w?[*]', 'w_dict?'] + def __init__(self, space, w_name, bases, w_dict): self.name = space.str_w(w_name) make_sure_not_resized(bases) @@ -75,6 +77,7 @@ "__bases__ items must be classes") self.bases_w = bases_w + @jit.unroll_safe def is_subclass_of(self, other): assert isinstance(other, W_ClassObject) if self is other: @@ -313,7 +316,7 @@ # This method ignores the instance dict and the __getattr__. # Returns None if not found. 
assert isinstance(name, str) - w_value = self.w_class.lookup(space, name) + w_value = jit.promote(self.w_class).lookup(space, name) if w_value is None: return None w_descr_get = space.lookup(w_value, '__get__') diff --git a/pypy/module/__builtin__/test/test_descriptor.py b/pypy/module/__builtin__/test/test_descriptor.py --- a/pypy/module/__builtin__/test/test_descriptor.py +++ b/pypy/module/__builtin__/test/test_descriptor.py @@ -250,6 +250,24 @@ assert super(B, B()).__thisclass__ is B assert super(A, B()).__thisclass__ is A + def test_super_self_selfclass(self): + class A(object): + pass + class B(A): + pass + b = B() + assert super(A, b).__self__ is b + assert super(A).__self__ is None + assert super(A, b).__self_class__ is B + assert super(A).__self_class__ is None + + def test_super_repr(self): + class A(object): + def __repr__(self): + return super(A, self).__repr__() + '!' + assert repr(A()).endswith('>!') + assert repr(super(A, A())) == ", >" + def test_property_docstring(self): assert property.__doc__.startswith('property') diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -3,7 +3,7 @@ from rpython.rlib import rdynload, clibffi, entrypoint from rpython.rtyper.lltypesystem import rffi -VERSION = "1.8.0" +VERSION = "1.8.1" FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI try: diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1,7 +1,7 @@ # ____________________________________________________________ import sys -assert __version__ == "1.8.0", ("This test_c.py file is for testing a version" +assert __version__ == "1.8.1", ("This test_c.py file is for testing a version" " of cffi that differs from the one that we" " get from 'import _cffi_backend'") if 
sys.version_info < (3,): diff --git a/pypy/module/_sre/__init__.py b/pypy/module/_sre/__init__.py --- a/pypy/module/_sre/__init__.py +++ b/pypy/module/_sre/__init__.py @@ -1,4 +1,4 @@ -from pypy.interpreter.mixedmodule import MixedModule +from pypy.interpreter.mixedmodule import MixedModule class Module(MixedModule): @@ -7,7 +7,7 @@ interpleveldefs = { 'CODESIZE': 'space.wrap(interp_sre.CODESIZE)', - 'MAGIC': 'space.wrap(interp_sre.MAGIC)', + 'MAGIC': 'space.newint(20031017)', 'MAXREPEAT': 'space.wrap(interp_sre.MAXREPEAT)', 'compile': 'interp_sre.W_SRE_Pattern', 'getlower': 'interp_sre.w_getlower', diff --git a/pypy/module/_sre/interp_sre.py b/pypy/module/_sre/interp_sre.py --- a/pypy/module/_sre/interp_sre.py +++ b/pypy/module/_sre/interp_sre.py @@ -14,7 +14,7 @@ # Constants and exposed functions from rpython.rlib.rsre import rsre_core -from rpython.rlib.rsre.rsre_char import MAGIC, CODESIZE, MAXREPEAT, getlower, set_unicode_db +from rpython.rlib.rsre.rsre_char import CODESIZE, MAXREPEAT, getlower, set_unicode_db @unwrap_spec(char_ord=int, flags=int) diff --git a/pypy/module/_winreg/interp_winreg.py b/pypy/module/_winreg/interp_winreg.py --- a/pypy/module/_winreg/interp_winreg.py +++ b/pypy/module/_winreg/interp_winreg.py @@ -358,9 +358,15 @@ elif typ == rwinreg.REG_SZ or typ == rwinreg.REG_EXPAND_SZ: if not buflen: - return space.wrap("") - s = rffi.charp2strn(rffi.cast(rffi.CCHARP, buf), buflen) - return space.wrap(s) + s = "" + else: + # may or may not have a trailing NULL in the buffer. 
+ buf = rffi.cast(rffi.CCHARP, buf) + if buf[buflen - 1] == '\x00': + buflen -= 1 + s = rffi.charp2strn(buf, buflen) + w_s = space.wrap(s) + return space.call_method(w_s, 'decode', space.wrap('mbcs')) elif typ == rwinreg.REG_MULTI_SZ: if not buflen: @@ -460,7 +466,7 @@ return space.newtuple([ convert_from_regdata(space, databuf, length, retType[0]), - space.wrap(retType[0]), + space.wrap(intmask(retType[0])), ]) @unwrap_spec(subkey=str) @@ -612,7 +618,7 @@ space.wrap(rffi.charp2str(valuebuf)), convert_from_regdata(space, databuf, length, retType[0]), - space.wrap(retType[0]), + space.wrap(intmask(retType[0])), ]) @unwrap_spec(index=int) diff --git a/pypy/module/_winreg/test/test_winreg.py b/pypy/module/_winreg/test/test_winreg.py --- a/pypy/module/_winreg/test/test_winreg.py +++ b/pypy/module/_winreg/test/test_winreg.py @@ -151,6 +151,7 @@ def test_readValues(self): from _winreg import OpenKey, EnumValue, QueryValueEx, EnumKey + from _winreg import REG_SZ, REG_EXPAND_SZ key = OpenKey(self.root_key, self.test_key_name) sub_key = OpenKey(key, "sub_key") index = 0 @@ -164,7 +165,10 @@ assert index == len(self.test_data) for name, value, type in self.test_data: - assert QueryValueEx(sub_key, name) == (value, type) + result = QueryValueEx(sub_key, name) + assert result == (value, type) + if type == REG_SZ or type == REG_EXPAND_SZ: + assert isinstance(result[0], unicode) # not string assert EnumKey(key, 0) == "sub_key" raises(EnvironmentError, EnumKey, key, 1) diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -120,8 +120,8 @@ constant_names = """ Py_TPFLAGS_READY Py_TPFLAGS_READYING Py_TPFLAGS_HAVE_GETCHARBUFFER METH_COEXIST METH_STATIC METH_CLASS Py_TPFLAGS_BASETYPE -METH_NOARGS METH_VARARGS METH_KEYWORDS METH_O -Py_TPFLAGS_HEAPTYPE Py_TPFLAGS_HAVE_CLASS +METH_NOARGS METH_VARARGS METH_KEYWORDS METH_O Py_TPFLAGS_HAVE_INPLACEOPS +Py_TPFLAGS_HEAPTYPE Py_TPFLAGS_HAVE_CLASS 
Py_TPFLAGS_HAVE_NEWBUFFER Py_LT Py_LE Py_EQ Py_NE Py_GT Py_GE Py_TPFLAGS_CHECKTYPES """.split() for name in constant_names: @@ -649,6 +649,7 @@ #('smalltable', rffi.CFixedArray(Py_ssize_t, 2)), ('internal', rffi.VOIDP) )) +Py_bufferP = lltype.Ptr(Py_buffer) @specialize.memo() def is_PyObject(TYPE): @@ -976,8 +977,10 @@ py_type_ready(space, get_capsule_type()) INIT_FUNCTIONS.append(init_types) from pypy.module.posix.interp_posix import add_fork_hook - reinit_tls = rffi.llexternal('%sThread_ReInitTLS' % prefix, [], lltype.Void, - compilation_info=eci) + _reinit_tls = rffi.llexternal('%sThread_ReInitTLS' % prefix, [], + lltype.Void, compilation_info=eci) + def reinit_tls(space): + _reinit_tls() add_fork_hook('child', reinit_tls) def init_function(func): diff --git a/pypy/module/cpyext/buffer.py b/pypy/module/cpyext/buffer.py --- a/pypy/module/cpyext/buffer.py +++ b/pypy/module/cpyext/buffer.py @@ -1,13 +1,17 @@ from pypy.interpreter.error import oefmt from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( - cpython_api, CANNOT_FAIL, Py_buffer) + cpython_api, CANNOT_FAIL, Py_buffer, Py_TPFLAGS_HAVE_NEWBUFFER) from pypy.module.cpyext.pyobject import PyObject @cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) -def PyObject_CheckBuffer(space, w_obj): +def PyObject_CheckBuffer(space, pyobj): """Return 1 if obj supports the buffer interface otherwise 0.""" - return 0 # the bf_getbuffer field is never filled by cpyext + as_buffer = pyobj.c_ob_type.c_tp_as_buffer + flags = pyobj.c_ob_type.c_tp_flags + if (flags & Py_TPFLAGS_HAVE_NEWBUFFER and as_buffer.c_bf_getbuffer): + return 1 + return 0 @cpython_api([PyObject, lltype.Ptr(Py_buffer), rffi.INT_real], rffi.INT_real, error=-1) diff --git a/pypy/module/cpyext/import_.py b/pypy/module/cpyext/import_.py --- a/pypy/module/cpyext/import_.py +++ b/pypy/module/cpyext/import_.py @@ -123,5 +123,4 @@ pathname = code.co_filename w_mod = importing.add_module(space, w_name) space.setattr(w_mod, 
space.wrap('__file__'), space.wrap(pathname)) - importing.exec_code_module(space, w_mod, code) - return w_mod + return importing.exec_code_module(space, w_mod, code, w_name) diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -29,8 +29,8 @@ #define PY_VERSION "2.7.10" /* PyPy version as a string */ -#define PYPY_VERSION "5.3.2-alpha0" -#define PYPY_VERSION_NUM 0x05030200 +#define PYPY_VERSION "5.5.0-alpha0" +#define PYPY_VERSION_NUM 0x05050000 /* Defined to mean a PyPy where cpyext holds more regular references to PyObjects, e.g. staying alive as long as the internal PyPy object diff --git a/pypy/module/cpyext/memoryobject.py b/pypy/module/cpyext/memoryobject.py --- a/pypy/module/cpyext/memoryobject.py +++ b/pypy/module/cpyext/memoryobject.py @@ -12,7 +12,7 @@ @cpython_api([PyObject], PyObject) def PyMemoryView_GET_BASE(space, w_obj): # return the obj field of the Py_buffer created by PyMemoryView_GET_BUFFER - raise NotImplementedError + raise NotImplementedError('PyMemoryView_GET_BUFFER') @cpython_api([PyObject], lltype.Ptr(Py_buffer), error=CANNOT_FAIL) def PyMemoryView_GET_BUFFER(space, w_obj): diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -3,15 +3,16 @@ import re from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib.rarithmetic import widen from pypy.module.cpyext.api import ( cpython_api, generic_cpy_call, PyObject, Py_ssize_t, Py_TPFLAGS_CHECKTYPES, - mangle_name, pypy_decl) + mangle_name, pypy_decl, Py_buffer, Py_bufferP) from pypy.module.cpyext.typeobjectdefs import ( unaryfunc, wrapperfunc, ternaryfunc, PyTypeObjectPtr, binaryfunc, ternaryfunc, getattrfunc, getattrofunc, setattrofunc, lenfunc, ssizeargfunc, inquiry, ssizessizeargfunc, ssizeobjargproc, iternextfunc, initproc, richcmpfunc, cmpfunc, 
hashfunc, descrgetfunc, descrsetfunc, objobjproc, objobjargproc, - readbufferproc, ssizessizeobjargproc) + readbufferproc, getbufferproc, ssizessizeobjargproc) from pypy.module.cpyext.pyobject import from_ref, make_ref, Py_DecRef from pypy.module.cpyext.pyerrors import PyErr_Occurred from pypy.module.cpyext.state import State @@ -22,6 +23,9 @@ from rpython.rlib.objectmodel import specialize from rpython.tool.sourcetools import func_renamer from rpython.rtyper.annlowlevel import llhelper +from pypy.module.sys.version import CPYTHON_VERSION + +PY3 = CPYTHON_VERSION[0] == 3 # XXX: Also defined in object.h Py_LT = 0 @@ -298,11 +302,23 @@ # Similar to Py_buffer _immutable_ = True - def __init__(self, ptr, size, w_obj): + def __init__(self, ptr, size, w_obj, format='B', shape=None, + strides=None, ndim=1, itemsize=1, readonly=True): self.ptr = ptr self.size = size self.w_obj = w_obj # kept alive - self.readonly = True + self.format = format + if not shape: + self.shape = [size] + else: + self.shape = shape + if not strides: + self.strides = [1] + else: + self.strides = strides + self.ndim = ndim + self.itemsize = itemsize + self.readonly = readonly def getlength(self): return self.size @@ -313,6 +329,15 @@ def get_raw_address(self): return rffi.cast(rffi.CCHARP, self.ptr) + def getformat(self): + return self.format + + def getshape(self): + return self.shape + + def getitemsize(self): + return self.itemsize + def wrap_getreadbuffer(space, w_self, w_args, func): func_target = rffi.cast(readbufferproc, func) with lltype.scoped_alloc(rffi.VOIDPP.TO, 1) as ptr: @@ -322,6 +347,30 @@ space.fromcache(State).check_and_raise_exception(always=True) return space.newbuffer(CPyBuffer(ptr[0], size, w_self)) +def wrap_getbuffer(space, w_self, w_args, func): + func_target = rffi.cast(getbufferproc, func) + with lltype.scoped_alloc(Py_buffer) as pybuf: + _flags = 0 + if space.len_w(w_args) > 0: + _flags = space.int_w(space.listview(w_args)[0]) + flags = rffi.cast(rffi.INT_real,_flags) + 
size = generic_cpy_call(space, func_target, w_self, pybuf, flags) + if widen(size) < 0: + space.fromcache(State).check_and_raise_exception(always=True) + ptr = pybuf.c_buf + size = pybuf.c_len + ndim = widen(pybuf.c_ndim) + shape = [pybuf.c_shape[i] for i in range(ndim)] + strides = [pybuf.c_strides[i] for i in range(ndim)] + if pybuf.c_format: + format = rffi.charp2str(pybuf.c_format) + else: + format = 'B' + return space.newbuffer(CPyBuffer(ptr, size, w_self, format=format, + ndim=ndim, shape=shape, strides=strides, + itemsize=pybuf.c_itemsize, + readonly=widen(pybuf.c_readonly))) + def get_richcmp_func(OP_CONST): def inner(space, w_self, w_args, func): func_target = rffi.cast(richcmpfunc, func) @@ -486,7 +535,6 @@ def slot_tp_getattro(space, w_self, w_name): return space.call_function(getattr_fn, w_self, w_name) api_func = slot_tp_getattro.api_func - elif name == 'tp_call': call_fn = w_type.getdictvalue(space, '__call__') if call_fn is None: @@ -542,6 +590,21 @@ w_stararg=w_args, w_starstararg=w_kwds) return space.call_args(space.get(new_fn, w_self), args) api_func = slot_tp_new.api_func + elif name == 'tp_as_buffer.c_bf_getbuffer': + buff_fn = w_type.getdictvalue(space, '__buffer__') + if buff_fn is None: + return + @cpython_api([PyObject, Py_bufferP, rffi.INT_real], + rffi.INT_real, header=None, error=-1) + @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name)) + def buff_w(space, w_self, pybuf, flags): + # XXX this is wrong, needs a test + raise oefmt(space.w_NotImplemented, + "calling bf_getbuffer on a builtin type not supported yet") + #args = Arguments(space, [w_self], + # w_stararg=w_args, w_starstararg=w_kwds) + #return space.call_args(space.get(buff_fn, w_self), args) + api_func = buff_w.api_func else: # missing: tp_as_number.nb_nonzero, tp_as_number.nb_coerce # tp_as_sequence.c_sq_contains, tp_as_sequence.c_sq_length @@ -850,11 +913,19 @@ slotdefs = eval(slotdefs_str) # PyPy addition slotdefs += ( - TPSLOT("__buffer__", 
"tp_as_buffer.c_bf_getreadbuffer", None, "wrap_getreadbuffer", ""), + # XXX that might not be what we want! + TPSLOT("__buffer__", "tp_as_buffer.c_bf_getbuffer", None, "wrap_getbuffer", ""), ) +if not PY3: + slotdefs += ( + TPSLOT("__buffer__", "tp_as_buffer.c_bf_getreadbuffer", None, "wrap_getreadbuffer", ""), + ) + + # partial sort to solve some slot conflicts: # Number slots before Mapping slots before Sequence slots. +# also prefer the new buffer interface # These are the only conflicts between __name__ methods def slotdef_sort_key(slotdef): if slotdef.slot_name.startswith('tp_as_number'): @@ -863,6 +934,10 @@ return 2 if slotdef.slot_name.startswith('tp_as_sequence'): return 3 + if slotdef.slot_name == 'tp_as_buffer.c_bf_getbuffer': + return 100 + if slotdef.slot_name == 'tp_as_buffer.c_bf_getreadbuffer': + return 101 return 0 slotdefs = sorted(slotdefs, key=slotdef_sort_key) diff --git a/pypy/module/cpyext/test/buffer_test.c b/pypy/module/cpyext/test/buffer_test.c new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/test/buffer_test.c @@ -0,0 +1,248 @@ +#ifdef _MSC_VER +#define _CRT_SECURE_NO_WARNINGS 1 +#endif +#include +#include +#include + +/* + * Adapted from https://jakevdp.github.io/blog/2014/05/05/introduction-to-the-python-buffer-protocol, + * which is copyright Jake Vanderplas and released under the BSD license + */ + +/* Structure defines a 1-dimensional strided array */ +typedef struct{ + int* arr; + Py_ssize_t length; +} MyArray; + +/* initialize the array with integers 0...length */ +void initialize_MyArray(MyArray* a, long length){ + int i; + a->length = length; + a->arr = (int*)malloc(length * sizeof(int)); + for(i=0; iarr[i] = i; + } +} + +/* free the memory when finished */ +void deallocate_MyArray(MyArray* a){ + free(a->arr); + a->arr = NULL; +} + +/* tools to print the array */ +char* stringify(MyArray* a, int nmax){ + char* output = (char*) malloc(nmax * 20); + int k, pos = sprintf(&output[0], "["); + + for (k=0; k < a->length && k 
< nmax; k++){ + pos += sprintf(&output[pos], " %d", a->arr[k]); + } + if(a->length > nmax) + pos += sprintf(&output[pos], "..."); + sprintf(&output[pos], " ]"); + return output; +} + +void print_MyArray(MyArray* a, int nmax){ + char* s = stringify(a, nmax); + printf("%s", s); + free(s); +} + +/* This is where we define the PyMyArray object structure */ +typedef struct { + PyObject_HEAD + /* Type-specific fields go below. */ + MyArray arr; +} PyMyArray; + + +/* This is the __init__ function, implemented in C */ +static int +PyMyArray_init(PyMyArray *self, PyObject *args, PyObject *kwds) +{ + int length = 0; + static char *kwlist[] = {"length", NULL}; + // init may have already been called + if (self->arr.arr != NULL) { + deallocate_MyArray(&self->arr); + } + + if (! PyArg_ParseTupleAndKeywords(args, kwds, "|i", kwlist, &length)) + return -1; + + if (length < 0) + length = 0; + + initialize_MyArray(&self->arr, length); + + return 0; +} + + +/* this function is called when the object is deallocated */ +static void +PyMyArray_dealloc(PyMyArray* self) +{ + deallocate_MyArray(&self->arr); + Py_TYPE(self)->tp_free((PyObject*)self); +} + + +/* This function returns the string representation of our object */ +static PyObject * +PyMyArray_str(PyMyArray * self) +{ + char* s = stringify(&self->arr, 10); + PyObject* ret = PyUnicode_FromString(s); + free(s); + return ret; +} + +/* Here is the buffer interface function */ +static int +PyMyArray_getbuffer(PyObject *obj, Py_buffer *view, int flags) +{ + PyMyArray* self = (PyMyArray*)obj; + fprintf(stdout, "in PyMyArray_getbuffer\n"); + if (view == NULL) { + fprintf(stdout, "view is NULL\n"); + PyErr_SetString(PyExc_ValueError, "NULL view in getbuffer"); + return -1; + } + if (flags == 0) { + fprintf(stdout, "flags is 0\n"); + PyErr_SetString(PyExc_ValueError, "flags == 0 in getbuffer"); + return -1; + } + + view->obj = (PyObject*)self; + view->buf = (void*)self->arr.arr; + view->len = self->arr.length * sizeof(int); + 
view->readonly = 0; + view->itemsize = sizeof(int); + view->format = "i"; // integer + view->ndim = 1; + view->shape = &self->arr.length; // length-1 sequence of dimensions + view->strides = &view->itemsize; // for the simple case we can do this + view->suboffsets = NULL; + view->internal = NULL; + + Py_INCREF(self); // need to increase the reference count + return 0; +} + +static PyBufferProcs PyMyArray_as_buffer = { +#if PY_MAJOR_VERSION < 3 + (readbufferproc)0, + (writebufferproc)0, + (segcountproc)0, + (charbufferproc)0, +#endif + (getbufferproc)PyMyArray_getbuffer, + (releasebufferproc)0, // we do not require any special release function +}; + + +/* Here is the type structure: we put the above functions in the appropriate place + in order to actually define the Python object type */ +static PyTypeObject PyMyArrayType = { + PyVarObject_HEAD_INIT(NULL, 0) + "pymyarray.PyMyArray", /* tp_name */ + sizeof(PyMyArray), /* tp_basicsize */ + 0, /* tp_itemsize */ + (destructor)PyMyArray_dealloc,/* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + 0, /* tp_reserved */ + (reprfunc)PyMyArray_str, /* tp_repr */ + 0, /* tp_as_number */ + 0, /* tp_as_sequence */ + 0, /* tp_as_mapping */ + 0, /* tp_hash */ + 0, /* tp_call */ + (reprfunc)PyMyArray_str, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + &PyMyArray_as_buffer, /* tp_as_buffer */ + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_NEWBUFFER, /* tp_flags */ + "PyMyArray object", /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + 0, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + (initproc)PyMyArray_init, /* tp_init */ +}; + +static PyMethodDef buffer_functions[] = { + {NULL, NULL} /* Sentinel */ +}; + +#if PY_MAJOR_VERSION >= 3 +static struct PyModuleDef moduledef 
= { + PyModuleDef_HEAD_INIT, + "buffer_test", + "Module Doc", + -1, + buffer_functions, + NULL, + NULL, + NULL, + NULL, +}; +#define INITERROR return NULL + +/* Initialize this module. */ +#ifdef __GNUC__ +extern __attribute__((visibility("default"))) +#else +extern __declspec(dllexport) +#endif + +PyMODINIT_FUNC +PyInit_buffer_test(void) + +#else + +#define INITERROR return + +/* Initialize this module. */ +#ifdef __GNUC__ +extern __attribute__((visibility("default"))) +#else +#endif + +PyMODINIT_FUNC +initbuffer_test(void) +#endif +{ +#if PY_MAJOR_VERSION >= 3 + PyObject *m= PyModule_Create(&moduledef); +#else + PyObject *m= Py_InitModule("buffer_test", buffer_functions); +#endif + if (m == NULL) + INITERROR; + PyMyArrayType.tp_new = PyType_GenericNew; + if (PyType_Ready(&PyMyArrayType) < 0) + INITERROR; + Py_INCREF(&PyMyArrayType); + PyModule_AddObject(m, "PyMyArray", (PyObject *)&PyMyArrayType); +#if PY_MAJOR_VERSION >=3 + return m; +#endif +} diff --git a/pypy/module/cpyext/test/test_arraymodule.py b/pypy/module/cpyext/test/test_arraymodule.py --- a/pypy/module/cpyext/test/test_arraymodule.py +++ b/pypy/module/cpyext/test/test_arraymodule.py @@ -87,4 +87,13 @@ module.switch_multiply() res = [1, 2, 3] * arr assert res == [2, 4, 6] + + def test_subclass(self): + module = self.import_module(name='array') + class Sub(module.array): + pass + + arr = Sub('i', [2]) + res = [1, 2, 3] * arr + assert res == [1, 2, 3, 1, 2, 3] diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -92,10 +92,20 @@ link_extra=link_extra, libraries=libraries) from pypy.module.imp.importing import get_so_extension - pydname = soname.new(purebasename=modname, ext=get_so_extension(space)) + ext = get_so_extension(space) + pydname = soname.new(purebasename=modname, ext=ext) soname.rename(pydname) return str(pydname) +def get_so_suffix(): + from imp import 
get_suffixes, C_EXTENSION + for suffix, mode, typ in get_suffixes(): + if typ == C_EXTENSION: + return suffix + else: + raise RuntimeError("This interpreter does not define a filename " + "suffix for C extensions!") + def compile_extension_module_applevel(space, modname, include_dirs=[], source_files=None, source_strings=None): """ @@ -126,13 +136,9 @@ source_strings=source_strings, compile_extra=compile_extra, link_extra=link_extra) - from imp import get_suffixes, C_EXTENSION - pydname = soname - for suffix, mode, typ in get_suffixes(): - if typ == C_EXTENSION: - pydname = soname.new(purebasename=modname, ext=suffix) - soname.rename(pydname) - break + ext = get_so_suffix() + pydname = soname.new(purebasename=modname, ext=ext) + soname.rename(pydname) return str(pydname) def freeze_refcnts(self): @@ -145,6 +151,24 @@ #state.print_refcounts() self.frozen_ll2callocations = set(ll2ctypes.ALLOCATED.values()) +class FakeSpace(object): + """Like TinyObjSpace, but different""" + def __init__(self, config): + from distutils.sysconfig import get_python_inc + self.config = config + self.include_dir = get_python_inc() + + def passthrough(self, arg): + return arg + listview = passthrough + str_w = passthrough + + def unwrap(self, args): + try: + return args.str_w(None) + except: + return args + class LeakCheckingTest(object): """Base class for all cpyext tests.""" spaceconfig = dict(usemodules=['cpyext', 'thread', '_rawffi', 'array', @@ -433,21 +457,8 @@ self.imported_module_names = [] if self.runappdirect: + fake = FakeSpace(self.space.config) def interp2app(func): - from distutils.sysconfig import get_python_inc - class FakeSpace(object): - def passthrough(self, arg): - return arg - listview = passthrough - str_w = passthrough - def unwrap(self, args): - try: - return args.str_w(None) - except: - return args - fake = FakeSpace() - fake.include_dir = get_python_inc() - fake.config = self.space.config def run(*args, **kwargs): for k in kwargs.keys(): if k not in 
func.unwrap_spec and not k.startswith('w_'): diff --git a/pypy/module/cpyext/test/test_memoryobject.py b/pypy/module/cpyext/test/test_memoryobject.py --- a/pypy/module/cpyext/test/test_memoryobject.py +++ b/pypy/module/cpyext/test/test_memoryobject.py @@ -1,17 +1,26 @@ -import pytest from pypy.module.cpyext.test.test_api import BaseApiTest +from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase + class TestMemoryViewObject(BaseApiTest): def test_fromobject(self, space, api): - if space.is_true(space.lt(space.sys.get('version_info'), - space.wrap((2, 7)))): - py.test.skip("unsupported before Python 2.7") - w_hello = space.newbytes("hello") + assert api.PyObject_CheckBuffer(w_hello) w_view = api.PyMemoryView_FromObject(w_hello) + w_char = space.call_method(w_view, '__getitem__', space.wrap(0)) + assert space.eq_w(w_char, space.wrap('h')) w_bytes = space.call_method(w_view, "tobytes") assert space.unwrap(w_bytes) == "hello" - @pytest.mark.skipif(True, reason='write a test for this') - def test_get_base_and_get_buffer(self, space, api): - assert False # XXX test PyMemoryView_GET_BASE, PyMemoryView_GET_BUFFER + +class AppTestBufferProtocol(AppTestCpythonExtensionBase): + def test_buffer_protocol(self): + import struct + module = self.import_module(name='buffer_test') + arr = module.PyMyArray(10) + y = memoryview(arr) + assert y.format == 'i' + assert y.shape == (10,) + s = y[3] + assert len(s) == struct.calcsize('i') + assert s == struct.pack('i', 3) diff --git a/pypy/module/cpyext/test/test_version.py b/pypy/module/cpyext/test/test_version.py --- a/pypy/module/cpyext/test/test_version.py +++ b/pypy/module/cpyext/test/test_version.py @@ -32,9 +32,11 @@ assert module.py_minor_version == sys.version_info.minor assert module.py_micro_version == sys.version_info.micro - @pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='pypy only test') + #@pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='pypy only test') def 
test_pypy_versions(self): import sys + if '__pypy__' not in sys.builtin_module_names: + py.test.skip("pypy only test") init = """ if (Py_IsInitialized()) { PyObject *m = Py_InitModule("foo", NULL); diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -17,7 +17,9 @@ generic_cpy_call, Py_TPFLAGS_READY, Py_TPFLAGS_READYING, Py_TPFLAGS_HEAPTYPE, METH_VARARGS, METH_KEYWORDS, CANNOT_FAIL, Py_TPFLAGS_HAVE_GETCHARBUFFER, build_type_checkers, StaticObjectBuilder, - PyObjectFields, Py_TPFLAGS_BASETYPE, PyTypeObject, PyTypeObjectPtr) + PyObjectFields, Py_TPFLAGS_BASETYPE, PyTypeObject, PyTypeObjectPtr, + Py_TPFLAGS_HAVE_NEWBUFFER, Py_TPFLAGS_CHECKTYPES, + Py_TPFLAGS_HAVE_INPLACEOPS) from pypy.module.cpyext.methodobject import (W_PyCClassMethodObject, W_PyCWrapperObject, PyCFunction_NewEx, PyCFunction_typedef, PyMethodDef, W_PyCMethodObject, W_PyCFunctionObject) @@ -385,6 +387,8 @@ pto.c_tp_basicsize = base_pto.c_tp_basicsize if pto.c_tp_itemsize < base_pto.c_tp_itemsize: pto.c_tp_itemsize = base_pto.c_tp_itemsize + pto.c_tp_flags |= base_pto.c_tp_flags & Py_TPFLAGS_CHECKTYPES + pto.c_tp_flags |= base_pto.c_tp_flags & Py_TPFLAGS_HAVE_INPLACEOPS flags = rffi.cast(lltype.Signed, pto.c_tp_flags) base_object_pyo = make_ref(space, space.w_object) base_object_pto = rffi.cast(PyTypeObjectPtr, base_object_pyo) @@ -608,6 +612,7 @@ bf_getwritebuffer.api_func.get_wrapper(space)) pto.c_tp_as_buffer = c_buf pto.c_tp_flags |= Py_TPFLAGS_HAVE_GETCHARBUFFER + pto.c_tp_flags |= Py_TPFLAGS_HAVE_NEWBUFFER @cpython_api([PyObject], lltype.Void, header=None) def type_dealloc(space, obj): @@ -774,6 +779,8 @@ pto.c_tp_setattro = base.c_tp_setattro if not pto.c_tp_getattro: pto.c_tp_getattro = base.c_tp_getattro + if not pto.c_tp_as_buffer: + pto.c_tp_as_buffer = base.c_tp_as_buffer finally: Py_DecRef(space, base_pyo) @@ -810,8 +817,13 @@ # inheriting tp_as_* slots base = py_type.c_tp_base if 
base: - if not py_type.c_tp_as_number: py_type.c_tp_as_number = base.c_tp_as_number - if not py_type.c_tp_as_sequence: py_type.c_tp_as_sequence = base.c_tp_as_sequence + if not py_type.c_tp_as_number: + py_type.c_tp_as_number = base.c_tp_as_number + py_type.c_tp_flags |= base.c_tp_flags & Py_TPFLAGS_CHECKTYPES + py_type.c_tp_flags |= base.c_tp_flags & Py_TPFLAGS_HAVE_INPLACEOPS + if not py_type.c_tp_as_sequence: + py_type.c_tp_as_sequence = base.c_tp_as_sequence + py_type.c_tp_flags |= base.c_tp_flags & Py_TPFLAGS_HAVE_INPLACEOPS if not py_type.c_tp_as_mapping: py_type.c_tp_as_mapping = base.c_tp_as_mapping if not py_type.c_tp_as_buffer: py_type.c_tp_as_buffer = base.c_tp_as_buffer diff --git a/pypy/module/cpyext/typeobjectdefs.py b/pypy/module/cpyext/typeobjectdefs.py --- a/pypy/module/cpyext/typeobjectdefs.py +++ b/pypy/module/cpyext/typeobjectdefs.py @@ -5,6 +5,7 @@ Py_TPFLAGS_READYING, Py_TPFLAGS_READY, Py_TPFLAGS_HEAPTYPE) from pypy.module.cpyext.pyobject import PyObject, make_ref, from_ref from pypy.module.cpyext.modsupport import PyMethodDef +from pypy.module.cpyext.api import Py_bufferP P, FT, PyO = Ptr, FuncType, PyObject @@ -58,8 +59,7 @@ writebufferproc = P(FT([PyO, Py_ssize_t, rffi.VOIDPP], Py_ssize_t)) segcountproc = P(FT([PyO, Py_ssize_tP], Py_ssize_t)) charbufferproc = P(FT([PyO, Py_ssize_t, rffi.CCHARPP], Py_ssize_t)) -## We don't support new buffer interface for now -getbufferproc = rffi.VOIDP +getbufferproc = P(FT([PyO, Py_bufferP, rffi.INT_real], rffi.INT_real)) releasebufferproc = rffi.VOIDP diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -597,6 +597,11 @@ @jit.dont_look_inside def load_module(space, w_modulename, find_info, reuse=False): + """Like load_module() in CPython's import.c, this will normally + make a module object, store it in sys.modules, execute code in it, + and then fetch it again from sys.modules. 
But this logic is not + used if we're calling a PEP302 loader. + """ if find_info is None: return @@ -625,17 +630,15 @@ try: if find_info.modtype == PY_SOURCE: - load_source_module( + return load_source_module( space, w_modulename, w_mod, find_info.filename, find_info.stream.readall(), find_info.stream.try_to_find_file_descriptor()) - return w_mod elif find_info.modtype == PY_COMPILED: magic = _r_long(find_info.stream) timestamp = _r_long(find_info.stream) - load_compiled_module(space, w_modulename, w_mod, find_info.filename, + return load_compiled_module(space, w_modulename, w_mod, find_info.filename, magic, timestamp, find_info.stream.readall()) From pypy.commits at gmail.com Mon Sep 5 03:29:33 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 05 Sep 2016 00:29:33 -0700 (PDT) Subject: [pypy-commit] pypy ppc-vsx-support: killed a bug in unpacking multiple Message-ID: <57cd1edd.06a81c0a.332c.3865@mx.google.com> Author: Richard Plangger Branch: ppc-vsx-support Changeset: r86877:0e4fa815252e Date: 2016-09-05 09:28 +0200 http://bitbucket.org/pypy/pypy/changeset/0e4fa815252e/ Log: killed a bug in unpacking multiple diff --git a/rpython/jit/backend/ppc/vector_ext.py b/rpython/jit/backend/ppc/vector_ext.py --- a/rpython/jit/backend/ppc/vector_ext.py +++ b/rpython/jit/backend/ppc/vector_ext.py @@ -534,13 +534,13 @@ self.mc.load_imm(r.SCRATCH2, PARAM_SAVE_AREA_OFFSET+16) self.mc.stvx(res, r.SCRATCH2.value, r.SP.value) if count * size == 8: - stidx = 0 if not IS_BIG_ENDIAN: - idx = (16 // size) - 1 - idx - stidx = 0 - off = PARAM_SAVE_AREA_OFFSET + idx * size + endian_off = 8 + off = PARAM_SAVE_AREA_OFFSET + off = off + endian_off - (idx * size) + assert idx * size + 8 <= 16 self.mc.load(r.SCRATCH.value, r.SP.value, off) - self.mc.store(r.SCRATCH.value, r.SP.value, PARAM_SAVE_AREA_OFFSET+16+stidx) + self.mc.store(r.SCRATCH.value, r.SP.value, PARAM_SAVE_AREA_OFFSET+16+endian_off) self.mc.lvx(res, r.SCRATCH2.value, r.SP.value) return diff --git 
a/rpython/jit/metainterp/test/test_vector.py b/rpython/jit/metainterp/test/test_vector.py --- a/rpython/jit/metainterp/test/test_vector.py +++ b/rpython/jit/metainterp/test/test_vector.py @@ -826,7 +826,6 @@ (2**31-1 if i%2==0 else 0) def test_unpack_several(self): - # count == 2 values = [1,2,3,4] for i,v in enumerate(values): j = (i // 2) * 2 From pypy.commits at gmail.com Mon Sep 5 07:51:15 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 05 Sep 2016 04:51:15 -0700 (PDT) Subject: [pypy-commit] pypy ppc-vsx-support: not_implemented takes only one argument, remove pxor(result, result) which contained valid data Message-ID: <57cd5c33.87941c0a.801f7.fb89@mx.google.com> Author: Richard Plangger Branch: ppc-vsx-support Changeset: r86878:179c6a65d492 Date: 2016-09-05 13:50 +0200 http://bitbucket.org/pypy/pypy/changeset/179c6a65d492/ Log: not_implemented takes only one argument, remove pxor(result,result) which contained valid data diff --git a/rpython/jit/backend/x86/vector_ext.py b/rpython/jit/backend/x86/vector_ext.py --- a/rpython/jit/backend/x86/vector_ext.py +++ b/rpython/jit/backend/x86/vector_ext.py @@ -322,8 +322,7 @@ assert lhsloc is xmm0 maskloc = X86_64_XMM_SCRATCH_REG self.mc.MOVAPD(maskloc, heap(self.element_ones[get_scale(size)])) - self.mc.PXOR(resloc, resloc) - # note that xmm0 contains true false for each element by the last compare operation + # note that resloc contains true false for each element by the last compare operation self.mc.PBLENDVB_xx(resloc.value, maskloc.value) def genop_vec_float_ne(self, op, arglocs, resloc): @@ -532,7 +531,7 @@ self.mc.UNPCKHPD(resloc, srcloc) # if they are equal nothing is to be done else: - not_implemented("pack/unpack for size %d", size) + not_implemented("pack/unpack for size %d" % size) genop_vec_unpack_f = genop_vec_pack_f From pypy.commits at gmail.com Mon Sep 5 09:53:29 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 05 Sep 2016 06:53:29 -0700 (PDT) Subject: [pypy-commit] pypy 
buffer-interface: rewrite _array_from_buffer_3118 without explicit W_MemoryView (arigato) Message-ID: <57cd78d9.041f1c0a.fbb56.3d45@mx.google.com> Author: Matti Picus Branch: buffer-interface Changeset: r86879:52eb4d9855de Date: 2016-09-04 22:26 +0300 http://bitbucket.org/pypy/pypy/changeset/52eb4d9855de/ Log: rewrite _array_from_buffer_3118 without explicit W_MemoryView (arigato) diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -460,6 +460,9 @@ def getdictvalue(self, space, key): return self.items[key] + def descr_memoryview(self, space, buf): + raise oefmt(space.w_TypeError, "error") + class IterDictObject(W_Root): def __init__(self, space, w_dict): self.space = space diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -1,4 +1,5 @@ from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.baseobjspace import BufferInterfaceNotFound from pypy.interpreter.gateway import unwrap_spec, WrappedDefault from rpython.rlib.buffer import SubBuffer from rpython.rlib.rstring import strip_spaces @@ -11,7 +12,6 @@ from pypy.module.micronumpy.converters import shape_converter, order_converter import pypy.module.micronumpy.constants as NPY from .casting import scalar2dtype -from pypy.objspace.std.memoryobject import W_MemoryView def build_scalar(space, w_dtype, w_state): @@ -123,16 +123,23 @@ return None def _array_from_buffer_3118(space, w_object, dtype): - buf = w_object.buf - if buf.getformat(): - descr = _descriptor_from_pep3118_format(space, buf.getformat()) + try: + w_buf = space.call_method(space.builtin, "memoryview", w_object) + except OperationError as e: + if e.match(space, space.w_TypeError): + # object does not have buffer interface + return w_object + raise + format = space.getattr(w_buf,space.newbytes('format')) + if
format: + descr = _descriptor_from_pep3118_format(space, space.str_w(format)) if not descr: return w_object if dtype and descr: raise oefmt(space.w_NotImplementedError, "creating an array from a memoryview while specifying dtype " "not supported") - if descr.elsize != buf.getitemsize(): + if descr.elsize != space.int_w(space.getattr(w_buf, space.newbytes('itemsize'))): msg = ("Item size computed from the PEP 3118 buffer format " "string does not match the actual item size.") space.warn(space.wrap(msg), space.w_RuntimeWarning) @@ -140,14 +147,17 @@ dtype = descr elif not dtype: dtype = descriptor.get_dtype_cache(space).w_stringdtype - dtype.elsize = buf.getitemsize() - nd = buf.getndim() - shape = buf.getshape() + dtype.elsize = space.int_w(space.getattr(w_buf, space.newbytes('itemsize'))) + nd = space.int_w(space.getattr(w_buf, space.newbytes('ndim'))) + shape = [space.int_w(d) for d in space.listview( + space.getattr(w_buf, space.newbytes('shape')))] strides = [] + buflen = space.len_w(w_buf) * dtype.elsize if shape: - strides = buf.getstrides() + strides = [space.int_w(d) for d in space.listview( + space.getattr(w_buf, space.newbytes('strides')))] if not strides: - d = len(buf) + d = buflen strides = [0] * nd for k in range(nd): if shape[k] > 0: @@ -155,17 +165,23 @@ strides[k] = d else: if nd == 1: - shape = [len(buf) / dtype.elsize, ] + shape = [buflen / dtype.elsize, ] strides = [dtype.elsize, ] elif nd > 1: msg = ("ndim computed from the PEP 3118 buffer format " "is greater than 1, but shape is NULL.") space.warn(space.wrap(msg), space.w_RuntimeWarning) return w_object - storage = buf.get_raw_address() - writable = not buf.readonly - w_ret = W_NDimArray.from_shape_and_storage(space, shape, storage, - storage_bytes=len(buf), dtype=dtype, w_base=w_object, + try: + w_data = rffi.cast(RAW_STORAGE_PTR, space.int_w(space.call_method(w_buf, '_pypy_raw_address'))) + except OperationError as e: + if e.match(space, space.w_ValueError): + return w_object + else: + raise 
e + writable = not space.bool_w(space.getattr(w_buf, space.newbytes('readonly'))) + w_ret = W_NDimArray.from_shape_and_storage(space, shape, w_data, + storage_bytes=buflen, dtype=dtype, w_base=w_object, writable=writable, strides=strides) if w_ret: return w_ret @@ -187,6 +203,7 @@ def _array(space, w_object, w_dtype=None, copy=True, w_order=None, subok=False): + from pypy.module.micronumpy.boxes import W_GenericBox # numpy testing calls array(type(array([]))) and expects a ValueError if space.isinstance_w(w_object, space.w_type): raise oefmt(space.w_ValueError, "cannot create ndarray from type instance") @@ -194,14 +211,17 @@ dtype = descriptor.decode_w_dtype(space, w_dtype) if not isinstance(w_object, W_NDimArray): w_array = try_array_method(space, w_object, w_dtype) - if w_array is not None: + if w_array is None: + if ( not space.isinstance_w(w_object, space.w_str) and + not space.isinstance_w(w_object, space.w_unicode) and + not isinstance(w_object, W_GenericBox)): + # use buffer interface + w_object = _array_from_buffer_3118(space, w_object, dtype) + else: # continue with w_array, but do further operations in place w_object = w_array copy = False dtype = w_object.get_dtype() - elif isinstance(w_object, W_MemoryView): - # use buffer interface - w_object = _array_from_buffer_3118(space, w_object, dtype) if not isinstance(w_object, W_NDimArray): w_array, _copy = try_interface_method(space, w_object, copy) if w_array is not None: diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -97,16 +97,7 @@ print "ERROR: did not implement return type for interpreter" raise TypeError(w_res) - if self.graph is not None: - return - - from pypy.module.micronumpy import ctors - def unimplemented(*args): - "Only for W_MemoryView objects, which are not compiled in" - raise NotImplementedError - prev_3118 = ctors._array_from_buffer_3118 - 
ctors._array_from_buffer_3118 = unimplemented - try: + if self.graph is None: interp, graph = self.meta_interp(f, [0], listops=True, listcomp=True, @@ -116,8 +107,6 @@ vec=True) self.__class__.interp = interp self.__class__.graph = graph - finally: - ctors._array_from_buffer_3118 = prev_3118 def check_vectorized(self, expected_tried, expected_success): profiler = get_profiler() From pypy.commits at gmail.com Mon Sep 5 09:55:45 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 05 Sep 2016 06:55:45 -0700 (PDT) Subject: [pypy-commit] pypy buffer-interface: use tobytes if get_raw_address fails, for StringBuffer type buffers Message-ID: <57cd7961.0558c20a.67473.4ed2@mx.google.com> Author: Matti Picus Branch: buffer-interface Changeset: r86880:da7d464c1703 Date: 2016-09-05 16:54 +0300 http://bitbucket.org/pypy/pypy/changeset/da7d464c1703/ Log: use tobytes if get_raw_address fails, for StringBuffer type buffers diff --git a/pypy/module/cpyext/memoryobject.py b/pypy/module/cpyext/memoryobject.py --- a/pypy/module/cpyext/memoryobject.py +++ b/pypy/module/cpyext/memoryobject.py @@ -26,13 +26,17 @@ return view try: view.c_buf = rffi.cast(rffi.VOIDP, w_obj.buf.get_raw_address()) + view.c_obj = as_pyobj(space, w_obj) + rffi.setintfield(view, 'c_readonly', w_obj.buf.readonly) + isstr = False except ValueError: - return view + w_s = w_obj.descr_tobytes(space) + view.c_obj = as_pyobj(space, w_s) + rffi.setintfield(view, 'c_readonly', 1) + isstr = True view.c_len = w_obj.getlength() - view.c_obj = as_pyobj(space, w_obj) incref(space, view.c_obj) view.c_itemsize = w_obj.buf.getitemsize() - rffi.setintfield(view, 'c_readonly', w_obj.buf.readonly) ndim = w_obj.buf.getndim() rffi.setintfield(view, 'c_ndim', ndim) view.c_format = rffi.str2charp(w_obj.buf.getformat()) diff --git a/pypy/module/cpyext/test/test_memoryobject.py b/pypy/module/cpyext/test/test_memoryobject.py --- a/pypy/module/cpyext/test/test_memoryobject.py +++ b/pypy/module/cpyext/test/test_memoryobject.py @@ -1,6 
+1,6 @@ from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase - +from rpython.rlib.buffer import StringBuffer class TestMemoryViewObject(BaseApiTest): def test_fromobject(self, space, api): @@ -12,6 +12,12 @@ w_bytes = space.call_method(w_view, "tobytes") assert space.unwrap(w_bytes) == "hello" + def test_frombuffer(self, space, api): + w_buf = space.newbuffer(StringBuffer("hello")) + w_memoryview = api.PyMemoryView_FromObject(w_buf) + w_view = api.PyMemoryView_GET_BUFFER(w_memoryview) + ndim = w_view.c_ndim + assert ndim == 1 class AppTestBufferProtocol(AppTestCpythonExtensionBase): def test_buffer_protocol(self): From pypy.commits at gmail.com Mon Sep 5 10:40:43 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 05 Sep 2016 07:40:43 -0700 (PDT) Subject: [pypy-commit] pypy buffer-interface: use make_ref which has a keepalive_until_here() (arigato) Message-ID: <57cd83eb.081dc20a.e249d.049e@mx.google.com> Author: Matti Picus Branch: buffer-interface Changeset: r86881:8b66ea6779d0 Date: 2016-09-05 17:17 +0300 http://bitbucket.org/pypy/pypy/changeset/8b66ea6779d0/ Log: use make_ref which has a keepalive_until_here() (arigato) diff --git a/pypy/module/cpyext/memoryobject.py b/pypy/module/cpyext/memoryobject.py --- a/pypy/module/cpyext/memoryobject.py +++ b/pypy/module/cpyext/memoryobject.py @@ -1,6 +1,6 @@ from pypy.module.cpyext.api import (cpython_api, Py_buffer, CANNOT_FAIL, build_type_checkers, Py_ssize_tP) -from pypy.module.cpyext.pyobject import PyObject, as_pyobj, incref +from pypy.module.cpyext.pyobject import PyObject, make_ref, incref from rpython.rtyper.lltypesystem import lltype, rffi from pypy.objspace.std.memoryobject import W_MemoryView @@ -26,16 +26,15 @@ return view try: view.c_buf = rffi.cast(rffi.VOIDP, w_obj.buf.get_raw_address()) - view.c_obj = as_pyobj(space, w_obj) + view.c_obj = make_ref(space, w_obj) rffi.setintfield(view, 'c_readonly', w_obj.buf.readonly) 
isstr = False except ValueError: w_s = w_obj.descr_tobytes(space) - view.c_obj = as_pyobj(space, w_s) + view.c_obj = make_ref(space, w_s) rffi.setintfield(view, 'c_readonly', 1) isstr = True view.c_len = w_obj.getlength() - incref(space, view.c_obj) view.c_itemsize = w_obj.buf.getitemsize() ndim = w_obj.buf.getndim() rffi.setintfield(view, 'c_ndim', ndim) From pypy.commits at gmail.com Mon Sep 5 10:40:45 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 05 Sep 2016 07:40:45 -0700 (PDT) Subject: [pypy-commit] pypy buffer-interface: use make_ref which has a keepalive_until_here() (arigato) Message-ID: <57cd83ed.915c1c0a.bba0e.40c1@mx.google.com> Author: Matti Picus Branch: buffer-interface Changeset: r86882:948af0843de0 Date: 2016-09-05 17:40 +0300 http://bitbucket.org/pypy/pypy/changeset/948af0843de0/ Log: use make_ref which has a keepalive_until_here() (arigato) diff --git a/pypy/module/cpyext/buffer.py b/pypy/module/cpyext/buffer.py --- a/pypy/module/cpyext/buffer.py +++ b/pypy/module/cpyext/buffer.py @@ -3,7 +3,7 @@ from rpython.rlib.rarithmetic import widen from pypy.module.cpyext.api import ( cpython_api, CANNOT_FAIL, Py_buffer, Py_TPFLAGS_HAVE_NEWBUFFER, Py_ssize_tP) -from pypy.module.cpyext.pyobject import PyObject, as_pyobj, incref +from pypy.module.cpyext.pyobject import PyObject, make_ref, incref @cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) def PyObject_CheckBuffer(space, pyobj): @@ -41,8 +41,7 @@ except ValueError: raise BufferError("could not create buffer from object") view.c_len = buf.getlength() - view.c_obj = as_pyobj(space, w_obj) - incref(space, view.c_obj) + view.c_obj = make_ref(space, w_obj) ndim = buf.getndim() view.c_itemsize = buf.getitemsize() rffi.setintfield(view, 'c_readonly', int(buf.readonly)) From pypy.commits at gmail.com Mon Sep 5 10:42:27 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 05 Sep 2016 07:42:27 -0700 (PDT) Subject: [pypy-commit] pypy ppc-vsx-support: return register immediately if it is 
the right one (base case) Message-ID: <57cd8453.e16ec20a.59a7a.a8e4@mx.google.com> Author: Richard Plangger Branch: ppc-vsx-support Changeset: r86883:199d8fd839ac Date: 2016-09-05 16:41 +0200 http://bitbucket.org/pypy/pypy/changeset/199d8fd839ac/ Log: return register immediately if it is the right one (base case) diff --git a/rpython/jit/backend/x86/vector_ext.py b/rpython/jit/backend/x86/vector_ext.py --- a/rpython/jit/backend/x86/vector_ext.py +++ b/rpython/jit/backend/x86/vector_ext.py @@ -308,7 +308,7 @@ self.mc.CMPPD_xxi(lhsloc.value, rhsloc.value, 0) self.flush_vec_cc(rx86.Conditions["E"], lhsloc, resloc, sizeloc.value) - def flush_vec_cc(self, rev_cond, lhsloc, resloc, size): + def flush_vec_cc(self, rev_cond, lhsloc, resloc, size) # After emitting an instruction that leaves a boolean result in # a condition code (cc), call this. In the common case, result_loc # will be set to SPP by the regalloc, which in this case means @@ -322,6 +322,7 @@ assert lhsloc is xmm0 maskloc = X86_64_XMM_SCRATCH_REG self.mc.MOVAPD(maskloc, heap(self.element_ones[get_scale(size)])) + self.mc.PXOR(resloc, resloc) # note that resloc contains true false for each element by the last compare operation self.mc.PBLENDVB_xx(resloc.value, maskloc.value) @@ -643,7 +644,12 @@ instructions. 
""" xrm = self.xrm + curloc = self.loc(arg) + if curloc is selected_reg: + # nothing to do, it is already in the correct register + return selected_reg if selected_reg not in xrm.free_regs: + # we need to move some registers variable = None candidate_to_spill = None for var, reg in self.xrm.reg_bindings.items(): diff --git a/rpython/jit/metainterp/test/test_vector.py b/rpython/jit/metainterp/test/test_vector.py --- a/rpython/jit/metainterp/test/test_vector.py +++ b/rpython/jit/metainterp/test/test_vector.py @@ -156,9 +156,9 @@ i += size la = data.draw(st.lists(st.floats(), min_size=10, max_size=150)) - #la = [0.0,0.0,0.0,0.0,0.0,0.0,0.0] - #lb = [0.0,0.0,0.0,0.0,1.7976931348623157e+308,0.0,0.0] + #la = [0.0, 0.0, 0.0, 0.0, 5e-324, 0.0, 0.0, 5e-324, 0.0, 0.0] l = len(la) + #lb = [0.0] * l lb = data.draw(st.lists(st.floats(), min_size=l, max_size=l)) rawstorage = RawStorage() @@ -215,6 +215,9 @@ la = data.draw(st.lists(integers, min_size=10, max_size=150)) l = len(la) lb = data.draw(st.lists(integers, min_size=l, max_size=l)) + #la = [0] * 10 + #l = 10 + #lb = [0] * 10 rawstorage = RawStorage() va = rawstorage.new(la, type) From pypy.commits at gmail.com Mon Sep 5 11:18:54 2016 From: pypy.commits at gmail.com (sbauman) Date: Mon, 05 Sep 2016 08:18:54 -0700 (PDT) Subject: [pypy-commit] pypy force-virtual-state: Explicitly pass force_boxes arg in all cases (suggested by cfbolz) Message-ID: <57cd8cde.262ec20a.f163a.afb3@mx.google.com> Author: Spenser Andrew Bauman Branch: force-virtual-state Changeset: r86884:3ae2ff6dceac Date: 2016-09-05 11:18 -0400 http://bitbucket.org/pypy/pypy/changeset/3ae2ff6dceac/ Log: Explicitly pass force_boxes arg in all cases (suggested by cfbolz) diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -184,8 +184,8 @@ self.optimizer._newoperations) try: - new_virtual_state = 
self.jump_to_existing_trace(end_jump, label_op, - state.runtime_boxes) + new_virtual_state = self.jump_to_existing_trace( + end_jump, label_op, state.runtime_boxes force_boxes=False) except InvalidLoop: # inlining short preamble failed, jump to preamble self.jump_to_preamble(celltoken, end_jump, info) @@ -252,7 +252,8 @@ for a in jump_op.getarglist(): self.optimizer.force_box_for_end_of_preamble(a) try: - vs = self.jump_to_existing_trace(jump_op, None, runtime_boxes, False) + vs = self.jump_to_existing_trace(jump_op, None, runtime_boxes, + force_boxes=False) except InvalidLoop: return self.jump_to_preamble(cell_token, jump_op, info) if vs is None: @@ -265,7 +266,8 @@ else: # Try forcing boxes to avoid jumping to the preamble try: - vs = self.jump_to_existing_trace(jump_op, None, runtime_boxes, True) + vs = self.jump_to_existing_trace(jump_op, None, runtime_boxes, + force_boxes=True) except InvalidLoop: pass if vs is None: From pypy.commits at gmail.com Mon Sep 5 12:36:33 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 05 Sep 2016 09:36:33 -0700 (PDT) Subject: [pypy-commit] pypy release-5.x: Added tag release-pypy2.7-v5.4.1 for changeset 050d84dd7899 Message-ID: <57cd9f11.02d31c0a.cb3a3.7a23@mx.google.com> Author: Matti Picus Branch: release-5.x Changeset: r86885:4efaca9d74d6 Date: 2016-09-05 19:34 +0300 http://bitbucket.org/pypy/pypy/changeset/4efaca9d74d6/ Log: Added tag release-pypy2.7-v5.4.1 for changeset 050d84dd7899 diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -29,3 +29,4 @@ 7e8df3df96417c16c2d55b41352ec82c9c69c978 release-pypy2.7-v5.3.1 68bb3510d8212ae9efb687e12e58c09d29e74f87 release-pypy2.7-v5.4.0 77392ad263504df011ccfcabf6a62e21d04086d0 release-pypy2.7-v5.4.0 +050d84dd78997f021acf0e133934275d63547cc0 release-pypy2.7-v5.4.1 From pypy.commits at gmail.com Mon Sep 5 12:36:35 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 05 Sep 2016 09:36:35 -0700 (PDT) Subject: [pypy-commit] pypy release-5.x: update for 5.4.1 Message-ID: 
<57cd9f13.6937c20a.8dc4e.8207@mx.google.com> Author: Matti Picus Branch: release-5.x Changeset: r86886:a58f1f2dc35b Date: 2016-09-05 19:35 +0300 http://bitbucket.org/pypy/pypy/changeset/a58f1f2dc35b/ Log: update for 5.4.1 diff --git a/pypy/tool/release/repackage.sh b/pypy/tool/release/repackage.sh --- a/pypy/tool/release/repackage.sh +++ b/pypy/tool/release/repackage.sh @@ -1,7 +1,7 @@ # Edit these appropriately before running this script maj=5 min=4 -rev=0 +rev=1 branchname=release-$maj.x # ==OR== release-$maj.$min.x tagname=release-pypy2.7-v$maj.$min.$rev # ==OR== release-$maj.$min From pypy.commits at gmail.com Mon Sep 5 12:36:37 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 05 Sep 2016 09:36:37 -0700 (PDT) Subject: [pypy-commit] pypy default: Added tag release-pypy2.7-v5.4.1 for changeset 050d84dd7899 Message-ID: <57cd9f15.121a1c0a.10647.7144@mx.google.com> Author: Matti Picus Branch: Changeset: r86887:5558faa5f5f9 Date: 2016-09-05 19:35 +0300 http://bitbucket.org/pypy/pypy/changeset/5558faa5f5f9/ Log: Added tag release-pypy2.7-v5.4.1 for changeset 050d84dd7899 (grafted from 4efaca9d74d6913fcd60f0af46988fee4317e295) diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -30,3 +30,4 @@ 68bb3510d8212ae9efb687e12e58c09d29e74f87 release-pypy2.7-v5.4.0 68bb3510d8212ae9efb687e12e58c09d29e74f87 release-pypy2.7-v5.4.0 77392ad263504df011ccfcabf6a62e21d04086d0 release-pypy2.7-v5.4.0 +050d84dd78997f021acf0e133934275d63547cc0 release-pypy2.7-v5.4.1 From pypy.commits at gmail.com Mon Sep 5 12:36:39 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 05 Sep 2016 09:36:39 -0700 (PDT) Subject: [pypy-commit] pypy default: update for 5.4.1 Message-ID: <57cd9f17.c41f1c0a.59642.6d6f@mx.google.com> Author: Matti Picus Branch: Changeset: r86888:1a16bd6a1d86 Date: 2016-09-05 19:35 +0300 http://bitbucket.org/pypy/pypy/changeset/1a16bd6a1d86/ Log: update for 5.4.1 (grafted from a58f1f2dc35bd2d3b5bda83ed86317676bbb55b4) diff --git a/pypy/tool/release/repackage.sh 
b/pypy/tool/release/repackage.sh --- a/pypy/tool/release/repackage.sh +++ b/pypy/tool/release/repackage.sh @@ -1,7 +1,7 @@ # Edit these appropriately before running this script maj=5 min=4 -rev=0 +rev=1 branchname=release-$maj.x # ==OR== release-$maj.$min.x tagname=release-pypy2.7-v$maj.$min.$rev # ==OR== release-$maj.$min From pypy.commits at gmail.com Mon Sep 5 13:04:02 2016 From: pypy.commits at gmail.com (sbauman) Date: Mon, 05 Sep 2016 10:04:02 -0700 (PDT) Subject: [pypy-commit] pypy force-virtual-state: Merge with known working version Message-ID: <57cda582.919a1c0a.2dc4b.83c5@mx.google.com> Author: Spenser Andrew Bauman Branch: force-virtual-state Changeset: r86889:e06660b2588f Date: 2016-09-05 13:03 -0400 http://bitbucket.org/pypy/pypy/changeset/e06660b2588f/ Log: Merge with known working version diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO --- a/lib_pypy/cffi.egg-info/PKG-INFO +++ b/lib_pypy/cffi.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cffi -Version: 1.8.0 +Version: 1.8.1 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.8.0" -__version_info__ = (1, 8, 0) +__version__ = "1.8.1" +__version_info__ = (1, 8, 1) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. It may be older than __version__ diff --git a/lib_pypy/cffi/_cffi_include.h b/lib_pypy/cffi/_cffi_include.h --- a/lib_pypy/cffi/_cffi_include.h +++ b/lib_pypy/cffi/_cffi_include.h @@ -1,4 +1,20 @@ #define _CFFI_ + +/* We try to define Py_LIMITED_API before including Python.h. 
+ + Mess: we can only define it if Py_DEBUG, Py_TRACE_REFS and + Py_REF_DEBUG are not defined. This is a best-effort approximation: + we can learn about Py_DEBUG from pyconfig.h, but it is unclear if + the same works for the other two macros. Py_DEBUG implies them, + but not the other way around. +*/ +#ifndef _CFFI_USE_EMBEDDING +# include +# if !defined(Py_DEBUG) && !defined(Py_TRACE_REFS) && !defined(Py_REF_DEBUG) +# define Py_LIMITED_API +# endif +#endif + #include #ifdef __cplusplus extern "C" { diff --git a/lib_pypy/cffi/_embedding.h b/lib_pypy/cffi/_embedding.h --- a/lib_pypy/cffi/_embedding.h +++ b/lib_pypy/cffi/_embedding.h @@ -233,7 +233,7 @@ f = PySys_GetObject((char *)"stderr"); if (f != NULL && f != Py_None) { PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME - "\ncompiled with cffi version: 1.8.0" + "\ncompiled with cffi version: 1.8.1" "\n_cffi_backend module: ", f); modules = PyImport_GetModuleDict(); mod = PyDict_GetItemString(modules, "_cffi_backend"); diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -652,7 +652,7 @@ recompile(self, module_name, source, c_file=filename, call_c_compiler=False, **kwds) - def compile(self, tmpdir='.', verbose=0, target=None): + def compile(self, tmpdir='.', verbose=0, target=None, debug=None): """The 'target' argument gives the final file name of the compiled DLL. Use '*' to force distutils' choice, suitable for regular CPython C API modules. 
Use a file name ending in '.*' @@ -669,7 +669,7 @@ module_name, source, source_extension, kwds = self._assigned_source return recompile(self, module_name, source, tmpdir=tmpdir, target=target, source_extension=source_extension, - compiler_verbose=verbose, **kwds) + compiler_verbose=verbose, debug=debug, **kwds) def init_once(self, func, tag): # Read _init_once_cache[tag], which is either (False, lock) if diff --git a/lib_pypy/cffi/backend_ctypes.py b/lib_pypy/cffi/backend_ctypes.py --- a/lib_pypy/cffi/backend_ctypes.py +++ b/lib_pypy/cffi/backend_ctypes.py @@ -997,29 +997,43 @@ assert onerror is None # XXX not implemented return BType(source, error) + _weakref_cache_ref = None + def gcp(self, cdata, destructor): - BType = self.typeof(cdata) + if self._weakref_cache_ref is None: + import weakref + class MyRef(weakref.ref): + def __eq__(self, other): + myref = self() + return self is other or ( + myref is not None and myref is other()) + def __ne__(self, other): + return not (self == other) + def __hash__(self): + try: + return self._hash + except AttributeError: + self._hash = hash(self()) + return self._hash + self._weakref_cache_ref = {}, MyRef + weak_cache, MyRef = self._weakref_cache_ref if destructor is None: - if not (hasattr(BType, '_gcp_type') and - BType._gcp_type is BType): + try: + del weak_cache[MyRef(cdata)] + except KeyError: raise TypeError("Can remove destructor only on a object " "previously returned by ffi.gc()") - cdata._destructor = None return None - try: - gcp_type = BType._gcp_type - except AttributeError: - class CTypesDataGcp(BType): - __slots__ = ['_orig', '_destructor'] - def __del__(self): - if self._destructor is not None: - self._destructor(self._orig) - gcp_type = BType._gcp_type = CTypesDataGcp - new_cdata = self.cast(gcp_type, cdata) - new_cdata._orig = cdata - new_cdata._destructor = destructor + def remove(k): + cdata, destructor = weak_cache.pop(k, (None, None)) + if destructor is not None: + destructor(cdata) + + new_cdata = 
self.cast(self.typeof(cdata), cdata) + assert new_cdata is not cdata + weak_cache[MyRef(new_cdata, remove)] = (cdata, destructor) return new_cdata typeof = type diff --git a/lib_pypy/cffi/ffiplatform.py b/lib_pypy/cffi/ffiplatform.py --- a/lib_pypy/cffi/ffiplatform.py +++ b/lib_pypy/cffi/ffiplatform.py @@ -21,12 +21,12 @@ allsources.append(os.path.normpath(src)) return Extension(name=modname, sources=allsources, **kwds) -def compile(tmpdir, ext, compiler_verbose=0): +def compile(tmpdir, ext, compiler_verbose=0, debug=None): """Compile a C extension module using distutils.""" saved_environ = os.environ.copy() try: - outputfilename = _build(tmpdir, ext, compiler_verbose) + outputfilename = _build(tmpdir, ext, compiler_verbose, debug) outputfilename = os.path.abspath(outputfilename) finally: # workaround for a distutils bugs where some env vars can @@ -36,7 +36,7 @@ os.environ[key] = value return outputfilename -def _build(tmpdir, ext, compiler_verbose=0): +def _build(tmpdir, ext, compiler_verbose=0, debug=None): # XXX compact but horrible :-( from distutils.core import Distribution import distutils.errors, distutils.log @@ -44,6 +44,9 @@ dist = Distribution({'ext_modules': [ext]}) dist.parse_config_files() options = dist.get_option_dict('build_ext') + if debug is None: + debug = sys.flags.debug + options['debug'] = ('ffiplatform', debug) options['force'] = ('ffiplatform', True) options['build_lib'] = ('ffiplatform', tmpdir) options['build_temp'] = ('ffiplatform', tmpdir) diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py --- a/lib_pypy/cffi/recompiler.py +++ b/lib_pypy/cffi/recompiler.py @@ -275,8 +275,8 @@ def write_c_source_to_f(self, f, preamble): self._f = f prnt = self._prnt - if self.ffi._embedding is None: - prnt('#define Py_LIMITED_API') + if self.ffi._embedding is not None: + prnt('#define _CFFI_USE_EMBEDDING') # # first the '#include' (actually done by inlining the file's content) lines = self._rel_readlines('_cffi_include.h') @@ -1431,7 
+1431,7 @@ def recompile(ffi, module_name, preamble, tmpdir='.', call_c_compiler=True, c_file=None, source_extension='.c', extradir=None, - compiler_verbose=1, target=None, **kwds): + compiler_verbose=1, target=None, debug=None, **kwds): if not isinstance(module_name, str): module_name = module_name.encode('ascii') if ffi._windows_unicode: @@ -1467,7 +1467,8 @@ if target != '*': _patch_for_target(patchlist, target) os.chdir(tmpdir) - outputfilename = ffiplatform.compile('.', ext, compiler_verbose) + outputfilename = ffiplatform.compile('.', ext, + compiler_verbose, debug) finally: os.chdir(cwd) _unpatch_meths(patchlist) diff --git a/lib_pypy/cffi/setuptools_ext.py b/lib_pypy/cffi/setuptools_ext.py --- a/lib_pypy/cffi/setuptools_ext.py +++ b/lib_pypy/cffi/setuptools_ext.py @@ -69,16 +69,36 @@ else: _add_c_module(dist, ffi, module_name, source, source_extension, kwds) +def _set_py_limited_api(Extension, kwds): + """ + Add py_limited_api to kwds if setuptools >= 26 is in use. + Do not alter the setting if it already exists. + Setuptools takes care of ignoring the flag on Python 2 and PyPy. + """ + if 'py_limited_api' not in kwds: + import setuptools + try: + setuptools_major_version = int(setuptools.__version__.partition('.')[0]) + if setuptools_major_version >= 26: + kwds['py_limited_api'] = True + except ValueError: # certain development versions of setuptools + # If we don't know the version number of setuptools, we + # try to set 'py_limited_api' anyway. At worst, we get a + # warning. + kwds['py_limited_api'] = True + return kwds def _add_c_module(dist, ffi, module_name, source, source_extension, kwds): from distutils.core import Extension - from distutils.command.build_ext import build_ext + # We are a setuptools extension. Need this build_ext for py_limited_api. 
+ from setuptools.command.build_ext import build_ext from distutils.dir_util import mkpath from distutils import log from cffi import recompiler allsources = ['$PLACEHOLDER'] allsources.extend(kwds.pop('sources', [])) + kwds = _set_py_limited_api(Extension, kwds) ext = Extension(name=module_name, sources=allsources, **kwds) def make_mod(tmpdir, pre_run=None): diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -3,7 +3,7 @@ from rpython.rlib import rdynload, clibffi, entrypoint from rpython.rtyper.lltypesystem import rffi -VERSION = "1.8.0" +VERSION = "1.8.1" FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI try: diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1,7 +1,7 @@ # ____________________________________________________________ import sys -assert __version__ == "1.8.0", ("This test_c.py file is for testing a version" +assert __version__ == "1.8.1", ("This test_c.py file is for testing a version" " of cffi that differs from the one that we" " get from 'import _cffi_backend'") if sys.version_info < (3,): diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -29,8 +29,8 @@ #define PY_VERSION "2.7.10" /* PyPy version as a string */ -#define PYPY_VERSION "5.4.1-alpha0" -#define PYPY_VERSION_NUM 0x05040100 +#define PYPY_VERSION "5.5.0-alpha0" +#define PYPY_VERSION_NUM 0x05050000 /* Defined to mean a PyPy where cpyext holds more regular references to PyObjects, e.g. 
staying alive as long as the internal PyPy object diff --git a/pypy/module/cpyext/test/test_version.py b/pypy/module/cpyext/test/test_version.py --- a/pypy/module/cpyext/test/test_version.py +++ b/pypy/module/cpyext/test/test_version.py @@ -32,9 +32,11 @@ assert module.py_minor_version == sys.version_info.minor assert module.py_micro_version == sys.version_info.micro - @pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='pypy only test') + #@pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='pypy only test') def test_pypy_versions(self): import sys + if '__pypy__' not in sys.builtin_module_names: + py.test.skip("pypy only test") init = """ if (Py_IsInitialized()) { PyObject *m = Py_InitModule("foo", NULL); diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -10,7 +10,7 @@ #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h -PYPY_VERSION = (5, 4, 1, "alpha", 0) #XXX # sync patchlevel.h +PYPY_VERSION = (5, 5, 0, "alpha", 0) #XXX # sync patchlevel.h import pypy diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py @@ -1479,6 +1479,7 @@ assert p1[0] == 123 seen.append(1) q = ffi.gc(p, destructor) + assert ffi.typeof(q) is ffi.typeof(p) import gc; gc.collect() assert seen == [] del q diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py @@ -149,3 +149,28 @@ p = snip_setuptools_verify2.C.getpwuid(0) assert 
snip_setuptools_verify2.ffi.string(p.pw_name) == b"root" ''') + + def test_set_py_limited_api(self): + from cffi.setuptools_ext import _set_py_limited_api + try: + import setuptools + except ImportError as e: + py.test.skip(str(e)) + orig_version = setuptools.__version__ + try: + setuptools.__version__ = '26.0.0' + from setuptools import Extension + + kwds = _set_py_limited_api(Extension, {}) + assert kwds['py_limited_api'] == True + + setuptools.__version__ = '25.0' + kwds = _set_py_limited_api(Extension, {}) + assert not kwds + + setuptools.__version__ = 'development' + kwds = _set_py_limited_api(Extension, {}) + assert kwds['py_limited_api'] == True + + finally: + setuptools.__version__ = orig_version diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py @@ -1975,9 +1975,9 @@ def test_function_returns_partial_struct(): ffi = FFI() - ffi.cdef("struct a { int a; ...; }; struct a f1(int);") + ffi.cdef("struct aaa { int a; ...; }; struct aaa f1(int);") lib = verify(ffi, "test_function_returns_partial_struct", """ - struct a { int b, a, c; }; - static struct a f1(int x) { struct a s = {0}; s.a = x; return s; } + struct aaa { int b, a, c; }; + static struct aaa f1(int x) { struct aaa s = {0}; s.a = x; return s; } """) assert lib.f1(52).a == 52 diff --git a/rpython/rlib/ropenssl.py b/rpython/rlib/ropenssl.py --- a/rpython/rlib/ropenssl.py +++ b/rpython/rlib/ropenssl.py @@ -97,6 +97,21 @@ OPENSSL_VERSION_NUMBER = cconfig["OPENSSL_VERSION_NUMBER"] HAVE_TLSv1_2 = OPENSSL_VERSION_NUMBER >= 0x10001000 +if OPENSSL_VERSION_NUMBER >= 0x10100000: + eci.pre_include_bits = () + eci.post_include_bits = () + raise Exception("""OpenSSL version >= 1.1 not supported yet. 
+ + This program requires OpenSSL version 1.0.x, and may also + work with LibreSSL or OpenSSL 0.9.x. OpenSSL 1.1 is quite + some work to update to; contributions are welcome. Sorry, + you need to install an older version of OpenSSL for now. + Make sure this older version is the one picked up by this + program when it runs the compiler. + + This is the configuration used: %r""" % (eci,)) + + class CConfig: _compilation_info_ = eci diff --git a/rpython/rlib/runicode.py b/rpython/rlib/runicode.py --- a/rpython/rlib/runicode.py +++ b/rpython/rlib/runicode.py @@ -1,5 +1,5 @@ import sys -from rpython.rlib.objectmodel import specialize, we_are_translated +from rpython.rlib.objectmodel import specialize, we_are_translated, enforceargs from rpython.rlib.rstring import StringBuilder, UnicodeBuilder from rpython.rlib.rarithmetic import r_uint, intmask, widen from rpython.rlib.unicodedata import unicodedb @@ -145,19 +145,21 @@ _invalid_byte_3_of_4 = _invalid_cont_byte _invalid_byte_4_of_4 = _invalid_cont_byte - at specialize.arg(2) + at enforceargs(allow_surrogates=bool) def _invalid_byte_2_of_3(ordch1, ordch2, allow_surrogates): return (ordch2>>6 != 0x2 or # 0b10 (ordch1 == 0xe0 and ordch2 < 0xa0) # surrogates shouldn't be valid UTF-8! 
- or (not allow_surrogates and ordch1 == 0xed and ordch2 > 0x9f)) + or (ordch1 == 0xed and ordch2 > 0x9f and not allow_surrogates)) def _invalid_byte_2_of_4(ordch1, ordch2): return (ordch2>>6 != 0x2 or # 0b10 (ordch1 == 0xf0 and ordch2 < 0x90) or (ordch1 == 0xf4 and ordch2 > 0x8f)) - at specialize.arg(5) +# note: this specialize() is here for rtyper/rstr.py, which calls this +# function too but with its own fixed errorhandler + at specialize.arg_or_var(4) def str_decode_utf_8_impl(s, size, errors, final, errorhandler, allow_surrogates, result): if size == 0: @@ -330,6 +332,9 @@ return unicode_encode_utf_8_impl(s, size, errors, errorhandler, allow_surrogates=allow_surrogates) +# note: this specialize() is here for rtyper/rstr.py, which calls this +# function too but with its own fixed errorhandler + at specialize.arg_or_var(3) def unicode_encode_utf_8_impl(s, size, errors, errorhandler, allow_surrogates=False): assert(size >= 0) diff --git a/rpython/rlib/test/test_runicode.py b/rpython/rlib/test/test_runicode.py --- a/rpython/rlib/test/test_runicode.py +++ b/rpython/rlib/test/test_runicode.py @@ -55,7 +55,7 @@ s = s.encode(encoding) except LookupError as e: py.test.skip(e) - result, consumed = decoder(s, len(s), True) + result, consumed = decoder(s, len(s), 'strict', final=True) assert consumed == len(s) self.typeequals(trueresult, result) @@ -69,7 +69,7 @@ s = s.decode(encoding) except LookupError as e: py.test.skip(e) - result = encoder(s, len(s), True) + result = encoder(s, len(s), 'strict') self.typeequals(trueresult, result) def checkencodeerror(self, s, encoding, start, stop): @@ -823,9 +823,15 @@ def f(x): s1 = "".join(["\xd7\x90\xd6\x96\xeb\x96\x95\xf0\x90\x91\x93"] * x) - u, consumed = runicode.str_decode_utf_8(s1, len(s1), True) - s2 = runicode.unicode_encode_utf_8(u, len(u), True) - return s1 == s2 + u, consumed = runicode.str_decode_utf_8(s1, len(s1), 'strict', + allow_surrogates=True) + s2 = runicode.unicode_encode_utf_8(u, len(u), 'strict', + 
allow_surrogates=True) + u3, consumed3 = runicode.str_decode_utf_8(s1, len(s1), 'strict', + allow_surrogates=False) + s3 = runicode.unicode_encode_utf_8(u3, len(u3), 'strict', + allow_surrogates=False) + return s1 == s2 == s3 res = interpret(f, [2]) assert res diff --git a/rpython/rtyper/rint.py b/rpython/rtyper/rint.py --- a/rpython/rtyper/rint.py +++ b/rpython/rtyper/rint.py @@ -540,7 +540,7 @@ def ll_ullong_py_mod_zer(x, y): if y == 0: raise ZeroDivisionError - return llop.ullong_mod(UnsignedLongLong, x, y) + return ll_ullong_py_mod(x, y) @jit.dont_look_inside def ll_lllong_py_mod(x, y): diff --git a/rpython/rtyper/rstr.py b/rpython/rtyper/rstr.py --- a/rpython/rtyper/rstr.py +++ b/rpython/rtyper/rstr.py @@ -30,12 +30,13 @@ assert value is not None result = UnicodeBuilder(len(value)) self.rstr_decode_utf_8( - value, len(value), 'strict', final=False, + value, len(value), 'strict', final=True, errorhandler=self.ll_raise_unicode_exception_decode, allow_surrogates=False, result=result) return self.ll.llunicode(result.build()) - def ll_raise_unicode_exception_decode(self, errors, encoding, msg, s, + @staticmethod + def ll_raise_unicode_exception_decode(errors, encoding, msg, s, startingpos, endingpos): raise UnicodeDecodeError(encoding, s, startingpos, endingpos, msg) @@ -411,7 +412,8 @@ allow_surrogates=False) return self.ll.llstr(bytes) - def ll_raise_unicode_exception_encode(self, errors, encoding, msg, u, + @staticmethod + def ll_raise_unicode_exception_encode(errors, encoding, msg, u, startingpos, endingpos): raise UnicodeEncodeError(encoding, u, startingpos, endingpos, msg) diff --git a/rpython/rtyper/test/test_runicode.py b/rpython/rtyper/test/test_runicode.py --- a/rpython/rtyper/test/test_runicode.py +++ b/rpython/rtyper/test/test_runicode.py @@ -162,6 +162,18 @@ assert self.ll_to_string(self.interpret(f, [0])) == f(0) + def test_unicode_decode_final(self): + strings = ['\xc3', ''] + def f(n): + try: + strings[n].decode('utf-8') + except UnicodeDecodeError: 
+ return True + return False + + assert f(0) + assert self.interpret(f, [0]) + def test_utf_8_decoding_annotation(self): from rpython.rlib.runicode import str_decode_utf_8 def errorhandler(errors, encoding, msg, s, From pypy.commits at gmail.com Mon Sep 5 13:31:49 2016 From: pypy.commits at gmail.com (sbauman) Date: Mon, 05 Sep 2016 10:31:49 -0700 (PDT) Subject: [pypy-commit] pypy force-virtual-state: Missing comma Message-ID: <57cdac05.c3f0c20a.54624.f434@mx.google.com> Author: Spenser Andrew Bauman Branch: force-virtual-state Changeset: r86890:63719b0c0290 Date: 2016-09-05 13:30 -0400 http://bitbucket.org/pypy/pypy/changeset/63719b0c0290/ Log: Missing comma diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -185,7 +185,7 @@ try: new_virtual_state = self.jump_to_existing_trace( - end_jump, label_op, state.runtime_boxes force_boxes=False) + end_jump, label_op, state.runtime_boxes, force_boxes=False) except InvalidLoop: # inlining short preamble failed, jump to preamble self.jump_to_preamble(celltoken, end_jump, info) From pypy.commits at gmail.com Mon Sep 5 14:32:17 2016 From: pypy.commits at gmail.com (rlamy) Date: Mon, 05 Sep 2016 11:32:17 -0700 (PDT) Subject: [pypy-commit] pypy py3k: Skip test_random_attr.py on pypy3 Message-ID: <57cdba31.4676c20a.2e4e.3266@mx.google.com> Author: Ronan Lamy Branch: py3k Changeset: r86891:0b9f47589e08 Date: 2016-08-31 16:50 +0100 http://bitbucket.org/pypy/pypy/changeset/0b9f47589e08/ Log: Skip test_random_attr.py on pypy3 diff --git a/pypy/objspace/std/test/test_random_attr.py b/pypy/objspace/std/test/test_random_attr.py --- a/pypy/objspace/std/test/test_random_attr.py +++ b/pypy/objspace/std/test/test_random_attr.py @@ -1,4 +1,5 @@ import pytest +pytest.skip("This cannot possibly work on pypy3") import sys try: import __pypy__ From pypy.commits at gmail.com Mon Sep 5 
14:32:43 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 05 Sep 2016 11:32:43 -0700 (PDT) Subject: [pypy-commit] pypy.org extradoc: md5/sha1/sha256 for pypy 5.4.1 Message-ID: <57cdba4b.861b1c0a.6b21.9b0b@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r785:f8f81b14f7ad Date: 2016-09-05 20:32 +0200 http://bitbucket.org/pypy/pypy.org/changeset/f8f81b14f7ad/ Log: md5/sha1/sha256 for pypy 5.4.1 diff --git a/source/download.txt b/source/download.txt --- a/source/download.txt +++ b/source/download.txt @@ -448,20 +448,20 @@ Here are the checksums for each of the downloads -pypy2.7-v5.4.0 md5:: +pypy2.7-v5.4.1 md5:: - 50ea504e66f4d9297f5228d7a3b026ec pypy2-v5.4.0-linux-armel.tar.bz2 - e838ba554bc53c793f23c378a898fa0f pypy2-v5.4.0-linux-armhf-raring.tar.bz2 - b1b9b755631ef85d400d7690ece50210 pypy2-v5.4.0-linux-armhf-raspbian.tar.bz2 - df7180d5070ac19a234fc6c39b88f420 pypy2-v5.4.0-linux32.tar.bz2 - 5e228ba05b6eaa0af37321fd3f425891 pypy2-v5.4.0-linux64.tar.bz2 - b32d4c97275901665945f1f2813b6f26 pypy2-v5.4.0-osx64.tar.bz2 - 1d32ef8036a9fe718f397813bd070be8 pypy2-v5.4.0-ppc64.tar.bz2 - d8abb09416b4370ea40c51a710d12b18 pypy2-v5.4.0-ppc64le.tar.bz2 - b560c2811a3089f22b21db9beea7f273 pypy2-v5.4.0-s390x.tar.bz2 - c806bea7ecbb999fffeea3a06e6462e8 pypy2-v5.4.0-src.tar.bz2 - 26c2ab1c891651eb620dbde499088c1f pypy2-v5.4.0-src.zip - bd25b15c0d6c0f7c7f6fa75f1da35014 pypy2-v5.4.0-win32.zip + 70958c05af6628a66db5072ef1c72522 pypy2-v5.4.1-linux-armel.tar.bz2 + 5246b7b963689ec5b70291c2b104476a pypy2-v5.4.1-linux-armhf-raring.tar.bz2 + 106cfa49756df7ae3bf531ce6659d0ed pypy2-v5.4.1-linux-armhf-raspbian.tar.bz2 + f561bedb338fa09f011eaf1edac0faf1 pypy2-v5.4.1-linux32.tar.bz2 + df1327fc3cd41a1ff860e90b5a901899 pypy2-v5.4.1-linux64.tar.bz2 + b24ebe9f4825fc05afd76a9e8f47018c pypy2-v5.4.1-osx64.tar.bz2 + b8e90edf11639d1757413c8bf5a11d49 pypy2-v5.4.1-ppc64.tar.bz2 + 55f6747988d981699e52a7c60aed8a7f pypy2-v5.4.1-ppc64le.tar.bz2 + 082cca9bb948c8b1389c35db7174396a 
pypy2-v5.4.1-s390x.tar.bz2 + 129e730c84f55133b9694bc48e2d4812 pypy2-v5.4.1-src.tar.bz2 + f20a420d90475b72c6ef8b4ab90377f0 pypy2-v5.4.1-src.zip + 125874d61b4ac4e2fd7d0b7c2db3b041 pypy2-v5.4.1-win32.zip pypy3.3-v5.2-alpha md5:: @@ -481,20 +481,20 @@ 009c970b5fa75754ae4c32a5d108a8d4 pypy-1.8-sandbox-linux.tar.bz2 -pypy2.7-5.4.0 sha1:: +pypy2.7-5.4.1 sha1:: - c50062a83e4bb9fc59b76901c92e7bf1ecd0351f pypy2-v5.4.0-linux-armel.tar.bz2 - f4ebad7a9a31dfa55b35cc01b0533ef8e31ab7c4 pypy2-v5.4.0-linux-armhf-raring.tar.bz2 - c0becdcb7f44e09947afab9df759313ec94563ef pypy2-v5.4.0-linux-armhf-raspbian.tar.bz2 - 63be7254bdecd4f3272bcc47f0da7f5db82435a0 pypy2-v5.4.0-linux32.tar.bz2 - b0e0405ca8f3b143e16122767eb5605d3388af0c pypy2-v5.4.0-linux64.tar.bz2 - 9c97f54d492886fcaae8611733bcc40a625c8245 pypy2-v5.4.0-osx64.tar.bz2 - 4a263167bbc89447e5adc2ed687ed44798bbca08 pypy2-v5.4.0-ppc64.tar.bz2 - b3554db74a826fd8e86f1132e9c2cb2e49caac1c pypy2-v5.4.0-ppc64le.tar.bz2 - 165920a2d0eeda83e8808e7fce93f2a9db7f736a pypy2-v5.4.0-s390x.tar.bz2 - 95163f8f3c8e9e52e126fc1807d8d94e3d224aec pypy2-v5.4.0-src.tar.bz2 - b26546821836cb4bfda0160d37d4dd31fd3aace8 pypy2-v5.4.0-src.zip - 5ec0ca235cc68b557770b8cf5e1e49bd7b1a0aad pypy2-v5.4.0-win32.zip + 74fea5a7a0a3d8c899404ddc8c0296d4bf4ca3d0 pypy2-v5.4.1-linux-armel.tar.bz2 + 4c72d94325567d2079ca5021da3cba6cbc835744 pypy2-v5.4.1-linux-armhf-raring.tar.bz2 + 18486da20d2513c083be308f8222f83f80c74671 pypy2-v5.4.1-linux-armhf-raspbian.tar.bz2 + c38c40dfcbe9cba712a508fed0ea3dc6da5e2b7c pypy2-v5.4.1-linux32.tar.bz2 + 2c9a45c1bf67d8f2fac9d09e082f059ce892f291 pypy2-v5.4.1-linux64.tar.bz2 + 765bed9e45fa58a6f65ae20ff28b5e39beb56793 pypy2-v5.4.1-osx64.tar.bz2 + 6e73fa972ca01d58e95b36f133319bff0cc66876 pypy2-v5.4.1-ppc64.tar.bz2 + 4410c7514328d936084b00ac9d01af1aecdc7289 pypy2-v5.4.1-ppc64le.tar.bz2 + 8726412fcbaea859f21fb4b1c21fd5832e4c56d7 pypy2-v5.4.1-s390x.tar.bz2 + 0d865c16a3779f492b7f4687cd46c21bbfc05609 pypy2-v5.4.1-src.tar.bz2 + 
8f898a052786d3b60e9effe162c15fa572a5f52d pypy2-v5.4.1-src.zip + 21958b782dc727a0be3bbc248e0ca9af18305654 pypy2-v5.4.1-win32.zip pypy3.3-v5.2-alpha sha1:: @@ -508,20 +508,20 @@ 4b31ab492716ea375dd090bbacdf3d7c2d483059 pypy3.3-v5.2.0-alpha1-src.tar.bz2 d9f5b64f144ebec1a200156809fbbe04fdf7eb7e pypy3.3-v5.2.0-alpha1-src.zip -pypy2.7-5.4.0 sha256:: +pypy2.7-5.4.1 sha256:: - 04509044f21bb41ee6d3fafcf637fc0c586c248d4cdae6ac3357606a7b660fdb pypy2-v5.4.0-linux-armel.tar.bz2 - 95c690bcae6771ebce6cf06c7c2842e0662e007e35162afc963337aa597b471a pypy2-v5.4.0-linux-armhf-raring.tar.bz2 - 839b08db89b7e20cb670b8cf02596e033ea0b76fb8336af7bedfbb04b6b502da pypy2-v5.4.0-linux-armhf-raspbian.tar.bz2 - ce581270464b14cdecd13dedb9bd7bf98232f767ac4ac282229a405d8e807af1 pypy2-v5.4.0-linux32.tar.bz2 - bdfea513d59dcd580970cb6f79f3a250d00191fd46b68133d5327e924ca845f8 pypy2-v5.4.0-linux64.tar.bz2 - 3adf21c2bf3432759c99123f21240d71a72aba81d73129e48ef912c34631b723 pypy2-v5.4.0-osx64.tar.bz2 - dc09a057264dafb7e4bceca57b6a6ba3b0a5273e125a9b29da32b8439f980270 pypy2-v5.4.0-ppc64.tar.bz2 - 4feb0711e7c235b247f8ea0b22e8a676f89e8831488b7a4e9c7f3a6943d07052 pypy2-v5.4.0-ppc64le.tar.bz2 - 6bceb2760b1c7d6105d20207102862160ddddfd9b1a2707b3a8d866ac29e08d3 pypy2-v5.4.0-s390x.tar.bz2 - d9568ebe9a14d0eaefde887d78f3cba63d665e95c0d234bb583932341f55a655 pypy2-v5.4.0-src.tar.bz2 - 3c165676be8df3b482727438836a9a240ea641392ddd60593f825e1d50029022 pypy2-v5.4.0-src.zip - 442c0a917781b6155bf78d2648f1ccd9a36c321926a043f83efcea22a99960b4 pypy2-v5.4.0-win32.zip + 8925b76fe9ca6f960d8f914ed67f7a3c52ce2b4c65fa71a5ef7d4b285c2c3a36 pypy2-v5.4.1-linux-armel.tar.bz2 + 0213b0d948ae0afea8b4cb93f08e55b0562522b3ab8f2706c4e22ffe8cd86f84 pypy2-v5.4.1-linux-armhf-raring.tar.bz2 + 2daee13ec1836c1041c89c18d9514134ff606dc3648fc6304611eb1ec0819289 pypy2-v5.4.1-linux-armhf-raspbian.tar.bz2 + 85bccf8679f908c08850115fe74325474fe1b2e1e793c147d1fa484b56472b12 pypy2-v5.4.1-linux32.tar.bz2 + 
06c29d59565d9fdb618ed8aa730e05cf975da21158955591dff38d9e305af074 pypy2-v5.4.1-linux64.tar.bz2 + 507c81af9ca302c67a582255306529f88fba56760d353e17d667a114eee1f7e2 pypy2-v5.4.1-osx64.tar.bz2 + c11b37f5e97b647003426987e223d75a0dc0da1ecc35675ddad0af8a9add972d pypy2-v5.4.1-ppc64.tar.bz2 + 652d97fbd574d0349f7aa8b37c8c5a1238ed0cd3d6b68cf2ea8280b7ead4c7ad pypy2-v5.4.1-ppc64le.tar.bz2 + 987b3354dcbed5fd3f0d8d9d1484a259f0dff97da5d11a84b354c6e61a4af891 pypy2-v5.4.1-s390x.tar.bz2 + 92af82664ace96d721c66dbe8726d4f39c7d01f568d9df56c11149be2960238f pypy2-v5.4.1-src.tar.bz2 + 08148d1157dd16f402c7844fc0cdfde9e7d187c7fd1549a93e888e2fd13828bf pypy2-v5.4.1-src.zip + b703224af4e99243d090783a7b685063da7ba01ef28bb99a89cacfce2fb0dfc2 pypy2-v5.4.1-win32.zip pypy3.3-v5.2-alpha sha256:: From pypy.commits at gmail.com Mon Sep 5 15:16:08 2016 From: pypy.commits at gmail.com (ntruessel) Date: Mon, 05 Sep 2016 12:16:08 -0700 (PDT) Subject: [pypy-commit] pypy quad-color-gc: Update qcgc codebase Message-ID: <57cdc478.c75dc20a.fc357.14ed@mx.google.com> Author: Nicolas Truessel Branch: quad-color-gc Changeset: r86892:37cdb0f205ea Date: 2016-09-05 21:15 +0200 http://bitbucket.org/pypy/pypy/changeset/37cdb0f205ea/ Log: Update qcgc codebase diff --git a/rpython/memory/gctransform/qcgcframework.py b/rpython/memory/gctransform/qcgcframework.py --- a/rpython/memory/gctransform/qcgcframework.py +++ b/rpython/memory/gctransform/qcgcframework.py @@ -2,7 +2,8 @@ from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.rtyper import rmodel from rpython.rtyper.lltypesystem.lloperation import llop -from rpython.memory.gctransform.framework import (BaseFrameworkGCTransformer, BaseRootWalker) +from rpython.memory.gctransform.framework import (BaseFrameworkGCTransformer, + BaseRootWalker, TYPE_ID, WEAKREF, WEAKREFPTR) VISIT_FPTR = lltype.Ptr(lltype.FuncType([llmemory.Address], lltype.Void)) @@ -34,6 +35,12 @@ [SomeAddress(), SomePtr(VISIT_FPTR)], s_None)) + #Compilation error when overriding, no 
idea why + #def finish_tables(self): + # BaseFrameworkGCTransformer.finish_tables(self) + #Makes test fail, works when translating pypy (but compiling still fails) + #assert len(self.layoutbuilder.addresses_of_static_ptrs_in_nongc) == 2 + def gc_header_for(self, obj, needs_hash=False): hdr = self.gcdata.gc.gcheaderbuilder.header_of_object(obj) if needs_hash: @@ -77,6 +84,40 @@ # hop.genop("cast_adr_to_ptr", [v_adr], # resultvar = hop.spaceop.result) + def gct_weakref_create(self, hop): + # Custom weakref creation as their registration is slightly different + op = hop.spaceop + + type_id = self.get_type_id(WEAKREF) + + c_type_id = rmodel.inputconst(TYPE_ID, type_id) + info = self.layoutbuilder.get_info(type_id) + c_size = rmodel.inputconst(lltype.Signed, info.fixedsize) + malloc_ptr = self.malloc_fixedsize_ptr + c_false = rmodel.inputconst(lltype.Bool, False) + c_has_weakptr = rmodel.inputconst(lltype.Bool, True) + args = [self.c_const_gc, c_type_id, c_size, + c_false, c_false, c_has_weakptr] + + # push and pop the current live variables *including* the argument + # to the weakref_create operation, which must be kept alive if the GC + # needs to collect + livevars = self.push_roots(hop, keep_current_args=True) + v_result = hop.genop("direct_call", [malloc_ptr] + args, + resulttype=llmemory.GCREF) + v_result = hop.genop("cast_opaque_ptr", [v_result], + resulttype=WEAKREFPTR) + self.pop_roots(hop, livevars) + # + v_instance, = op.args + v_addr = hop.genop("cast_ptr_to_adr", [v_instance], + resulttype=llmemory.Address) + hop.genop("bare_setfield", + [v_result, rmodel.inputconst(lltype.Void, "weakptr"), v_addr]) + v_weakref = hop.genop("cast_ptr_to_weakrefptr", [v_result], + resulttype=llmemory.WeakRefPtr) + hop.cast_result(v_weakref) + class QcgcRootWalker(BaseRootWalker): def walk_stack_roots(self, collect_stack_root, is_minor=False): raise NotImplementedError diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- 
a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -515,10 +515,9 @@ # can malloc a GC object. # __________ qcgc operations __________ - 'qcgc_allocate': LLOp(canmallocgc=True), - 'qcgc_collect': LLOp(canmallocgc=True), - 'qcgc_is_prebuilt': LLOp(), - 'qcgc_write_barrier': LLOp(), + 'qcgc_allocate': LLOp(canmallocgc=True), + 'qcgc_collect': LLOp(canmallocgc=True), + 'qcgc_write_barrier': LLOp(), # __________ weakrefs __________ diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -960,12 +960,6 @@ def OP_QCGC_COLLECT(self, op): return 'qcgc_collect();' - def OP_QCGC_IS_PREBUILT(self, op): - obj = self.expr(op.args[0]) - result = self.expr(op.result) - return '%s = (((object_t *) %s)->flags & QCGC_PREBUILT_OBJECT) != 0;' % ( - result, obj) - def OP_QCGC_WRITE_BARRIER(self, op): obj = self.expr(op.args[0]) return 'qcgc_write(%s);' % (obj,) diff --git a/rpython/translator/c/src/qcgc/bag.c b/rpython/translator/c/src/qcgc/bag.c --- a/rpython/translator/c/src/qcgc/bag.c +++ b/rpython/translator/c/src/qcgc/bag.c @@ -6,3 +6,4 @@ DEFINE_BAG(linear_free_list, cell_t *); DEFINE_BAG(exp_free_list, struct exp_free_list_item_s); DEFINE_BAG(hbbucket, struct hbtable_entry_s); +DEFINE_BAG(weakref_bag, struct weakref_bag_item_s); diff --git a/rpython/translator/c/src/qcgc/bag.h b/rpython/translator/c/src/qcgc/bag.h --- a/rpython/translator/c/src/qcgc/bag.h +++ b/rpython/translator/c/src/qcgc/bag.h @@ -92,7 +92,13 @@ bool mark_flag; }; +struct weakref_bag_item_s { + object_t *weakrefobj; + object_t **target; +}; + DECLARE_BAG(arena_bag, arena_t *); DECLARE_BAG(linear_free_list, cell_t *); DECLARE_BAG(exp_free_list, struct exp_free_list_item_s); DECLARE_BAG(hbbucket, struct hbtable_entry_s); +DECLARE_BAG(weakref_bag, struct weakref_bag_item_s); diff --git a/rpython/translator/c/src/qcgc/config.h b/rpython/translator/c/src/qcgc/config.h 
--- a/rpython/translator/c/src/qcgc/config.h +++ b/rpython/translator/c/src/qcgc/config.h @@ -1,15 +1,14 @@ #pragma once -#define CHECKED 0 // Enable runtime sanity checks - // warning: huge performance impact -#define DEBUG_ZERO_ON_SWEEP 0 // Zero memory on sweep (debug only) +#define CHECKED 1 // Enable runtime sanity checks +#define DEBUG_ZERO_ON_SWEEP 1 // Zero memory on sweep (debug only) #define QCGC_INIT_ZERO 1 // Init new objects with zero bytes /** * Event logger */ -#define EVENT_LOG 0 // Enable event log +#define EVENT_LOG 1 // Enable event log #define LOGFILE "./qcgc_events.log" // Default logfile #define LOG_ALLOCATION 0 // Enable allocation log (warning: // significant performance impact) @@ -31,6 +30,12 @@ #define QCGC_SMALL_FREE_LIST_INIT_SIZE 16 // Initial size for small free lists /** + * Auto Mark/Collect + */ +#define QCGC_MAJOR_COLLECTION_THRESHOLD (5 * (1< +#include "bag.h" +#include "gray_stack.h" #include "shadow_stack.h" /** @@ -25,7 +27,10 @@ struct qcgc_state { shadow_stack_t *shadow_stack; shadow_stack_t *prebuilt_objects; + weakref_bag_t *weakrefs; gray_stack_t *gp_gray_stack; size_t gray_stack_size; gc_phase_t phase; + size_t bytes_since_collection; + size_t bytes_since_incmark; } qcgc_state; diff --git a/rpython/translator/c/src/qcgc/hugeblocktable.c b/rpython/translator/c/src/qcgc/hugeblocktable.c --- a/rpython/translator/c/src/qcgc/hugeblocktable.c +++ b/rpython/translator/c/src/qcgc/hugeblocktable.c @@ -45,6 +45,17 @@ return false; } +bool qcgc_hbtable_has(object_t *object) { + hbbucket_t *b = qcgc_hbtable.bucket[bucket(object)]; + size_t count = b->count; + for (size_t i = 0; i < count; i++) { + if (b->items[i].object == object) { + return true; + } + } + return false; +} + bool qcgc_hbtable_is_marked(object_t *object) { hbbucket_t *b = qcgc_hbtable.bucket[bucket(object)]; size_t count = b->count; diff --git a/rpython/translator/c/src/qcgc/hugeblocktable.h b/rpython/translator/c/src/qcgc/hugeblocktable.h --- 
a/rpython/translator/c/src/qcgc/hugeblocktable.h +++ b/rpython/translator/c/src/qcgc/hugeblocktable.h @@ -20,5 +20,6 @@ void qcgc_hbtable_destroy(void); void qcgc_hbtable_insert(object_t *object); bool qcgc_hbtable_mark(object_t *object); +bool qcgc_hbtable_has(object_t *object); bool qcgc_hbtable_is_marked(object_t *object); void qcgc_hbtable_sweep(void); diff --git a/rpython/translator/c/src/qcgc/qcgc.c b/rpython/translator/c/src/qcgc/qcgc.c --- a/rpython/translator/c/src/qcgc/qcgc.c +++ b/rpython/translator/c/src/qcgc/qcgc.c @@ -10,20 +10,41 @@ #include "hugeblocktable.h" #include "event_logger.h" +#define env_or_fallback(var, env_name, fallback) while(0) { \ + char *env_val = getenv(env_name); \ + if (env_val != NULL) { \ + if (1 != sscanf(env_val, "%zu", &var)) { \ + var = fallback; \ + } \ + } \ +} + void qcgc_mark(bool incremental); void qcgc_pop_object(object_t *object); void qcgc_push_object(object_t *object); void qcgc_sweep(void); +static size_t major_collection_threshold = QCGC_MAJOR_COLLECTION_THRESHOLD; +static size_t incmark_threshold = QCGC_INCMARK_THRESHOLD; + +QCGC_STATIC void update_weakrefs(void); + void qcgc_initialize(void) { qcgc_state.shadow_stack = qcgc_shadow_stack_create(QCGC_SHADOWSTACK_SIZE); - qcgc_state.prebuilt_objects = qcgc_shadow_stack_create(16); //XXX + qcgc_state.prebuilt_objects = qcgc_shadow_stack_create(16); // XXX + qcgc_state.weakrefs = qcgc_weakref_bag_create(16); // XXX qcgc_state.gp_gray_stack = qcgc_gray_stack_create(16); // XXX qcgc_state.gray_stack_size = 0; qcgc_state.phase = GC_PAUSE; + qcgc_state.bytes_since_collection = 0; + qcgc_state.bytes_since_incmark = 0; qcgc_allocator_initialize(); qcgc_hbtable_initialize(); qcgc_event_logger_initialize(); + + env_or_fallback(major_collection_threshold, "QCGC_MAJOR_COLLECTION", + QCGC_MAJOR_COLLECTION_THRESHOLD); + env_or_fallback(incmark_threshold, "QCGC_INCMARK", QCGC_INCMARK_THRESHOLD); } void qcgc_destroy(void) { @@ -32,6 +53,7 @@ qcgc_allocator_destroy(); 
free(qcgc_state.shadow_stack); free(qcgc_state.prebuilt_objects); + free(qcgc_state.weakrefs); free(qcgc_state.gp_gray_stack); } @@ -113,6 +135,14 @@ (uint8_t *) &size); #endif object_t *result; + + if (qcgc_state.bytes_since_collection > major_collection_threshold) { + qcgc_collect(); + } + if (qcgc_state.bytes_since_incmark > incmark_threshold) { + qcgc_mark(true); + } + if (size <= 1<flags & QCGC_PREBUILT_OBJECT) == 0); + assert((object_t *) qcgc_arena_addr((cell_t *) weakrefobj) != weakrefobj); +#endif + // NOTE: At this point, the target must point to a pointer to a valid + // object. We don't register any weakrefs to prebuilt objects as they + // are always valid. + if (((*target)->flags & QCGC_PREBUILT_OBJECT) == 0) { + qcgc_state.weakrefs = qcgc_weakref_bag_add(qcgc_state.weakrefs, + (struct weakref_bag_item_s) { + .weakrefobj = weakrefobj, + .target = target}); + } +} + +QCGC_STATIC void update_weakrefs(void) { + size_t i = 0; + while (i < qcgc_state.weakrefs->count) { + struct weakref_bag_item_s item = qcgc_state.weakrefs->items[i]; + // Check whether weakref object itself was collected + // We know the weakref object is a normal object + switch(qcgc_arena_get_blocktype((cell_t *) item.weakrefobj)) { + case BLOCK_EXTENT: // Fall through + case BLOCK_FREE: + // Weakref itself was collected, forget it + qcgc_state.weakrefs = qcgc_weakref_bag_remove_index( + qcgc_state.weakrefs, i); + continue; + case BLOCK_BLACK: + case BLOCK_WHITE: + // Weakref object is still valid, continue + break; + } + + // Check whether the weakref target is still valid + object_t *points_to = *item.target; + if ((object_t *) qcgc_arena_addr((cell_t *) points_to) == + points_to) { + // Huge object + if (qcgc_hbtable_has(points_to)) { + // Still valid + i++; + } else { + // Invalid + *(item.target) = NULL; + qcgc_state.weakrefs = qcgc_weakref_bag_remove_index( + qcgc_state.weakrefs, i); + } + } else { + // Normal object + switch(qcgc_arena_get_blocktype((cell_t *) points_to)) { + case 
BLOCK_BLACK: // Still valid + case BLOCK_WHITE: + i++; + break; + case BLOCK_EXTENT: // Fall through + case BLOCK_FREE: + // Invalid + *(item.target) = NULL; + qcgc_state.weakrefs = qcgc_weakref_bag_remove_index( + qcgc_state.weakrefs, i); + break; + } + } + } +} diff --git a/rpython/translator/c/src/qcgc/qcgc.h b/rpython/translator/c/src/qcgc/qcgc.h --- a/rpython/translator/c/src/qcgc/qcgc.h +++ b/rpython/translator/c/src/qcgc/qcgc.h @@ -83,6 +83,15 @@ object_t *qcgc_shadowstack_pop(void); /** + * Weakref registration + * + * @param weakrefobj Pointer to the weakref itself + * @param target Doublepointer to referenced object. + * The referenced object must be a valid object. + */ +void qcgc_register_weakref(object_t *weakrefobj, object_t **target); + +/** * Tracing function. * * This function traces an object, i.e. calls visit on every object referenced diff --git a/rpython/translator/c/test/test_newgc.py b/rpython/translator/c/test/test_newgc.py --- a/rpython/translator/c/test/test_newgc.py +++ b/rpython/translator/c/test/test_newgc.py @@ -1280,6 +1280,9 @@ GC_CAN_MOVE = False GC_CAN_SHRINK_ARRAY = False removetypeptr = True + + def test_framework_static_roots(self): + py.test.skip("not implemented") From pypy.commits at gmail.com Mon Sep 5 16:03:20 2016 From: pypy.commits at gmail.com (ntruessel) Date: Mon, 05 Sep 2016 13:03:20 -0700 (PDT) Subject: [pypy-commit] pypy quad-color-gc: Skip correct test, disable checks in qcgc and prepare weakref registration Message-ID: <57cdcf88.581d1c0a.fa58a.4f36@mx.google.com> Author: Nicolas Truessel Branch: quad-color-gc Changeset: r86893:a2b97ba105dc Date: 2016-09-05 22:02 +0200 http://bitbucket.org/pypy/pypy/changeset/a2b97ba105dc/ Log: Skip correct test, disable checks in qcgc and prepare weakref registration diff --git a/rpython/memory/gctransform/qcgcframework.py b/rpython/memory/gctransform/qcgcframework.py --- a/rpython/memory/gctransform/qcgcframework.py +++ b/rpython/memory/gctransform/qcgcframework.py @@ -112,10 
+112,15 @@ v_instance, = op.args v_addr = hop.genop("cast_ptr_to_adr", [v_instance], resulttype=llmemory.Address) + c_weakptr = rmodel.inputconst(lltype.Void, "weakptr") hop.genop("bare_setfield", - [v_result, rmodel.inputconst(lltype.Void, "weakptr"), v_addr]) + [v_result, c_weakptr, v_addr]) v_weakref = hop.genop("cast_ptr_to_weakrefptr", [v_result], resulttype=llmemory.WeakRefPtr) + # Register weakref + v_fieldaddr = hop.genop("direct_fieldptr", [v_result, c_weakptr], + resulttype=llmemory.Address) + #hop.genop("qcgc_register_weakref", [v_result, v_fieldaddr]) hop.cast_result(v_weakref) class QcgcRootWalker(BaseRootWalker): diff --git a/rpython/translator/c/src/qcgc/config.h b/rpython/translator/c/src/qcgc/config.h --- a/rpython/translator/c/src/qcgc/config.h +++ b/rpython/translator/c/src/qcgc/config.h @@ -1,6 +1,6 @@ #pragma once -#define CHECKED 1 // Enable runtime sanity checks +#define CHECKED 0 // Enable runtime sanity checks #define DEBUG_ZERO_ON_SWEEP 1 // Zero memory on sweep (debug only) #define QCGC_INIT_ZERO 1 // Init new objects with zero bytes @@ -8,7 +8,7 @@ /** * Event logger */ -#define EVENT_LOG 1 // Enable event log +#define EVENT_LOG 0 // Enable event log #define LOGFILE "./qcgc_events.log" // Default logfile #define LOG_ALLOCATION 0 // Enable allocation log (warning: // significant performance impact) diff --git a/rpython/translator/c/test/test_newgc.py b/rpython/translator/c/test/test_newgc.py --- a/rpython/translator/c/test/test_newgc.py +++ b/rpython/translator/c/test/test_newgc.py @@ -1281,7 +1281,7 @@ GC_CAN_SHRINK_ARRAY = False removetypeptr = True - def test_framework_static_roots(self): + def test_framework_nongc_static_root(self): py.test.skip("not implemented") From pypy.commits at gmail.com Mon Sep 5 16:15:42 2016 From: pypy.commits at gmail.com (ntruessel) Date: Mon, 05 Sep 2016 13:15:42 -0700 (PDT) Subject: [pypy-commit] pypy quad-color-gc: Fix weakref test Message-ID: <57cdd26e.411d1c0a.3eba2.cc39@mx.google.com> Author: Nicolas 
Truessel Branch: quad-color-gc Changeset: r86895:b88d51ba77e6 Date: 2016-09-05 22:12 +0200 http://bitbucket.org/pypy/pypy/changeset/b88d51ba77e6/ Log: Fix weakref test diff --git a/rpython/translator/c/test/test_newgc.py b/rpython/translator/c/test/test_newgc.py --- a/rpython/translator/c/test/test_newgc.py +++ b/rpython/translator/c/test/test_newgc.py @@ -1280,6 +1280,50 @@ GC_CAN_MOVE = False GC_CAN_SHRINK_ARRAY = False removetypeptr = True + + # Hoping I can do this here + def define_weakref(cls): + import weakref + + class A: + pass + + keepalive = [] + def collect(): + # Make sure the bump pointer leaves the current arena + for i in range(2**16): + a = A() + rgc.collect() + + def fn(): + n = 7000 + weakrefs = [] + a = None + for i in range(n): + if i & 1 == 0: + a = A() + a.index = i + assert a is not None + weakrefs.append(weakref.ref(a)) + if i % 7 == 6: + keepalive.append(a) + collect() + count_free = 0 + for i in range(n): + a = weakrefs[i]() + if i % 7 == 6: + assert a is not None + if a is not None: + assert a.index == i & ~1 + else: + count_free += 1 + return count_free + return fn + + def test_weakref(self): + res = self.run('weakref') + # more than half of them should have been freed, ideally up to 6000 + assert 3500 <= res <= 6000 def test_framework_nongc_static_root(self): py.test.skip("not implemented") From pypy.commits at gmail.com Mon Sep 5 16:15:40 2016 From: pypy.commits at gmail.com (ntruessel) Date: Mon, 05 Sep 2016 13:15:40 -0700 (PDT) Subject: [pypy-commit] pypy quad-color-gc: Register weakrefs (test failing due to not collected targets) Message-ID: <57cdd26c.8f8e1c0a.41a0.c7a8@mx.google.com> Author: Nicolas Truessel Branch: quad-color-gc Changeset: r86894:7c5c4f76938f Date: 2016-09-05 22:08 +0200 http://bitbucket.org/pypy/pypy/changeset/7c5c4f76938f/ Log: Register weakrefs (test failing due to not collected targets) diff --git a/rpython/memory/gctransform/qcgcframework.py b/rpython/memory/gctransform/qcgcframework.py --- 
a/rpython/memory/gctransform/qcgcframework.py +++ b/rpython/memory/gctransform/qcgcframework.py @@ -120,7 +120,7 @@ # Register weakref v_fieldaddr = hop.genop("direct_fieldptr", [v_result, c_weakptr], resulttype=llmemory.Address) - #hop.genop("qcgc_register_weakref", [v_result, v_fieldaddr]) + hop.genop("qcgc_register_weakref", [v_result, v_fieldaddr]) hop.cast_result(v_weakref) class QcgcRootWalker(BaseRootWalker): diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -963,3 +963,8 @@ def OP_QCGC_WRITE_BARRIER(self, op): obj = self.expr(op.args[0]) return 'qcgc_write(%s);' % (obj,) + + def OP_QCGC_REGISTER_WEAKREF(self, op): + weakref = self.expr(op.args[0]) + fieldaddr = self.expr(op.args[1]) + return 'qcgc_register_weakref(%s, %s);' % (weakref, fieldaddr) From pypy.commits at gmail.com Mon Sep 5 16:57:01 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 05 Sep 2016 13:57:01 -0700 (PDT) Subject: [pypy-commit] pypy buffer-interface: try to update documentation Message-ID: <57cddc1d.6740c20a.d88a6.e49a@mx.google.com> Author: Matti Picus Branch: buffer-interface Changeset: r86896:59716a90a56a Date: 2016-09-05 23:53 +0300 http://bitbucket.org/pypy/pypy/changeset/59716a90a56a/ Log: try to update documentation diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py --- a/pypy/module/cpyext/bytesobject.py +++ b/pypy/module/cpyext/bytesobject.py @@ -29,19 +29,17 @@ ## Solution ## -------- ## -## PyBytesObject contains two additional members: the ob_size and a pointer to a -## char ob_sval; it may be NULL. +## PyBytesObject contains two additional members: the ob_size and an array +## char ob_sval which holds a \x0 terminated string. ## ## - A string allocated by pypy will be converted into a PyBytesObject with a -## NULL buffer. 
The first time PyString_AsString() is called, memory is -## allocated (with flavor='raw') and content is copied. +## buffer holding \x0. The first time PyString_AsString() is called, the +## PyStringObject is reallocated, and the string copied into the buffer. The +## ob_size reflects the length of the string. ## ## - A string allocated with PyString_FromStringAndSize(NULL, size) will ## allocate a PyBytesObject structure, and a buffer with the specified -## size+1, but the reference won't be stored in the global map; there is no -## corresponding object in pypy. When from_ref() or Py_INCREF() is called, -## the pypy string is created, and added to the global map of tracked -## objects. The buffer is then supposed to be immutable. +## size+1, as part of the object. The buffer is then supposed to be immutable. ## ##- A buffer obtained from PyString_AS_STRING() could be mutable iff ## there is no corresponding pypy object for the string From pypy.commits at gmail.com Mon Sep 5 16:57:03 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 05 Sep 2016 13:57:03 -0700 (PDT) Subject: [pypy-commit] pypy buffer-interface: create fixed arrays of Py_MAX_NDIMS, arbitrarily set to 32, in Py_buffer rather than malloc and leak memory Message-ID: <57cddc1f.121a1c0a.10647.ce51@mx.google.com> Author: Matti Picus Branch: buffer-interface Changeset: r86897:d765d56cd700 Date: 2016-09-05 23:56 +0300 http://bitbucket.org/pypy/pypy/changeset/d765d56cd700/ Log: create fixed arrays of Py_MAX_NDIMS, arbitrarily set to 32, in Py_buffer rather than malloc and leak memory diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -122,7 +122,7 @@ METH_COEXIST METH_STATIC METH_CLASS Py_TPFLAGS_BASETYPE METH_NOARGS METH_VARARGS METH_KEYWORDS METH_O Py_TPFLAGS_HAVE_INPLACEOPS Py_TPFLAGS_HEAPTYPE Py_TPFLAGS_HAVE_CLASS Py_TPFLAGS_HAVE_NEWBUFFER -Py_LT Py_LE Py_EQ Py_NE Py_GT Py_GE Py_TPFLAGS_CHECKTYPES +Py_LT Py_LE Py_EQ 
Py_NE Py_GT Py_GE Py_TPFLAGS_CHECKTYPES Py_MAX_NDIMS """.split() for name in constant_names: setattr(CConfig_constants, name, rffi_platform.ConstantInteger(name)) @@ -645,6 +645,9 @@ ('format', rffi.CCHARP), ('shape', Py_ssize_tP), ('strides', Py_ssize_tP), + ('_format', rffi.UCHAR), + ('_shape', rffi.CFixedArray(Py_ssize_t, Py_MAX_NDIMS)), + ('_strides', rffi.CFixedArray(Py_ssize_t, Py_MAX_NDIMS)), ('suboffsets', Py_ssize_tP), #('smalltable', rffi.CFixedArray(Py_ssize_t, 2)), ('internal', rffi.VOIDP) diff --git a/pypy/module/cpyext/include/object.h b/pypy/module/cpyext/include/object.h --- a/pypy/module/cpyext/include/object.h +++ b/pypy/module/cpyext/include/object.h @@ -142,7 +142,8 @@ typedef Py_ssize_t (*segcountproc)(PyObject *, Py_ssize_t *); typedef Py_ssize_t (*charbufferproc)(PyObject *, Py_ssize_t, char **); -/* Py3k buffer interface */ +/* Py3k buffer interface, adapted for PyPy */ +#define Py_MAX_NDIMS 32 typedef struct bufferinfo { void *buf; PyObject *obj; /* owned reference */ @@ -156,12 +157,14 @@ char *format; Py_ssize_t *shape; Py_ssize_t *strides; - Py_ssize_t *suboffsets; - + Py_ssize_t *suboffsets; /* alway NULL for app-level objects*/ + unsigned char _format; + Py_ssize_t _strides[Py_MAX_NDIMS]; + Py_ssize_t _shape[Py_MAX_NDIMS]; /* static store for shape and strides of mono-dimensional buffers. 
*/ /* Py_ssize_t smalltable[2]; */ - void *internal; + void *internal; /* always NULL for app-level objects */ } Py_buffer; diff --git a/pypy/module/cpyext/memoryobject.py b/pypy/module/cpyext/memoryobject.py --- a/pypy/module/cpyext/memoryobject.py +++ b/pypy/module/cpyext/memoryobject.py @@ -1,5 +1,5 @@ from pypy.module.cpyext.api import (cpython_api, Py_buffer, CANNOT_FAIL, - build_type_checkers, Py_ssize_tP) + Py_MAX_NDIMS, build_type_checkers, Py_ssize_tP) from pypy.module.cpyext.pyobject import PyObject, make_ref, incref from rpython.rtyper.lltypesystem import lltype, rffi from pypy.objspace.std.memoryobject import W_MemoryView @@ -24,6 +24,10 @@ view = lltype.malloc(Py_buffer, flavor='raw', zero=True) if not isinstance(w_obj, W_MemoryView): return view + ndim = w_obj.buf.getndim() + if ndim >= Py_MAX_NDIMS: + # XXX warn? + return view try: view.c_buf = rffi.cast(rffi.VOIDP, w_obj.buf.get_raw_address()) view.c_obj = make_ref(space, w_obj) @@ -36,11 +40,11 @@ isstr = True view.c_len = w_obj.getlength() view.c_itemsize = w_obj.buf.getitemsize() - ndim = w_obj.buf.getndim() rffi.setintfield(view, 'c_ndim', ndim) - view.c_format = rffi.str2charp(w_obj.buf.getformat()) - view.c_shape = lltype.malloc(Py_ssize_tP.TO, ndim, flavor='raw') - view.c_strides = lltype.malloc(Py_ssize_tP.TO, ndim, flavor='raw') + view.c__format = rffi.cast(rffi.UCHAR, w_obj.buf.getformat()) + view.c_format = rffi.cast(rffi.CCHARP, view.c__format) + view.c_shape = rffi.cast(Py_ssize_tP, view.c__shape) + view.c_strides = rffi.cast(Py_ssize_tP, view.c__strides) shape = w_obj.buf.getshape() strides = w_obj.buf.getstrides() for i in range(ndim): @@ -50,4 +54,3 @@ view.c_internal = lltype.nullptr(rffi.VOIDP.TO) return view - From pypy.commits at gmail.com Mon Sep 5 17:08:43 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 05 Sep 2016 14:08:43 -0700 (PDT) Subject: [pypy-commit] pypy default: add release note for 5.4.1 Message-ID: <57cddedb.81091c0a.d9c91.d67d@mx.google.com> Author: Matti 
Picus Branch: Changeset: r86898:0c20173f73ec Date: 2016-09-06 00:08 +0300 http://bitbucket.org/pypy/pypy/changeset/0c20173f73ec/ Log: add release note for 5.4.1 diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -6,6 +6,7 @@ .. toctree:: + release-pypy2.7-v5.4.1.rst release-pypy2.7-v5.4.0.rst release-pypy2.7-v5.3.1.rst release-pypy2.7-v5.3.0.rst diff --git a/pypy/doc/release-pypy2.7-v5.4.1.rst b/pypy/doc/release-pypy2.7-v5.4.1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-pypy2.7-v5.4.1.rst @@ -0,0 +1,61 @@ +========== +PyPy 5.4.1 +========== + +We have released a bugfix for PyPy2.7-v5.4.0, released last week, +due to the following issues: + + * Update list of contributors in documentation and LICENSE file, + this was unfortunately left out of 5.4.0. My apologies to the new + contributors + + * Allow tests run with `-A` to find `libm.so` even if it is a script not a + dynamically loadable file + + * Bump `sys.setrecursionlimit()` when translating PyPy, for translating with CPython + + * Tweak a float comparison with 0 in `backendopt.inline` to avoid rounding errors + + * Fix for an issue where os.access() accepted a float for mode + + * Fix for an issue where `unicode.decode('utf8', 'custom_replace')` messed up + the last byte of a unicode string sometimes + + * Update built-in cffi_ to the soon-to-be-released 1.8.1 version + + * Explicitly detect that we found as-yet-unsupported OpenSSL 1.1, and crash + translation with a message asking for help porting it + +Thanks to those who reported the issues. + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison) +due to its integrated tracing JIT compiler. + +We also welcome developers of other +`dynamic languages`_ to see what RPython can do for them. 
+ +This release supports: + + * **x86** machines on most common operating systems + (Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD), + + * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux, + + * big- and little-endian variants of **PPC64** running Linux, + + * **s390x** running Linux + +.. _cffi: https://cffi.readthedocs.io +.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org +.. _`dynamic languages`: http://pypyjs.org + +Please update, and continue to help us make PyPy better. + +Cheers + +The PyPy Team + From pypy.commits at gmail.com Mon Sep 5 18:00:20 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 05 Sep 2016 15:00:20 -0700 (PDT) Subject: [pypy-commit] cffi default: Issue #283: initializer for nested anonymous structs inside unions Message-ID: <57cdeaf4.a3a3c20a.2eacd.0f24@mx.google.com> Author: Armin Rigo Branch: Changeset: r2761:58bab5bcadd2 Date: 2016-09-05 23:55 +0200 http://bitbucket.org/cffi/cffi/changeset/58bab5bcadd2/ Log: Issue #283: initializer for nested anonymous structs inside unions diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -187,10 +187,12 @@ Py_ssize_t cf_offset; short cf_bitshift; /* >= 0: bitshift; or BS_REGULAR or BS_EMPTY_ARRAY */ short cf_bitsize; + unsigned char cf_flags; /* BF_... 
*/ struct cfieldobject_s *cf_next; } CFieldObject; #define BS_REGULAR (-1) /* a regular field, not with bitshift */ #define BS_EMPTY_ARRAY (-2) /* a field which is an array 'type[0]' */ +#define BF_IGNORE_IN_CTOR 0x01 /* union field not in the first place */ static PyTypeObject CTypeDescr_Type; static PyTypeObject CField_Type; @@ -657,6 +659,7 @@ {"offset", T_PYSSIZET, OFF(cf_offset), READONLY}, {"bitshift", T_SHORT, OFF(cf_bitshift), READONLY}, {"bitsize", T_SHORT, OFF(cf_bitsize), READONLY}, + {"flags", T_UBYTE, OFF(cf_flags), READONLY}, {NULL} /* Sentinel */ }; #undef OFF @@ -1274,24 +1277,14 @@ return -1; } - if (ct->ct_flags & CT_UNION) { - Py_ssize_t n = PyObject_Size(init); - if (n < 0) - return -1; - if (n > 1) { - PyErr_Format(PyExc_ValueError, - "initializer for '%s': %zd items given, but " - "only one supported (use a dict if needed)", - ct->ct_name, n); - return -1; - } - } if (PyList_Check(init) || PyTuple_Check(init)) { PyObject **items = PySequence_Fast_ITEMS(init); Py_ssize_t i, n = PySequence_Fast_GET_SIZE(init); CFieldObject *cf = (CFieldObject *)ct->ct_extra; for (i=0; icf_flags & BF_IGNORE_IN_CTOR)) + cf = cf->cf_next; if (cf == NULL) { PyErr_Format(PyExc_ValueError, "too many initializers for '%s' (got %zd)", @@ -4085,7 +4078,7 @@ static CFieldObject * _add_field(PyObject *interned_fields, PyObject *fname, CTypeDescrObject *ftype, - Py_ssize_t offset, int bitshift, int fbitsize) + Py_ssize_t offset, int bitshift, int fbitsize, int flags) { int err; Py_ssize_t prev_size; @@ -4098,6 +4091,7 @@ cf->cf_offset = offset; cf->cf_bitshift = bitshift; cf->cf_bitsize = fbitsize; + cf->cf_flags = flags; Py_INCREF(fname); PyText_InternInPlace(&fname); @@ -4184,7 +4178,7 @@ int totalalignment = -1; CFieldObject **previous; int prev_bitfield_size, prev_bitfield_free; - int sflags = 0; + int sflags = 0, fflags; if (!PyArg_ParseTuple(args, "O!O!|Onii:complete_struct_or_union", &CTypeDescr_Type, &ct, @@ -4270,6 +4264,8 @@ if (alignment < falign && do_align) 
alignment = falign; + fflags = (is_union && i > 0) ? BF_IGNORE_IN_CTOR : 0; + if (fbitsize < 0) { /* not a bitfield: common case */ int bs_flag; @@ -4305,7 +4301,8 @@ cfsrc->cf_type, boffset / 8 + cfsrc->cf_offset, cfsrc->cf_bitshift, - cfsrc->cf_bitsize); + cfsrc->cf_bitsize, + cfsrc->cf_flags | fflags); if (*previous == NULL) goto error; previous = &(*previous)->cf_next; @@ -4315,7 +4312,7 @@ } else { *previous = _add_field(interned_fields, fname, ftype, - boffset / 8, bs_flag, -1); + boffset / 8, bs_flag, -1, fflags); if (*previous == NULL) goto error; previous = &(*previous)->cf_next; @@ -4445,7 +4442,8 @@ bitshift = 8 * ftype->ct_size - fbitsize - bitshift; *previous = _add_field(interned_fields, fname, ftype, - field_offset_bytes, bitshift, fbitsize); + field_offset_bytes, bitshift, fbitsize, + fflags); if (*previous == NULL) goto error; previous = &(*previous)->cf_next; diff --git a/testing/cffi0/backend_tests.py b/testing/cffi0/backend_tests.py --- a/testing/cffi0/backend_tests.py +++ b/testing/cffi0/backend_tests.py @@ -1414,6 +1414,7 @@ assert p.b == 12 assert p.c == 14 assert p.d == 14 + py.test.raises(ValueError, ffi.new, "struct foo_s *", [0, 0, 0, 0]) def test_nested_field_offset_align(self): ffi = FFI(backend=self.Backend()) @@ -1453,14 +1454,42 @@ assert p.b == 0 assert p.c == 14 assert p.d == 14 - p = ffi.new("union foo_u *", {'b': 12}) - assert p.a == 0 + p = ffi.new("union foo_u *", {'a': -63, 'b': 12}) + assert p.a == -63 assert p.b == 12 - assert p.c == 0 - assert p.d == 0 - # we cannot specify several items in the dict, even though - # in theory in this particular case it would make sense - # to give both 'a' and 'b' + assert p.c == -63 + assert p.d == -63 + p = ffi.new("union foo_u *", [123, 456]) + assert p.a == 123 + assert p.b == 456 + assert p.c == 123 + assert p.d == 123 + py.test.raises(ValueError, ffi.new, "union foo_u *", [0, 0, 0]) + + def test_nested_anonymous_struct_2(self): + ffi = FFI(backend=self.Backend()) + ffi.cdef(""" + 
struct foo_s { + int a; + union { int b; union { int c, d; }; }; + int e; + }; + """) + assert ffi.sizeof("struct foo_s") == 3 * SIZE_OF_INT + p = ffi.new("struct foo_s *", [11, 22, 33]) + assert p.a == 11 + assert p.b == p.c == p.d == 22 + assert p.e == 33 + py.test.raises(ValueError, ffi.new, "struct foo_s *", [11, 22, 33, 44]) + FOO = ffi.typeof("struct foo_s") + fields = [(name, fld.offset, fld.flags) for (name, fld) in FOO.fields] + assert fields == [ + ('a', 0 * SIZE_OF_INT, 0), + ('b', 1 * SIZE_OF_INT, 0), + ('c', 1 * SIZE_OF_INT, 1), + ('d', 1 * SIZE_OF_INT, 1), + ('e', 2 * SIZE_OF_INT, 0), + ] def test_cast_to_array_type(self): ffi = FFI(backend=self.Backend()) From pypy.commits at gmail.com Mon Sep 5 22:08:24 2016 From: pypy.commits at gmail.com (rlamy) Date: Mon, 05 Sep 2016 19:08:24 -0700 (PDT) Subject: [pypy-commit] pypy union-side-effects: Revert change to run_formatter() and fix unicode vs str issues. Message-ID: <57ce2518.a710c20a.5f195.890b@mx.google.com> Author: Ronan Lamy Branch: union-side-effects Changeset: r86899:5dc79f574fb9 Date: 2016-09-06 02:20 +0100 http://bitbucket.org/pypy/pypy/changeset/5dc79f574fb9/ Log: Revert change to run_formatter() and fix unicode vs str issues. diff --git a/pypy/objspace/std/newformat.py b/pypy/objspace/std/newformat.py --- a/pypy/objspace/std/newformat.py +++ b/pypy/objspace/std/newformat.py @@ -564,7 +564,7 @@ return space.wrap(self._pad(string)) def _get_locale(self, tp): - if tp == "n": + if tp == _lit("n"): dec, thousands, grouping = rlocale.numeric_formatting() elif self._thousands_sep: dec = "." 
@@ -608,10 +608,10 @@ spec.sign = "\0" spec.n_sign = 0 sign = self._sign - if sign == "+": + if sign == _lit("+"): spec.n_sign = 1 spec.sign = "-" if sign_char == "-" else "+" - elif sign == " ": + elif sign == _lit(" "): spec.n_sign = 1 spec.sign = "-" if sign_char == "-" else " " elif sign_char == "-": @@ -619,7 +619,7 @@ spec.sign = "-" extra_length = (spec.n_sign + spec.n_prefix + spec.n_decimal + spec.n_remainder) # Not padding or digits - if self._fill_char == "0" and self._align == "=": + if self._fill_char ==_lit( "0") and self._align == _lit("="): spec.n_min_width = self._width - extra_length if self._loc_thousands: self._group_digits(spec, digits[to_number:]) @@ -629,14 +629,14 @@ n_padding = self._width - (extra_length + n_grouped_digits) if n_padding > 0: align = self._align - if align == "<": + if align == _lit("<"): spec.n_rpadding = n_padding - elif align == ">": + elif align == _lit(">"): spec.n_lpadding = n_padding - elif align == "^": + elif align == _lit("^"): spec.n_lpadding = n_padding // 2 spec.n_rpadding = n_padding - spec.n_lpadding - elif align == "=": + elif align == _lit("="): spec.n_spadding = n_padding else: raise AssertionError("shouldn't reach") @@ -653,7 +653,7 @@ for i in range(d_state - 1, d_state - n_chars - 1, -1): buf.append(digits[i]) for i in range(n_zeros): - buf.append("0") + buf.append(_lit("0")) def _group_digits(self, spec, digits): buf = [] @@ -702,7 +702,7 @@ for c in s: index = ord(c) if ord("a") <= index <= ord("z"): - c = chr(index - 32) + c = _lit(chr(index - 32)) buf.append(c) return self.empty.join(buf) @@ -791,7 +791,7 @@ result = self._long_to_base(base, space.bigint_w(w_num)) n_prefix = skip_leading if self._alternate else 0 to_prefix = 0 - if result[0] == "-": + if result[0] == _lit("-"): sign_char = "-" skip_leading += 1 to_prefix += 1 @@ -874,21 +874,21 @@ return space.call_function(space.w_unicode, w_num) return self.space.str(w_num) tp = self._type - if (tp == "b" or - tp == "c" or - tp == "d" or - tp == 
"o" or - tp == "x" or - tp == "X" or - tp == "n"): + if (tp == _lit("b") or + tp == _lit("c") or + tp == _lit("d") or + tp == _lit("o") or + tp == _lit("x") or + tp == _lit("X") or + tp == _lit("n")): return self._format_int_or_long(w_num, kind) - elif (tp == "e" or - tp == "E" or - tp == "f" or - tp == "F" or - tp == "g" or - tp == "G" or - tp == "%"): + elif (tp == _lit("e") or + tp == _lit("E") or + tp == _lit("f") or + tp == _lit("F") or + tp == _lit("g") or + tp == _lit("G") or + tp == _lit("%")): w_float = space.float(w_num) return self._format_float(w_float) else: @@ -932,7 +932,7 @@ add_pct = False if self._precision == -1: self._precision = default_precision - result, special = rfloat.double_to_string(value, tp, + result, special = rfloat.double_to_string(value, str(tp)[0], self._precision, flags) if add_pct: result += "%" @@ -963,15 +963,15 @@ return space.call_function(space.w_unicode, w_float) return space.str(w_float) tp = self._type - if (tp == "\0" or - tp == "e" or - tp == "E" or - tp == "f" or - tp == "F" or - tp == "g" or - tp == "G" or - tp == "n" or - tp == "%"): + if (tp == _lit("\0") or + tp == _lit("e") or + tp == _lit("E") or + tp == _lit("f") or + tp == _lit("F") or + tp == _lit("g") or + tp == _lit("G") or + tp == _lit("n") or + tp == _lit("%")): return self._format_float(w_float) self._unknown_presentation("float") @@ -980,12 +980,12 @@ tp = self._type self._get_locale(tp) default_precision = 6 - if self._align == "=": + if self._align == _lit("="): # '=' alignment is invalid raise oefmt(space.w_ValueError, "'=' alignment flag is not allowed in complex " "format specifier") - if self._fill_char == "0": + if self._fill_char == _lit("0"): # zero padding is invalid raise oefmt(space.w_ValueError, "Zero padding is not allowed in complex format " @@ -1018,8 +1018,8 @@ #might want to switch to double_to_string from formatd #in CPython it's named 're' - clashes with re module - re_num = formatd(w_complex.realval, tp, self._precision) - im_num = 
formatd(w_complex.imagval, tp, self._precision) + re_num = formatd(w_complex.realval, str(tp)[0], self._precision) + im_num = formatd(w_complex.imagval, str(tp)[0], self._precision) n_re_digits = len(re_num) n_im_digits = len(im_num) @@ -1042,8 +1042,8 @@ tmp_fill_char = self._fill_char tmp_align = self._align tmp_width = self._width - self._fill_char = "\0" - self._align = "<" + self._fill_char = _lit("\0") + self._align = _lit("<") self._width = -1 #determine if we have remainder, might include dec or exponent or both @@ -1125,14 +1125,14 @@ if self._parse_spec(_lit("\0"), _lit(">")): return space.str(w_complex) tp = self._type - if (tp == "\0" or - tp == "e" or - tp == "E" or - tp == "f" or - tp == "F" or - tp == "g" or - tp == "G" or - tp == "n"): + if (tp == _lit("\0") or + tp == _lit("e") or + tp == _lit("E") or + tp == _lit("f") or + tp == _lit("F") or + tp == _lit("g") or + tp == _lit("G") or + tp == _lit("n")): return self._format_complex(w_complex) self._unknown_presentation("complex") return Formatter @@ -1143,5 +1143,9 @@ @specialize.arg(2) def run_formatter(space, w_format_spec, meth, *args): - formatter = str_formatter(space, space.str_w(w_format_spec)) - return getattr(formatter, meth)(*args) + if space.isinstance_w(w_format_spec, space.w_unicode): + formatter = unicode_formatter(space, space.unicode_w(w_format_spec)) + return getattr(formatter, meth)(*args) + else: + formatter = str_formatter(space, space.str_w(w_format_spec)) + return getattr(formatter, meth)(*args) From pypy.commits at gmail.com Tue Sep 6 04:17:46 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 06 Sep 2016 01:17:46 -0700 (PDT) Subject: [pypy-commit] cffi default: Update and document v1.8.2 Message-ID: <57ce7baa.898b1c0a.2cfd1.8161@mx.google.com> Author: Armin Rigo Branch: Changeset: r2762:48bb1b21620f Date: 2016-09-06 09:49 +0200 http://bitbucket.org/cffi/cffi/changeset/48bb1b21620f/ Log: Update and document v1.8.2 diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- 
a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -2,7 +2,7 @@ #include #include "structmember.h" -#define CFFI_VERSION "1.8.1" +#define CFFI_VERSION "1.8.2" #ifdef MS_WIN32 #include diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -12,7 +12,7 @@ # ____________________________________________________________ import sys -assert __version__ == "1.8.1", ("This test_c.py file is for testing a version" +assert __version__ == "1.8.2", ("This test_c.py file is for testing a version" " of cffi that differs from the one that we" " get from 'import _cffi_backend'") if sys.version_info < (3,): diff --git a/cffi/__init__.py b/cffi/__init__.py --- a/cffi/__init__.py +++ b/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.8.1" -__version_info__ = (1, 8, 1) +__version__ = "1.8.2" +__version_info__ = (1, 8, 2) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. It may be older than __version__ diff --git a/cffi/_embedding.h b/cffi/_embedding.h --- a/cffi/_embedding.h +++ b/cffi/_embedding.h @@ -233,7 +233,7 @@ f = PySys_GetObject((char *)"stderr"); if (f != NULL && f != Py_None) { PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME - "\ncompiled with cffi version: 1.8.1" + "\ncompiled with cffi version: 1.8.2" "\n_cffi_backend module: ", f); modules = PyImport_GetModuleDict(); mod = PyDict_GetItemString(modules, "_cffi_backend"); diff --git a/doc/source/conf.py b/doc/source/conf.py --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -47,7 +47,7 @@ # The short X.Y version. version = '1.8' # The full version, including alpha/beta/rc tags. -release = '1.8.1' +release = '1.8.2' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
diff --git a/doc/source/installation.rst b/doc/source/installation.rst --- a/doc/source/installation.rst +++ b/doc/source/installation.rst @@ -51,7 +51,7 @@ Download and Installation: -* http://pypi.python.org/packages/source/c/cffi/cffi-1.8.1.tar.gz +* http://pypi.python.org/packages/source/c/cffi/cffi-1.8.2.tar.gz - MD5: ... diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst --- a/doc/source/whatsnew.rst +++ b/doc/source/whatsnew.rst @@ -3,6 +3,15 @@ ====================== +v1.8.2 +====== + +* Issue #283: fixed ``ffi.new()`` on structures/unions with nested + anonymous structures/unions, when there is at least one union in + the mix. When initialized with a list or a dict, it should now + behave more closely like the ``{ }`` syntax does in GCC. + + v1.8.1 ====== diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -144,7 +144,7 @@ `Mailing list `_ """, - version='1.8.1', + version='1.8.2', packages=['cffi'] if cpython else [], package_data={'cffi': ['_cffi_include.h', 'parse_c_type.h', '_embedding.h']} From pypy.commits at gmail.com Tue Sep 6 04:17:48 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 06 Sep 2016 01:17:48 -0700 (PDT) Subject: [pypy-commit] cffi default: skip test Message-ID: <57ce7bac.45c8c20a.4d79b.ecc7@mx.google.com> Author: Armin Rigo Branch: Changeset: r2763:29ac108030f8 Date: 2016-09-06 09:57 +0200 http://bitbucket.org/cffi/cffi/changeset/29ac108030f8/ Log: skip test diff --git a/testing/cffi0/test_ctypes.py b/testing/cffi0/test_ctypes.py --- a/testing/cffi0/test_ctypes.py +++ b/testing/cffi0/test_ctypes.py @@ -34,6 +34,9 @@ def test_nested_anonymous_union(self): py.test.skip("ctypes backend: not supported: nested anonymous union") + def test_nested_anonymous_struct_2(self): + py.test.skip("ctypes backend: not supported: nested anonymous union") + def test_CData_CType_2(self): if sys.version_info >= (3,): py.test.skip("ctypes backend: not supported in Python 3: CType") From pypy.commits at gmail.com Tue Sep 6 
04:17:50 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 06 Sep 2016 01:17:50 -0700 (PDT) Subject: [pypy-commit] cffi default: A direct test Message-ID: <57ce7bae.6740c20a.d88a6.91fb@mx.google.com> Author: Armin Rigo Branch: Changeset: r2764:b518a1326d6c Date: 2016-09-06 10:03 +0200 http://bitbucket.org/cffi/cffi/changeset/b518a1326d6c/ Log: A direct test diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -2536,6 +2536,25 @@ assert d[2][1].bitshift == -1 assert d[2][1].bitsize == -1 +def test_nested_anonymous_struct_2(): + BInt = new_primitive_type("int") + BStruct = new_struct_type("struct foo") + BInnerUnion = new_union_type("union bar") + complete_struct_or_union(BInnerUnion, [('a1', BInt, -1), + ('a2', BInt, -1)]) + complete_struct_or_union(BStruct, [('b1', BInt, -1), + ('', BInnerUnion, -1), + ('b2', BInt, -1)]) + assert sizeof(BInnerUnion) == sizeof(BInt) + assert sizeof(BStruct) == sizeof(BInt) * 3 + fields = [(name, fld.offset, fld.flags) for (name, fld) in BStruct.fields] + assert fields == [ + ('b1', 0 * sizeof(BInt), 0), + ('a1', 1 * sizeof(BInt), 0), + ('a2', 1 * sizeof(BInt), 1), + ('b2', 2 * sizeof(BInt), 0), + ] + def test_sizeof_union(): # a union has the largest alignment of its members, and a total size # that is the largest of its items *possibly further aligned* if From pypy.commits at gmail.com Tue Sep 6 04:22:48 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 06 Sep 2016 01:22:48 -0700 (PDT) Subject: [pypy-commit] pypy default: update to cffi/b518a1326d6c Message-ID: <57ce7cd8.05d71c0a.db689.843c@mx.google.com> Author: Armin Rigo Branch: Changeset: r86900:39d096586b61 Date: 2016-09-06 10:21 +0200 http://bitbucket.org/pypy/pypy/changeset/39d096586b61/ Log: update to cffi/b518a1326d6c diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO --- a/lib_pypy/cffi.egg-info/PKG-INFO +++ b/lib_pypy/cffi.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cffi -Version: 1.8.1 
+Version: 1.8.2 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.8.1" -__version_info__ = (1, 8, 1) +__version__ = "1.8.2" +__version_info__ = (1, 8, 2) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. It may be older than __version__ diff --git a/lib_pypy/cffi/_embedding.h b/lib_pypy/cffi/_embedding.h --- a/lib_pypy/cffi/_embedding.h +++ b/lib_pypy/cffi/_embedding.h @@ -233,7 +233,7 @@ f = PySys_GetObject((char *)"stderr"); if (f != NULL && f != Py_None) { PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME - "\ncompiled with cffi version: 1.8.1" + "\ncompiled with cffi version: 1.8.2" "\n_cffi_backend module: ", f); modules = PyImport_GetModuleDict(); mod = PyDict_GetItemString(modules, "_cffi_backend"); diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -3,7 +3,7 @@ from rpython.rlib import rdynload, clibffi, entrypoint from rpython.rtyper.lltypesystem import rffi -VERSION = "1.8.1" +VERSION = "1.8.2" FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI try: diff --git a/pypy/module/_cffi_backend/ctypestruct.py b/pypy/module/_cffi_backend/ctypestruct.py --- a/pypy/module/_cffi_backend/ctypestruct.py +++ b/pypy/module/_cffi_backend/ctypestruct.py @@ -105,9 +105,6 @@ return True return False - def _check_only_one_argument_for_union(self, w_ob): - pass - def convert_from_object(self, cdata, w_ob): if not self._copy_from_same(cdata, w_ob): self.convert_struct_from_object(cdata, w_ob, optvarsize=-1) @@ -117,19 +114,24 @@ ) def 
convert_struct_from_object(self, cdata, w_ob, optvarsize): self.force_lazy_struct() - self._check_only_one_argument_for_union(w_ob) space = self.space if (space.isinstance_w(w_ob, space.w_list) or space.isinstance_w(w_ob, space.w_tuple)): lst_w = space.listview(w_ob) - if len(lst_w) > len(self._fields_list): - raise oefmt(space.w_ValueError, - "too many initializers for '%s' (got %d)", - self.name, len(lst_w)) - for i in range(len(lst_w)): - optvarsize = self._fields_list[i].write_v(cdata, lst_w[i], + j = 0 + for w_obj in lst_w: + try: + while (self._fields_list[j].flags & + W_CField.BF_IGNORE_IN_CTOR): + j += 1 + except IndexError: + raise oefmt(space.w_ValueError, + "too many initializers for '%s' (got %d)", + self.name, len(lst_w)) + optvarsize = self._fields_list[j].write_v(cdata, w_obj, optvarsize) + j += 1 return optvarsize elif space.isinstance_w(w_ob, space.w_dict): @@ -185,14 +187,6 @@ class W_CTypeUnion(W_CTypeStructOrUnion): kind = "union" - def _check_only_one_argument_for_union(self, w_ob): - space = self.space - n = space.int_w(space.len(w_ob)) - if n > 1: - raise oefmt(space.w_ValueError, - "initializer for '%s': %d items given, but only one " - "supported (use a dict if needed)", self.name, n) - class W_CField(W_Root): _immutable_ = True @@ -200,18 +194,21 @@ BS_REGULAR = -1 BS_EMPTY_ARRAY = -2 - def __init__(self, ctype, offset, bitshift, bitsize): + BF_IGNORE_IN_CTOR = 0x01 + + def __init__(self, ctype, offset, bitshift, bitsize, flags): self.ctype = ctype self.offset = offset self.bitshift = bitshift # >= 0: bitshift; or BS_REGULAR/BS_EMPTY_ARRAY self.bitsize = bitsize + self.flags = flags # BF_xxx def is_bitfield(self): return self.bitshift >= 0 - def make_shifted(self, offset): + def make_shifted(self, offset, fflags): return W_CField(self.ctype, offset + self.offset, - self.bitshift, self.bitsize) + self.bitshift, self.bitsize, self.flags | fflags) def read(self, cdata): cdata = rffi.ptradd(cdata, self.offset) @@ -341,5 +338,6 @@ offset = 
interp_attrproperty('offset', W_CField), bitshift = interp_attrproperty('bitshift', W_CField), bitsize = interp_attrproperty('bitsize', W_CField), + flags = interp_attrproperty('flags', W_CField), ) W_CField.typedef.acceptable_as_base_class = False diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py --- a/pypy/module/_cffi_backend/newtype.py +++ b/pypy/module/_cffi_backend/newtype.py @@ -345,6 +345,11 @@ if alignment < falign and do_align: alignment = falign # + if is_union and i > 0: + fflags = ctypestruct.W_CField.BF_IGNORE_IN_CTOR + else: + fflags = 0 + # if fbitsize < 0: # not a bitfield: common case @@ -372,7 +377,7 @@ for name, srcfld in ftype._fields_dict.items(): srcfield2names[srcfld] = name for srcfld in ftype._fields_list: - fld = srcfld.make_shifted(boffset // 8) + fld = srcfld.make_shifted(boffset // 8, fflags) fields_list.append(fld) try: fields_dict[srcfield2names[srcfld]] = fld @@ -382,7 +387,8 @@ w_ctype._custom_field_pos = True else: # a regular field - fld = ctypestruct.W_CField(ftype, boffset // 8, bs_flag, -1) + fld = ctypestruct.W_CField(ftype, boffset // 8, bs_flag, -1, + fflags) fields_list.append(fld) fields_dict[fname] = fld @@ -489,7 +495,7 @@ bitshift = 8 * ftype.size - fbitsize- bitshift fld = ctypestruct.W_CField(ftype, field_offset_bytes, - bitshift, fbitsize) + bitshift, fbitsize, fflags) fields_list.append(fld) fields_dict[fname] = fld diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1,7 +1,7 @@ # ____________________________________________________________ import sys -assert __version__ == "1.8.1", ("This test_c.py file is for testing a version" +assert __version__ == "1.8.2", ("This test_c.py file is for testing a version" " of cffi that differs from the one that we" " get from 'import _cffi_backend'") if 
sys.version_info < (3,): @@ -2525,6 +2525,25 @@ assert d[2][1].bitshift == -1 assert d[2][1].bitsize == -1 +def test_nested_anonymous_struct_2(): + BInt = new_primitive_type("int") + BStruct = new_struct_type("struct foo") + BInnerUnion = new_union_type("union bar") + complete_struct_or_union(BInnerUnion, [('a1', BInt, -1), + ('a2', BInt, -1)]) + complete_struct_or_union(BStruct, [('b1', BInt, -1), + ('', BInnerUnion, -1), + ('b2', BInt, -1)]) + assert sizeof(BInnerUnion) == sizeof(BInt) + assert sizeof(BStruct) == sizeof(BInt) * 3 + fields = [(name, fld.offset, fld.flags) for (name, fld) in BStruct.fields] + assert fields == [ + ('b1', 0 * sizeof(BInt), 0), + ('a1', 1 * sizeof(BInt), 0), + ('a2', 1 * sizeof(BInt), 1), + ('b2', 2 * sizeof(BInt), 0), + ] + def test_sizeof_union(): # a union has the largest alignment of its members, and a total size # that is the largest of its items *possibly further aligned* if diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py @@ -1415,6 +1415,7 @@ assert p.b == 12 assert p.c == 14 assert p.d == 14 + py.test.raises(ValueError, ffi.new, "struct foo_s *", [0, 0, 0, 0]) def test_nested_field_offset_align(self): ffi = FFI(backend=self.Backend()) @@ -1454,14 +1455,42 @@ assert p.b == 0 assert p.c == 14 assert p.d == 14 - p = ffi.new("union foo_u *", {'b': 12}) - assert p.a == 0 + p = ffi.new("union foo_u *", {'a': -63, 'b': 12}) + assert p.a == -63 assert p.b == 12 - assert p.c == 0 - assert p.d == 0 - # we cannot specify several items in the dict, even though - # in theory in this particular case it would make sense - # to give both 'a' and 'b' + assert p.c == -63 + assert p.d == -63 + p = ffi.new("union foo_u *", [123, 456]) + assert p.a == 123 + assert p.b == 456 + assert p.c == 123 + assert p.d == 123 + 
py.test.raises(ValueError, ffi.new, "union foo_u *", [0, 0, 0]) + + def test_nested_anonymous_struct_2(self): + ffi = FFI(backend=self.Backend()) + ffi.cdef(""" + struct foo_s { + int a; + union { int b; union { int c, d; }; }; + int e; + }; + """) + assert ffi.sizeof("struct foo_s") == 3 * SIZE_OF_INT + p = ffi.new("struct foo_s *", [11, 22, 33]) + assert p.a == 11 + assert p.b == p.c == p.d == 22 + assert p.e == 33 + py.test.raises(ValueError, ffi.new, "struct foo_s *", [11, 22, 33, 44]) + FOO = ffi.typeof("struct foo_s") + fields = [(name, fld.offset, fld.flags) for (name, fld) in FOO.fields] + assert fields == [ + ('a', 0 * SIZE_OF_INT, 0), + ('b', 1 * SIZE_OF_INT, 0), + ('c', 1 * SIZE_OF_INT, 1), + ('d', 1 * SIZE_OF_INT, 1), + ('e', 2 * SIZE_OF_INT, 0), + ] def test_cast_to_array_type(self): ffi = FFI(backend=self.Backend()) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ctypes.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ctypes.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ctypes.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ctypes.py @@ -35,6 +35,9 @@ def test_nested_anonymous_union(self): py.test.skip("ctypes backend: not supported: nested anonymous union") + def test_nested_anonymous_struct_2(self): + py.test.skip("ctypes backend: not supported: nested anonymous union") + def test_CData_CType_2(self): if sys.version_info >= (3,): py.test.skip("ctypes backend: not supported in Python 3: CType") From pypy.commits at gmail.com Tue Sep 6 04:27:46 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 06 Sep 2016 01:27:46 -0700 (PDT) Subject: [pypy-commit] cffi release-1.8: Release branch, starting at 1.8.2 for CPython Message-ID: <57ce7e02.c3f0c20a.54624.ed90@mx.google.com> Author: Armin Rigo Branch: release-1.8 Changeset: r2765:f745eccd83e3 Date: 2016-09-06 10:21 +0200 http://bitbucket.org/cffi/cffi/changeset/f745eccd83e3/ Log: Release branch, starting at 1.8.2 for CPython From pypy.commits at gmail.com Tue Sep 6 
04:27:47 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 06 Sep 2016 01:27:47 -0700 (PDT) Subject: [pypy-commit] cffi release-1.8: md5/sha Message-ID: <57ce7e03.436ec20a.434cf.df7b@mx.google.com> Author: Armin Rigo Branch: release-1.8 Changeset: r2766:ac64324fb262 Date: 2016-09-06 10:26 +0200 http://bitbucket.org/cffi/cffi/changeset/ac64324fb262/ Log: md5/sha diff --git a/doc/source/installation.rst b/doc/source/installation.rst --- a/doc/source/installation.rst +++ b/doc/source/installation.rst @@ -53,11 +53,11 @@ * http://pypi.python.org/packages/source/c/cffi/cffi-1.8.2.tar.gz - - MD5: ... + - MD5: 538f307b6c5169bba41fbfda2b070762 - - SHA: ... + - SHA: 9d2722ba9241b232b980bb9243e12451513a8000 - - SHA256: ... + - SHA256: 2b636db1a179439d73ae0a090479e179a43df5d4eddc7e4c4067f960d4038530 * Or grab the most current version from the `Bitbucket page`_: ``hg clone https://bitbucket.org/cffi/cffi`` From pypy.commits at gmail.com Tue Sep 6 04:27:49 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 06 Sep 2016 01:27:49 -0700 (PDT) Subject: [pypy-commit] cffi default: hg merge release-1.8 Message-ID: <57ce7e05.e97ac20a.1eabd.eb2b@mx.google.com> Author: Armin Rigo Branch: Changeset: r2767:477e1350143e Date: 2016-09-06 10:26 +0200 http://bitbucket.org/cffi/cffi/changeset/477e1350143e/ Log: hg merge release-1.8 diff --git a/doc/source/installation.rst b/doc/source/installation.rst --- a/doc/source/installation.rst +++ b/doc/source/installation.rst @@ -53,11 +53,11 @@ * http://pypi.python.org/packages/source/c/cffi/cffi-1.8.2.tar.gz - - MD5: ... + - MD5: 538f307b6c5169bba41fbfda2b070762 - - SHA: ... + - SHA: 9d2722ba9241b232b980bb9243e12451513a8000 - - SHA256: ... 
+ - SHA256: 2b636db1a179439d73ae0a090479e179a43df5d4eddc7e4c4067f960d4038530 * Or grab the most current version from the `Bitbucket page`_: ``hg clone https://bitbucket.org/cffi/cffi`` From pypy.commits at gmail.com Tue Sep 6 06:59:08 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 06 Sep 2016 03:59:08 -0700 (PDT) Subject: [pypy-commit] pypy release-5.x: test, fix issue #2395 - do not force pyobj Message-ID: <57cea17c.c310c20a.ee2ea.2b92@mx.google.com> Author: Matti Picus Branch: release-5.x Changeset: r86902:0e2d9a73f5a1 Date: 2016-09-06 13:46 +0300 http://bitbucket.org/pypy/pypy/changeset/0e2d9a73f5a1/ Log: test, fix issue #2395 - do not force pyobj diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py --- a/pypy/module/cpyext/bytesobject.py +++ b/pypy/module/cpyext/bytesobject.py @@ -156,9 +156,6 @@ "expected string or Unicode object, %T found", from_ref(space, ref)) ref_str = rffi.cast(PyBytesObject, ref) - if not pyobj_has_w_obj(ref): - # XXX Force the ref? 
- bytes_realize(space, ref) return ref_str.c_ob_sval @cpython_api([rffi.VOIDP], rffi.CCHARP, error=0) diff --git a/pypy/module/cpyext/test/test_bytesobject.py b/pypy/module/cpyext/test/test_bytesobject.py --- a/pypy/module/cpyext/test/test_bytesobject.py +++ b/pypy/module/cpyext/test/test_bytesobject.py @@ -183,8 +183,27 @@ Py_INCREF(Py_None); return Py_None; """), + ("c_only", "METH_NOARGS", + """ + int ret; + char * buf2; + PyObject * obj = PyBytes_FromStringAndSize(NULL, 1024); + if (!obj) + return NULL; + buf2 = PyBytes_AsString(obj); + if (!buf2) + return NULL; + /* buf should not have been forced, issue #2395 */ + ret = _PyBytes_Resize(&obj, 512); + if (ret < 0) + return NULL; + Py_DECREF(obj); + Py_INCREF(Py_None); + return Py_None; + """), ]) module.getbytes() + module.c_only() def test_py_string_as_string_Unicode(self): module = self.import_extension('foo', [ From pypy.commits at gmail.com Tue Sep 6 06:59:10 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 06 Sep 2016 03:59:10 -0700 (PDT) Subject: [pypy-commit] pypy release-5.x: Moved tag release-pypy2.7-v5.4.1 to changeset 0e2d9a73f5a1 (from changeset 050d84dd7899) Message-ID: <57cea17e.94071c0a.8c4bb.c3d0@mx.google.com> Author: Matti Picus Branch: release-5.x Changeset: r86903:c95650101a99 Date: 2016-09-06 13:47 +0300 http://bitbucket.org/pypy/pypy/changeset/c95650101a99/ Log: Moved tag release-pypy2.7-v5.4.1 to changeset 0e2d9a73f5a1 (from changeset 050d84dd7899) diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -30,3 +30,5 @@ 68bb3510d8212ae9efb687e12e58c09d29e74f87 release-pypy2.7-v5.4.0 77392ad263504df011ccfcabf6a62e21d04086d0 release-pypy2.7-v5.4.0 050d84dd78997f021acf0e133934275d63547cc0 release-pypy2.7-v5.4.1 +050d84dd78997f021acf0e133934275d63547cc0 release-pypy2.7-v5.4.1 +0e2d9a73f5a1818d0245d75daccdbe21b2d5c3ef release-pypy2.7-v5.4.1 From pypy.commits at gmail.com Tue Sep 6 06:59:11 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 06 Sep 2016 03:59:11 -0700 (PDT) 
Subject: [pypy-commit] pypy default: Moved tag release-pypy2.7-v5.4.1 to changeset 0e2d9a73f5a1 (from changeset 050d84dd7899) Message-ID: <57cea17f.c19d1c0a.c520.cb9f@mx.google.com> Author: Matti Picus Branch: Changeset: r86904:6c7485fc8b69 Date: 2016-09-06 13:47 +0300 http://bitbucket.org/pypy/pypy/changeset/6c7485fc8b69/ Log: Moved tag release-pypy2.7-v5.4.1 to changeset 0e2d9a73f5a1 (from changeset 050d84dd7899) diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -31,3 +31,5 @@ 68bb3510d8212ae9efb687e12e58c09d29e74f87 release-pypy2.7-v5.4.0 77392ad263504df011ccfcabf6a62e21d04086d0 release-pypy2.7-v5.4.0 050d84dd78997f021acf0e133934275d63547cc0 release-pypy2.7-v5.4.1 +050d84dd78997f021acf0e133934275d63547cc0 release-pypy2.7-v5.4.1 +0e2d9a73f5a1818d0245d75daccdbe21b2d5c3ef release-pypy2.7-v5.4.1 From pypy.commits at gmail.com Tue Sep 6 06:59:06 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 06 Sep 2016 03:59:06 -0700 (PDT) Subject: [pypy-commit] pypy default: test, fix issue #2395 - do not force pyobj Message-ID: <57cea17a.a710c20a.5f195.3803@mx.google.com> Author: Matti Picus Branch: Changeset: r86901:9eb6ddd96e0e Date: 2016-09-06 13:46 +0300 http://bitbucket.org/pypy/pypy/changeset/9eb6ddd96e0e/ Log: test, fix issue #2395 - do not force pyobj diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py --- a/pypy/module/cpyext/bytesobject.py +++ b/pypy/module/cpyext/bytesobject.py @@ -156,9 +156,6 @@ "expected string or Unicode object, %T found", from_ref(space, ref)) ref_str = rffi.cast(PyBytesObject, ref) - if not pyobj_has_w_obj(ref): - # XXX Force the ref? 
- bytes_realize(space, ref) return ref_str.c_ob_sval @cpython_api([rffi.VOIDP], rffi.CCHARP, error=0) diff --git a/pypy/module/cpyext/test/test_bytesobject.py b/pypy/module/cpyext/test/test_bytesobject.py --- a/pypy/module/cpyext/test/test_bytesobject.py +++ b/pypy/module/cpyext/test/test_bytesobject.py @@ -183,8 +183,27 @@ Py_INCREF(Py_None); return Py_None; """), + ("c_only", "METH_NOARGS", + """ + int ret; + char * buf2; + PyObject * obj = PyBytes_FromStringAndSize(NULL, 1024); + if (!obj) + return NULL; + buf2 = PyBytes_AsString(obj); + if (!buf2) + return NULL; + /* buf should not have been forced, issue #2395 */ + ret = _PyBytes_Resize(&obj, 512); + if (ret < 0) + return NULL; + Py_DECREF(obj); + Py_INCREF(Py_None); + return Py_None; + """), ]) module.getbytes() + module.c_only() def test_py_string_as_string_Unicode(self): module = self.import_extension('foo', [ From pypy.commits at gmail.com Tue Sep 6 06:59:13 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 06 Sep 2016 03:59:13 -0700 (PDT) Subject: [pypy-commit] pypy buffer-interface: close branch to be merged Message-ID: <57cea181.436ec20a.434cf.22c5@mx.google.com> Author: Matti Picus Branch: buffer-interface Changeset: r86905:b33e2ca4e535 Date: 2016-09-06 13:50 +0300 http://bitbucket.org/pypy/pypy/changeset/b33e2ca4e535/ Log: close branch to be merged From pypy.commits at gmail.com Tue Sep 6 06:59:16 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 06 Sep 2016 03:59:16 -0700 (PDT) Subject: [pypy-commit] pypy default: merge buffer-interface, which implements buffer-protocol parts of cpyext and numpypy Message-ID: <57cea184.861b1c0a.6b21.c1a0@mx.google.com> Author: Matti Picus Branch: Changeset: r86906:03fc4cb79e37 Date: 2016-09-06 13:52 +0300 http://bitbucket.org/pypy/pypy/changeset/03fc4cb79e37/ Log: merge buffer-interface, which implements buffer-protocol parts of cpyext and numpypy diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- 
a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1428,6 +1428,9 @@ BUF_FORMAT = 0x0004 BUF_ND = 0x0008 BUF_STRIDES = 0x0010 | BUF_ND + BUF_C_CONTIGUOUS = 0x0020 | BUF_STRIDES + BUF_F_CONTIGUOUS = 0x0040 | BUF_STRIDES + BUF_ANY_CONTIGUOUS = 0x0080 | BUF_STRIDES BUF_INDIRECT = 0x0100 | BUF_STRIDES BUF_CONTIG_RO = BUF_ND diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -597,6 +597,18 @@ def getlength(self): return self.array.len * self.array.itemsize + def getformat(self): + return self.array.typecode + + def getitemsize(self): + return self.array.itemsize + + def getndim(self): + return 1 + + def getstrides(self): + return [self.getitemsize()] + def getitem(self, index): array = self.array data = array._charbuf_start() diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -122,7 +122,7 @@ METH_COEXIST METH_STATIC METH_CLASS Py_TPFLAGS_BASETYPE METH_NOARGS METH_VARARGS METH_KEYWORDS METH_O Py_TPFLAGS_HAVE_INPLACEOPS Py_TPFLAGS_HEAPTYPE Py_TPFLAGS_HAVE_CLASS Py_TPFLAGS_HAVE_NEWBUFFER -Py_LT Py_LE Py_EQ Py_NE Py_GT Py_GE Py_TPFLAGS_CHECKTYPES +Py_LT Py_LE Py_EQ Py_NE Py_GT Py_GE Py_TPFLAGS_CHECKTYPES Py_MAX_NDIMS """.split() for name in constant_names: setattr(CConfig_constants, name, rffi_platform.ConstantInteger(name)) @@ -645,6 +645,9 @@ ('format', rffi.CCHARP), ('shape', Py_ssize_tP), ('strides', Py_ssize_tP), + ('_format', rffi.UCHAR), + ('_shape', rffi.CFixedArray(Py_ssize_t, Py_MAX_NDIMS)), + ('_strides', rffi.CFixedArray(Py_ssize_t, Py_MAX_NDIMS)), ('suboffsets', Py_ssize_tP), #('smalltable', rffi.CFixedArray(Py_ssize_t, 2)), ('internal', rffi.VOIDP) diff --git a/pypy/module/cpyext/buffer.py b/pypy/module/cpyext/buffer.py --- a/pypy/module/cpyext/buffer.py +++ b/pypy/module/cpyext/buffer.py @@ -1,8 +1,9 @@ from pypy.interpreter.error import 
oefmt from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib.rarithmetic import widen from pypy.module.cpyext.api import ( - cpython_api, CANNOT_FAIL, Py_buffer, Py_TPFLAGS_HAVE_NEWBUFFER) -from pypy.module.cpyext.pyobject import PyObject + cpython_api, CANNOT_FAIL, Py_buffer, Py_TPFLAGS_HAVE_NEWBUFFER, Py_ssize_tP) +from pypy.module.cpyext.pyobject import PyObject, make_ref, incref @cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) def PyObject_CheckBuffer(space, pyobj): @@ -33,13 +34,82 @@ raise an error if the object can't support a simpler view of its memory. 0 is returned on success and -1 on error.""" - raise oefmt(space.w_TypeError, - "PyPy does not yet implement the new buffer interface") + flags = widen(flags) + buf = space.buffer_w(w_obj, flags) + try: + view.c_buf = rffi.cast(rffi.VOIDP, buf.get_raw_address()) + except ValueError: + raise BufferError("could not create buffer from object") + view.c_len = buf.getlength() + view.c_obj = make_ref(space, w_obj) + ndim = buf.getndim() + view.c_itemsize = buf.getitemsize() + rffi.setintfield(view, 'c_readonly', int(buf.readonly)) + rffi.setintfield(view, 'c_ndim', ndim) + view.c_format = rffi.str2charp(buf.getformat()) + view.c_shape = lltype.malloc(Py_ssize_tP.TO, ndim, flavor='raw') + view.c_strides = lltype.malloc(Py_ssize_tP.TO, ndim, flavor='raw') + shape = buf.getshape() + strides = buf.getstrides() + for i in range(ndim): + view.c_shape[i] = shape[i] + view.c_strides[i] = strides[i] + view.c_suboffsets = lltype.nullptr(Py_ssize_tP.TO) + view.c_internal = lltype.nullptr(rffi.VOIDP.TO) + return 0 + +def _IsFortranContiguous(view): + ndim = widen(view.c_ndim) + if ndim == 0: + return 1 + if not view.c_strides: + return ndim == 1 + sd = view.c_itemsize + if ndim == 1: + return view.c_shape[0] == 1 or sd == view.c_strides[0] + for i in range(view.c_ndim): + dim = view.c_shape[i] + if dim == 0: + return 1 + if view.c_strides[i] != sd: + return 0 + sd *= dim + return 1 + +def 
_IsCContiguous(view): + ndim = widen(view.c_ndim) + if ndim == 0: + return 1 + if not view.c_strides: + return ndim == 1 + sd = view.c_itemsize + if ndim == 1: + return view.c_shape[0] == 1 or sd == view.c_strides[0] + for i in range(ndim - 1, -1, -1): + dim = view.c_shape[i] + if dim == 0: + return 1 + if view.c_strides[i] != sd: + return 0 + sd *= dim + return 1 + @cpython_api([lltype.Ptr(Py_buffer), lltype.Char], rffi.INT_real, error=CANNOT_FAIL) -def PyBuffer_IsContiguous(space, view, fortran): +def PyBuffer_IsContiguous(space, view, fort): """Return 1 if the memory defined by the view is C-style (fortran is 'C') or Fortran-style (fortran is 'F') contiguous or either one (fortran is 'A'). Return 0 otherwise.""" - # PyPy only supports contiguous Py_buffers for now. - return 1 + # traverse the strides, checking for consistent stride increases from + # right-to-left (c) or left-to-right (fortran). Copied from cpython + if not view.c_suboffsets: + return 0 + if (fort == 'C'): + return _IsCContiguous(view) + elif (fort == 'F'): + return _IsFortranContiguous(view) + elif (fort == 'A'): + return (_IsCContiguous(view) or _IsFortranContiguous(view)) + return 0 + + diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py --- a/pypy/module/cpyext/bytesobject.py +++ b/pypy/module/cpyext/bytesobject.py @@ -29,19 +29,17 @@ ## Solution ## -------- ## -## PyBytesObject contains two additional members: the ob_size and a pointer to a -## char ob_sval; it may be NULL. +## PyBytesObject contains two additional members: the ob_size and an array +## char ob_sval which holds a \x0 terminated string. ## ## - A string allocated by pypy will be converted into a PyBytesObject with a -## NULL buffer. The first time PyString_AsString() is called, memory is -## allocated (with flavor='raw') and content is copied. +## buffer holding \x0. The first time PyString_AsString() is called, the +## PyStringObject is reallocated, and the string copied into the buffer. 
The +## ob_size reflects the length of the string. ## ## - A string allocated with PyString_FromStringAndSize(NULL, size) will ## allocate a PyBytesObject structure, and a buffer with the specified -## size+1, but the reference won't be stored in the global map; there is no -## corresponding object in pypy. When from_ref() or Py_INCREF() is called, -## the pypy string is created, and added to the global map of tracked -## objects. The buffer is then supposed to be immutable. +## size+1, as part of the object. The buffer is then supposed to be immutable. ## ##- A buffer obtained from PyString_AS_STRING() could be mutable iff ## there is no corresponding pypy object for the string diff --git a/pypy/module/cpyext/include/object.h b/pypy/module/cpyext/include/object.h --- a/pypy/module/cpyext/include/object.h +++ b/pypy/module/cpyext/include/object.h @@ -142,7 +142,8 @@ typedef Py_ssize_t (*segcountproc)(PyObject *, Py_ssize_t *); typedef Py_ssize_t (*charbufferproc)(PyObject *, Py_ssize_t, char **); -/* Py3k buffer interface */ +/* Py3k buffer interface, adapted for PyPy */ +#define Py_MAX_NDIMS 32 typedef struct bufferinfo { void *buf; PyObject *obj; /* owned reference */ @@ -156,12 +157,14 @@ char *format; Py_ssize_t *shape; Py_ssize_t *strides; - Py_ssize_t *suboffsets; - + Py_ssize_t *suboffsets; /* alway NULL for app-level objects*/ + unsigned char _format; + Py_ssize_t _strides[Py_MAX_NDIMS]; + Py_ssize_t _shape[Py_MAX_NDIMS]; /* static store for shape and strides of mono-dimensional buffers. 
*/ /* Py_ssize_t smalltable[2]; */ - void *internal; + void *internal; /* always NULL for app-level objects */ } Py_buffer; diff --git a/pypy/module/cpyext/memoryobject.py b/pypy/module/cpyext/memoryobject.py --- a/pypy/module/cpyext/memoryobject.py +++ b/pypy/module/cpyext/memoryobject.py @@ -1,7 +1,8 @@ from pypy.module.cpyext.api import (cpython_api, Py_buffer, CANNOT_FAIL, - build_type_checkers) -from pypy.module.cpyext.pyobject import PyObject -from rpython.rtyper.lltypesystem import lltype + Py_MAX_NDIMS, build_type_checkers, Py_ssize_tP) +from pypy.module.cpyext.pyobject import PyObject, make_ref, incref +from rpython.rtyper.lltypesystem import lltype, rffi +from pypy.objspace.std.memoryobject import W_MemoryView PyMemoryView_Check, PyMemoryView_CheckExact = build_type_checkers("MemoryView", "w_memoryview") @@ -12,6 +13,7 @@ @cpython_api([PyObject], PyObject) def PyMemoryView_GET_BASE(space, w_obj): # return the obj field of the Py_buffer created by PyMemoryView_GET_BUFFER + # XXX needed for numpy on py3k raise NotImplementedError('PyMemoryView_GET_BUFFER') @cpython_api([PyObject], lltype.Ptr(Py_buffer), error=CANNOT_FAIL) @@ -20,21 +22,35 @@ object. The object must be a memoryview instance; this macro doesn't check its type, you must do it yourself or you will risk crashes.""" view = lltype.malloc(Py_buffer, flavor='raw', zero=True) - # TODO - fill in fields - ''' - view.c_buf = buf - view.c_len = length - view.c_obj = obj - Py_IncRef(space, obj) - view.c_itemsize = 1 - rffi.setintfield(view, 'c_readonly', readonly) - rffi.setintfield(view, 'c_ndim', 0) - view.c_format = lltype.nullptr(rffi.CCHARP.TO) - view.c_shape = lltype.nullptr(Py_ssize_tP.TO) - view.c_strides = lltype.nullptr(Py_ssize_tP.TO) + if not isinstance(w_obj, W_MemoryView): + return view + ndim = w_obj.buf.getndim() + if ndim >= Py_MAX_NDIMS: + # XXX warn? 
+ return view + try: + view.c_buf = rffi.cast(rffi.VOIDP, w_obj.buf.get_raw_address()) + view.c_obj = make_ref(space, w_obj) + rffi.setintfield(view, 'c_readonly', w_obj.buf.readonly) + isstr = False + except ValueError: + w_s = w_obj.descr_tobytes(space) + view.c_obj = make_ref(space, w_s) + rffi.setintfield(view, 'c_readonly', 1) + isstr = True + view.c_len = w_obj.getlength() + view.c_itemsize = w_obj.buf.getitemsize() + rffi.setintfield(view, 'c_ndim', ndim) + view.c__format = rffi.cast(rffi.UCHAR, w_obj.buf.getformat()) + view.c_format = rffi.cast(rffi.CCHARP, view.c__format) + view.c_shape = rffi.cast(Py_ssize_tP, view.c__shape) + view.c_strides = rffi.cast(Py_ssize_tP, view.c__strides) + shape = w_obj.buf.getshape() + strides = w_obj.buf.getstrides() + for i in range(ndim): + view.c_shape[i] = shape[i] + view.c_strides[i] = strides[i] view.c_suboffsets = lltype.nullptr(Py_ssize_tP.TO) view.c_internal = lltype.nullptr(rffi.VOIDP.TO) - ''' return view - diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py --- a/pypy/module/cpyext/object.py +++ b/pypy/module/cpyext/object.py @@ -508,10 +508,9 @@ @cpython_api([lltype.Ptr(Py_buffer)], lltype.Void, error=CANNOT_FAIL) def PyBuffer_Release(space, view): """ - Releases a Py_buffer obtained from getbuffer ParseTuple's s*. - - This is not a complete re-implementation of the CPython API; it only - provides a subset of CPython's behavior. + Release the buffer view. This should be called when the buffer is + no longer being used as it may free memory from it """ Py_DecRef(space, view.c_obj) view.c_obj = lltype.nullptr(PyObject.TO) + # XXX do other fields leak memory? 
diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -335,9 +335,15 @@ def getshape(self): return self.shape + def getstrides(self): + return self.strides + def getitemsize(self): return self.itemsize + def getndim(self): + return self.ndim + def wrap_getreadbuffer(space, w_self, w_args, func): func_target = rffi.cast(readbufferproc, func) with lltype.scoped_alloc(rffi.VOIDPP.TO, 1) as ptr: diff --git a/pypy/module/cpyext/test/buffer_test.c b/pypy/module/cpyext/test/buffer_test.c --- a/pypy/module/cpyext/test/buffer_test.c +++ b/pypy/module/cpyext/test/buffer_test.c @@ -107,14 +107,11 @@ PyMyArray_getbuffer(PyObject *obj, Py_buffer *view, int flags) { PyMyArray* self = (PyMyArray*)obj; - fprintf(stdout, "in PyMyArray_getbuffer\n"); if (view == NULL) { - fprintf(stdout, "view is NULL\n"); PyErr_SetString(PyExc_ValueError, "NULL view in getbuffer"); return -1; } if (flags == 0) { - fprintf(stdout, "flags is 0\n"); PyErr_SetString(PyExc_ValueError, "flags == 0 in getbuffer"); return -1; } @@ -188,7 +185,131 @@ (initproc)PyMyArray_init, /* tp_init */ }; +static PyObject* +test_buffer(PyObject* self, PyObject* args) +{ + Py_buffer* view = NULL; + PyObject* obj = PyTuple_GetItem(args, 0); + PyObject* memoryview = PyMemoryView_FromObject(obj); + if (memoryview == NULL) + return PyInt_FromLong(-1); + view = PyMemoryView_GET_BUFFER(memoryview); + Py_DECREF(memoryview); + return PyInt_FromLong(view->len); +} + +/* Copied from numpy tests */ +/* + * Create python string from a FLAG and or the corresponding PyBuf flag + * for the use in get_buffer_info. 
+ */ +#define GET_PYBUF_FLAG(FLAG) \ + buf_flag = PyUnicode_FromString(#FLAG); \ + flag_matches = PyObject_RichCompareBool(buf_flag, tmp, Py_EQ); \ + Py_DECREF(buf_flag); \ + if (flag_matches == 1) { \ + Py_DECREF(tmp); \ + flags |= PyBUF_##FLAG; \ + continue; \ + } \ + else if (flag_matches == -1) { \ + Py_DECREF(tmp); \ + return NULL; \ + } + + +/* + * Get information for a buffer through PyBuf_GetBuffer with the + * corresponding flags or'ed. Note that the python caller has to + * make sure that or'ing those flags actually makes sense. + * More information should probably be returned for future tests. + */ +static PyObject * +get_buffer_info(PyObject *self, PyObject *args) +{ + PyObject *buffer_obj, *pyflags; + PyObject *tmp, *buf_flag; + Py_buffer buffer; + PyObject *shape, *strides; + Py_ssize_t i, n; + int flag_matches; + int flags = 0; + + if (!PyArg_ParseTuple(args, "OO", &buffer_obj, &pyflags)) { + return NULL; + } + + n = PySequence_Length(pyflags); + if (n < 0) { + return NULL; + } + + for (i=0; i < n; i++) { + tmp = PySequence_GetItem(pyflags, i); + if (tmp == NULL) { + return NULL; + } + + GET_PYBUF_FLAG(SIMPLE); + GET_PYBUF_FLAG(WRITABLE); + GET_PYBUF_FLAG(STRIDES); + GET_PYBUF_FLAG(ND); + GET_PYBUF_FLAG(C_CONTIGUOUS); + GET_PYBUF_FLAG(F_CONTIGUOUS); + GET_PYBUF_FLAG(ANY_CONTIGUOUS); + GET_PYBUF_FLAG(INDIRECT); + GET_PYBUF_FLAG(FORMAT); + GET_PYBUF_FLAG(STRIDED); + GET_PYBUF_FLAG(STRIDED_RO); + GET_PYBUF_FLAG(RECORDS); + GET_PYBUF_FLAG(RECORDS_RO); + GET_PYBUF_FLAG(FULL); + GET_PYBUF_FLAG(FULL_RO); + GET_PYBUF_FLAG(CONTIG); + GET_PYBUF_FLAG(CONTIG_RO); + + Py_DECREF(tmp); + + /* One of the flags must match */ + PyErr_SetString(PyExc_ValueError, "invalid flag used."); + return NULL; + } + + if (PyObject_GetBuffer(buffer_obj, &buffer, flags) < 0) { + return NULL; + } + + if (buffer.shape == NULL) { + Py_INCREF(Py_None); + shape = Py_None; + } + else { + shape = PyTuple_New(buffer.ndim); + for (i=0; i < buffer.ndim; i++) { + PyTuple_SET_ITEM(shape, i, 
PyLong_FromSsize_t(buffer.shape[i])); + } + } + + if (buffer.strides == NULL) { + Py_INCREF(Py_None); + strides = Py_None; + } + else { + strides = PyTuple_New(buffer.ndim); + for (i=0; i < buffer.ndim; i++) { + PyTuple_SET_ITEM(strides, i, PyLong_FromSsize_t(buffer.strides[i])); + } + } + + PyBuffer_Release(&buffer); + return Py_BuildValue("(NN)", shape, strides); +} + + + static PyMethodDef buffer_functions[] = { + {"test_buffer", (PyCFunction)test_buffer, METH_VARARGS, NULL}, + {"get_buffer_info", (PyCFunction)get_buffer_info, METH_VARARGS, NULL}, {NULL, NULL} /* Sentinel */ }; diff --git a/pypy/module/cpyext/test/test_memoryobject.py b/pypy/module/cpyext/test/test_memoryobject.py --- a/pypy/module/cpyext/test/test_memoryobject.py +++ b/pypy/module/cpyext/test/test_memoryobject.py @@ -1,6 +1,6 @@ from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase - +from rpython.rlib.buffer import StringBuffer class TestMemoryViewObject(BaseApiTest): def test_fromobject(self, space, api): @@ -12,6 +12,12 @@ w_bytes = space.call_method(w_view, "tobytes") assert space.unwrap(w_bytes) == "hello" + def test_frombuffer(self, space, api): + w_buf = space.newbuffer(StringBuffer("hello")) + w_memoryview = api.PyMemoryView_FromObject(w_buf) + w_view = api.PyMemoryView_GET_BUFFER(w_memoryview) + ndim = w_view.c_ndim + assert ndim == 1 class AppTestBufferProtocol(AppTestCpythonExtensionBase): def test_buffer_protocol(self): @@ -21,6 +27,25 @@ y = memoryview(arr) assert y.format == 'i' assert y.shape == (10,) + assert len(y) == 10 s = y[3] assert len(s) == struct.calcsize('i') assert s == struct.pack('i', 3) + viewlen = module.test_buffer(arr) + assert viewlen == y.itemsize * len(y) + + def test_buffer_info(self): + from _numpypy import multiarray as np + module = self.import_module(name='buffer_test') + get_buffer_info = module.get_buffer_info + # test_export_flags from numpy test_multiarray + 
raises(ValueError, get_buffer_info, np.arange(5)[::2], ('SIMPLE',)) + # test_relaxed_strides from numpy test_multiarray + arr = np.zeros((1, 10)) + if arr.flags.f_contiguous: + shape, strides = get_buffer_info(arr, ['F_CONTIGUOUS']) + assert strides[0] == 8 + arr = np.ones((10, 1), order='F') + shape, strides = get_buffer_info(arr, ['C_CONTIGUOUS']) + assert strides[-1] == 8 + diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -293,6 +293,8 @@ STRUCT_TYPE = PyNumberMethods elif slot_names[0] == 'c_tp_as_sequence': STRUCT_TYPE = PySequenceMethods + elif slot_names[0] == 'c_tp_as_buffer': + STRUCT_TYPE = PyBufferProcs else: raise AssertionError( "Structure not allocated: %s" % (slot_names[0],)) diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -460,6 +460,9 @@ def getdictvalue(self, space, key): return self.items[key] + def descr_memoryview(self, space, buf): + raise oefmt(space.w_TypeError, "error") + class IterDictObject(W_Root): def __init__(self, space, w_dict): self.space = space diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -377,7 +377,25 @@ def __exit__(self, typ, value, traceback): keepalive_until_here(self) - def get_buffer(self, space, readonly): + def get_buffer(self, space, flags): + errtype = space.w_ValueError # should be BufferError, numpy does this instead + if ((flags & space.BUF_C_CONTIGUOUS) == space.BUF_C_CONTIGUOUS and + not self.flags & NPY.ARRAY_C_CONTIGUOUS): + raise oefmt(errtype, "ndarray is not C-contiguous") + if ((flags & space.BUF_F_CONTIGUOUS) == space.BUF_F_CONTIGUOUS and + not self.flags & NPY.ARRAY_F_CONTIGUOUS): + raise oefmt(errtype, "ndarray is not Fortran contiguous") + if ((flags & 
space.BUF_ANY_CONTIGUOUS) == space.BUF_ANY_CONTIGUOUS and + not (self.flags & NPY.ARRAY_F_CONTIGUOUS and + self.flags & NPY.ARRAY_C_CONTIGUOUS)): + raise oefmt(errtype, "ndarray is not contiguous") + if ((flags & space.BUF_STRIDES) != space.BUF_STRIDES and + not self.flags & NPY.ARRAY_C_CONTIGUOUS): + raise oefmt(errtype, "ndarray is not C-contiguous") + if ((flags & space.BUF_WRITABLE) == space.BUF_WRITABLE and + not self.flags & NPY.ARRAY_WRITEABLE): + raise oefmt(errtype, "buffer source array is read-only") + readonly = not (flags & space.BUF_WRITABLE) == space.BUF_WRITABLE return ArrayBuffer(self, readonly) def astype(self, space, dtype, order, copy=True): @@ -695,6 +713,7 @@ index + self.impl.start) def setitem(self, index, v): + # XXX what if self.readonly? raw_storage_setitem(self.impl.storage, index + self.impl.start, rffi.cast(lltype.Char, v)) diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -1,4 +1,5 @@ from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.baseobjspace import BufferInterfaceNotFound from pypy.interpreter.gateway import unwrap_spec, WrappedDefault from rpython.rlib.buffer import SubBuffer from rpython.rlib.rstring import strip_spaces @@ -42,7 +43,7 @@ raise oefmt(space.w_ValueError, "object __array__ method not producing an array") -def try_interface_method(space, w_object): +def try_interface_method(space, w_object, copy): try: w_interface = space.getattr(w_object, space.wrap("__array_interface__")) if w_interface is None: @@ -81,17 +82,20 @@ raise oefmt(space.w_ValueError, "__array_interface__ could not decode dtype %R", w_dtype ) - if w_data is not None and (space.isinstance_w(w_data, space.w_tuple) or space.isinstance_w(w_data, space.w_list)): + if w_data is not None and (space.isinstance_w(w_data, space.w_tuple) or + space.isinstance_w(w_data, space.w_list)): data_w = space.listview(w_data) - data = 
rffi.cast(RAW_STORAGE_PTR, space.int_w(data_w[0])) - read_only = True # XXX why not space.is_true(data_w[1]) + w_data = rffi.cast(RAW_STORAGE_PTR, space.int_w(data_w[0])) + read_only = space.is_true(data_w[1]) or copy offset = 0 - return W_NDimArray.from_shape_and_storage(space, shape, data, - dtype, strides=strides, start=offset), read_only + w_base = w_object + if read_only: + w_base = None + return W_NDimArray.from_shape_and_storage(space, shape, w_data, + dtype, w_base=w_base, strides=strides, + start=offset), read_only if w_data is None: - data = w_object - else: - data = w_data + w_data = w_object w_offset = space.finditem(w_interface, space.wrap('offset')) if w_offset is None: offset = 0 @@ -101,7 +105,7 @@ if strides is not None: raise oefmt(space.w_NotImplementedError, "__array_interface__ strides not fully supported yet") - arr = frombuffer(space, data, dtype, support.product(shape), offset) + arr = frombuffer(space, w_data, dtype, support.product(shape), offset) new_impl = arr.implementation.reshape(arr, shape) return W_NDimArray(new_impl), False @@ -110,6 +114,78 @@ return None, False raise +def _descriptor_from_pep3118_format(space, c_format): + descr = descriptor.decode_w_dtype(space, space.wrap(c_format)) + if descr: + return descr + msg = "invalid PEP 3118 format string: '%s'" % c_format + space.warn(space.wrap(msg), space.w_RuntimeWarning) + return None + +def _array_from_buffer_3118(space, w_object, dtype): + try: + w_buf = space.call_method(space.builtin, "memoryview", w_object) + except OperationError as e: + if e.match(space, space.w_TypeError): + # object does not have buffer interface + return w_object + raise + format = space.getattr(w_buf,space.newbytes('format')) + if format: + descr = _descriptor_from_pep3118_format(space, space.str_w(format)) + if not descr: + return w_object + if dtype and descr: + raise oefmt(space.w_NotImplementedError, + "creating an array from a memoryview while specifying dtype " + "not supported") + if 
descr.elsize != space.int_w(space.getattr(w_buf, space.newbytes('itemsize'))): + msg = ("Item size computed from the PEP 3118 buffer format " + "string does not match the actual item size.") + space.warn(space.wrap(msg), space.w_RuntimeWarning) + return w_object + dtype = descr + elif not dtype: + dtype = descriptor.get_dtype_cache(space).w_stringdtype + dtype.elsize = space.int_w(space.getattr(w_buf, space.newbytes('itemsize'))) + nd = space.int_w(space.getattr(w_buf, space.newbytes('ndim'))) + shape = [space.int_w(d) for d in space.listview( + space.getattr(w_buf, space.newbytes('shape')))] + strides = [] + buflen = space.len_w(w_buf) * dtype.elsize + if shape: + strides = [space.int_w(d) for d in space.listview( + space.getattr(w_buf, space.newbytes('strides')))] + if not strides: + d = buflen + strides = [0] * nd + for k in range(nd): + if shape[k] > 0: + d /= shape[k] + strides[k] = d + else: + if nd == 1: + shape = [buflen / dtype.elsize, ] + strides = [dtype.elsize, ] + elif nd > 1: + msg = ("ndim computed from the PEP 3118 buffer format " + "is greater than 1, but shape is NULL.") + space.warn(space.wrap(msg), space.w_RuntimeWarning) + return w_object + try: + w_data = rffi.cast(RAW_STORAGE_PTR, space.int_w(space.call_method(w_buf, '_pypy_raw_address'))) + except OperationError as e: + if e.match(space, space.w_ValueError): + return w_object + else: + raise e + writable = not space.bool_w(space.getattr(w_buf, space.newbytes('readonly'))) + w_ret = W_NDimArray.from_shape_and_storage(space, shape, w_data, + storage_bytes=buflen, dtype=dtype, w_base=w_object, + writable=writable, strides=strides) + if w_ret: + return w_ret + return w_object @unwrap_spec(ndmin=int, copy=bool, subok=bool) def array(space, w_object, w_dtype=None, copy=True, w_order=None, subok=False, @@ -127,6 +203,7 @@ def _array(space, w_object, w_dtype=None, copy=True, w_order=None, subok=False): + from pypy.module.micronumpy.boxes import W_GenericBox # numpy testing calls 
array(type(array([]))) and expects a ValueError if space.isinstance_w(w_object, space.w_type): raise oefmt(space.w_ValueError, "cannot create ndarray from type instance") @@ -134,13 +211,19 @@ dtype = descriptor.decode_w_dtype(space, w_dtype) if not isinstance(w_object, W_NDimArray): w_array = try_array_method(space, w_object, w_dtype) - if w_array is not None: + if w_array is None: + if ( not space.isinstance_w(w_object, space.w_str) and + not space.isinstance_w(w_object, space.w_unicode) and + not isinstance(w_object, W_GenericBox)): + # use buffer interface + w_object = _array_from_buffer_3118(space, w_object, dtype) + else: # continue with w_array, but do further operations in place w_object = w_array copy = False dtype = w_object.get_dtype() if not isinstance(w_object, W_NDimArray): - w_array, _copy = try_interface_method(space, w_object) + w_array, _copy = try_interface_method(space, w_object, copy) if w_array is not None: w_object = w_array copy = _copy diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -805,19 +805,19 @@ return w_result def buffer_w(self, space, flags): - return self.implementation.get_buffer(space, True) + return self.implementation.get_buffer(space, flags) def readbuf_w(self, space): - return self.implementation.get_buffer(space, True) + return self.implementation.get_buffer(space, space.BUF_FULL_RO) def writebuf_w(self, space): - return self.implementation.get_buffer(space, False) + return self.implementation.get_buffer(space, space.BUF_FULL) def charbuf_w(self, space): - return self.implementation.get_buffer(space, True).as_str() + return self.implementation.get_buffer(space, space.BUF_FULL_RO).as_str() def descr_get_data(self, space): - return space.newbuffer(self.implementation.get_buffer(space, False)) + return space.newbuffer(self.implementation.get_buffer(space, space.BUF_FULL)) @unwrap_spec(offset=int, axis1=int, 
axis2=int) def descr_diagonal(self, space, offset=0, axis1=0, axis2=1): diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -3215,7 +3215,9 @@ raises(TypeError, array, Dummy({'version': 3, 'typestr': 'f8', 'shape': ('a', 3)})) a = array([1, 2, 3]) - b = array(Dummy(a.__array_interface__)) + d = Dummy(a.__array_interface__) + b = array(d) + assert b.base is None b[1] = 200 assert a[1] == 2 # upstream compatibility, is this a bug? interface_a = a.__array_interface__ @@ -3226,6 +3228,8 @@ interface_b.pop('data') interface_a.pop('data') assert interface_a == interface_b + b = array(d, copy=False) + assert b.base is d b = array(Dummy({'version':3, 'shape': (50,), 'typestr': 'u1', 'data': 'a'*100})) @@ -3594,6 +3598,7 @@ cls.w_float32val = cls.space.wrap(struct.pack('f', 5.2)) cls.w_float64val = cls.space.wrap(struct.pack('d', 300.4)) cls.w_ulongval = cls.space.wrap(struct.pack('L', 12)) + cls.w_one = cls.space.wrap(struct.pack('i', 1)) def test_frombuffer(self): import numpy as np @@ -3645,8 +3650,6 @@ else: EMPTY = None x = np.array([1, 2, 3, 4, 5], dtype='i') - y = memoryview('abc') - assert y.format == 'B' y = memoryview(x) assert y.format == 'i' assert y.shape == (5,) @@ -3654,6 +3657,16 @@ assert y.strides == (4,) assert y.suboffsets == EMPTY assert y.itemsize == 4 + assert isinstance(y, memoryview) + assert y[0] == self.one + assert (np.array(y) == x).all() + + x = np.array([0, 0, 0, 0], dtype='O') + y = memoryview(x) + # handles conversion of address to pinned object? 
+ z = np.array(y) + assert z.dtype == 'O' + assert (z == x).all() def test_fromstring(self): import sys diff --git a/pypy/module/micronumpy/test/test_subtype.py b/pypy/module/micronumpy/test/test_subtype.py --- a/pypy/module/micronumpy/test/test_subtype.py +++ b/pypy/module/micronumpy/test/test_subtype.py @@ -702,3 +702,32 @@ ret = obj.sum() print type(ret) assert ret.info == 'spam' + + def test_ndarray_subclass_assigns_base(self): + import numpy as np + init_called = [] + class _DummyArray(object): + """ Dummy object that just exists to hang __array_interface__ dictionaries + and possibly keep alive a reference to a base array. + """ + def __init__(self, interface, base=None): + self.__array_interface__ = interface + init_called.append(1) + self.base = base + + x = np.zeros(10) + d = _DummyArray(x.__array_interface__, base=x) + y = np.array(d, copy=False) + assert sum(init_called) == 1 + assert y.base is d + + x = np.zeros((0,), dtype='float32') + intf = x.__array_interface__.copy() + intf["strides"] = x.strides + x.__array_interface__["strides"] = x.strides + d = _DummyArray(x.__array_interface__, base=x) + y = np.array(d, copy=False) + assert sum(init_called) == 2 + assert y.base is d + + diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1851,7 +1851,7 @@ arr.gcstruct) def read(self, arr, i, offset, dtype): - if arr.gcstruct is V_OBJECTSTORE: + if arr.gcstruct is V_OBJECTSTORE and not arr.base(): raise oefmt(self.space.w_NotImplementedError, "cannot read object from array with no gc hook") return self.box(self._read(arr.storage, i, offset)) diff --git a/pypy/objspace/std/memoryobject.py b/pypy/objspace/std/memoryobject.py --- a/pypy/objspace/std/memoryobject.py +++ b/pypy/objspace/std/memoryobject.py @@ -14,6 +14,7 @@ """Implement the built-in 'memoryview' type as a wrapper around an interp-level buffer. 
""" + _attrs_ = ['buf'] def __init__(self, buf): assert isinstance(buf, Buffer) @@ -115,7 +116,7 @@ self.buf.setslice(start, value.as_str()) def descr_len(self, space): - return space.wrap(self.buf.getlength()) + return space.wrap(self.buf.getlength() / self.buf.getitemsize()) def w_get_format(self, space): return space.wrap(self.buf.getformat()) From pypy.commits at gmail.com Tue Sep 6 06:59:17 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 06 Sep 2016 03:59:17 -0700 (PDT) Subject: [pypy-commit] pypy default: document branches and update release 5.4.1 Message-ID: <57cea185.449a1c0a.2f51c.d679@mx.google.com> Author: Matti Picus Branch: Changeset: r86907:2ee81d5bf769 Date: 2016-09-06 13:57 +0300 http://bitbucket.org/pypy/pypy/changeset/2ee81d5bf769/ Log: document branches and update release 5.4.1 diff --git a/pypy/doc/release-pypy2.7-v5.4.1.rst b/pypy/doc/release-pypy2.7-v5.4.1.rst --- a/pypy/doc/release-pypy2.7-v5.4.1.rst +++ b/pypy/doc/release-pypy2.7-v5.4.1.rst @@ -5,8 +5,8 @@ We have released a bugfix for PyPy2.7-v5.4.0, released last week, due to the following issues: - * Update list of contributors in documentation and LICENSE file, - this was unfortunately left out of 5.4.0. My apoligies to the new + * Update list of contributors in documentation and LICENSE file, + this was unfortunately left out of 5.4.0. 
My apologies to the new contributors * Allow tests run with `-A` to find `libm.so` even if it is a script not a @@ -21,10 +21,13 @@ * Fix for and issue where `unicode.decode('utf8', 'custom_replace')` messed up the last byte of a unicode string sometimes - * Update built-in cffi_ to the soon-to-be-released 1.8.1 version + * Update built-in cffi_ to the soon-to-be-released 1.8.2 version * Explicitly detect that we found as-yet-unsupported OpenSSL 1.1, and crash - translation with a message asking for help porting it + translation with a message asking for help porting it + + * Fix a regression where a PyBytesObject was forced (converted to a RPython + object) when not required, reported as issue #2395 Thanks to those who reported the issues. diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -7,3 +7,9 @@ .. branch: rpython-resync Backport rpython changes made directly on the py3k and py3.5 branches. + +.. 
branch: buffer-interface +Implement PyObject_GetBuffer, PyMemoryView_GET_BUFFER, and handles memoryviews +in numpypy + + From pypy.commits at gmail.com Tue Sep 6 10:39:10 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 06 Sep 2016 07:39:10 -0700 (PDT) Subject: [pypy-commit] pypy union-side-effects: Use more general strategy in 2 tests Message-ID: <57ced50e.c41f1c0a.59642.189a@mx.google.com> Author: Ronan Lamy Branch: union-side-effects Changeset: r86908:35a154a7faf2 Date: 2016-09-06 15:38 +0100 http://bitbucket.org/pypy/pypy/changeset/35a154a7faf2/ Log: Use more general strategy in 2 tests diff --git a/rpython/annotator/test/test_model.py b/rpython/annotator/test/test_model.py --- a/rpython/annotator/test/test_model.py +++ b/rpython/annotator/test/test_model.py @@ -170,12 +170,12 @@ lambda st_ann: valid_unions(st_ann) | st.builds(SomeTuple, st.lists(st_ann)), max_leaves=3) - at given(s=st_numeric) + at given(s=st_annotation) def test_union_unary(s): assert union(s, s) == s assert union(s_ImpossibleValue, s) == s - at given(s1=st_numeric, s2=st_numeric) + at given(s1=st_annotation, s2=st_annotation) def test_commutativity_of_union_compatibility(s1, s2): assert compatible(s1, s2) == compatible(s2, s1) From pypy.commits at gmail.com Tue Sep 6 10:51:13 2016 From: pypy.commits at gmail.com (cfbolz) Date: Tue, 06 Sep 2016 07:51:13 -0700 (PDT) Subject: [pypy-commit] pypy default: improve the trace of import statements to not contain calls Message-ID: <57ced7e1.94071c0a.8c4bb.234e@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r86909:aab338e52409 Date: 2016-09-06 16:50 +0200 http://bitbucket.org/pypy/pypy/changeset/aab338e52409/ Log: improve the trace of import statements to not contain calls (needed slightly awkward code) diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -259,10 +259,8 @@ raise oefmt(space.w_ValueError, "Empty module name") w = space.wrap 
- if w_fromlist is not None and space.is_true(w_fromlist): - fromlist_w = space.fixedview(w_fromlist) - else: - fromlist_w = None + if w_fromlist is not None and not space.is_true(w_fromlist): + w_fromlist = None rel_modulename = None if (level != 0 and w_globals is not None and @@ -284,19 +282,19 @@ w_mod = None else: w_mod = absolute_import(space, rel_modulename, rel_level, - fromlist_w, tentative=True) + w_fromlist, tentative=True) else: w_mod = absolute_import(space, rel_modulename, rel_level, - fromlist_w, tentative=False) + w_fromlist, tentative=False) if w_mod is not None: return w_mod - w_mod = absolute_import(space, modulename, 0, fromlist_w, tentative=0) + w_mod = absolute_import(space, modulename, 0, w_fromlist, tentative=0) if rel_modulename is not None: space.setitem(space.sys.get('modules'), w(rel_modulename), space.w_None) return w_mod -def absolute_import(space, modulename, baselevel, fromlist_w, tentative): +def absolute_import(space, modulename, baselevel, w_fromlist, tentative): # Short path: check in sys.modules, but only if there is no conflict # on the import lock. In the situation of 'import' statements # inside tight loops, this should be true, and absolute_import_try() @@ -304,25 +302,25 @@ # if the import lock is currently held by another thread, then we # have to wait, and so shouldn't use the fast path. 
if not getimportlock(space).lock_held_by_someone_else(): - w_mod = absolute_import_try(space, modulename, baselevel, fromlist_w) + w_mod = absolute_import_try(space, modulename, baselevel, w_fromlist) if w_mod is not None and not space.is_w(w_mod, space.w_None): return w_mod return absolute_import_with_lock(space, modulename, baselevel, - fromlist_w, tentative) + w_fromlist, tentative) @jit.dont_look_inside def absolute_import_with_lock(space, modulename, baselevel, - fromlist_w, tentative): + w_fromlist, tentative): lock = getimportlock(space) lock.acquire_lock() try: return _absolute_import(space, modulename, baselevel, - fromlist_w, tentative) + w_fromlist, tentative) finally: lock.release_lock(silent_after_fork=True) @jit.unroll_safe -def absolute_import_try(space, modulename, baselevel, fromlist_w): +def absolute_import_try(space, modulename, baselevel, w_fromlist): """ Only look up sys.modules, not actually try to load anything """ w_path = None @@ -330,7 +328,7 @@ if '.' not in modulename: w_mod = check_sys_modules_w(space, modulename) first = w_mod - if fromlist_w is not None and w_mod is not None: + if w_fromlist is not None and w_mod is not None: w_path = try_getattr(space, w_mod, space.wrap('__path__')) else: level = 0 @@ -345,28 +343,36 @@ return None if level == baselevel: first = w_mod - if fromlist_w is not None: + if w_fromlist is not None: w_path = try_getattr(space, w_mod, space.wrap('__path__')) level += 1 - if fromlist_w is not None: + if w_fromlist is not None: + # bit artificial code but important to not just unwrap w_fromlist + # to get a better trace. 
if it is unwrapped, the immutability of the + # tuple is lost if w_path is not None: - if len(fromlist_w) == 1 and space.eq_w(fromlist_w[0], - space.wrap('*')): + length = space.len_w(w_fromlist) + if length == 1 and space.eq_w( + space.getitem(w_fromlist, space.wrap(0)), + space.wrap('*')): w_all = try_getattr(space, w_mod, space.wrap('__all__')) if w_all is not None: - fromlist_w = space.fixedview(w_all) + w_fromlist = w_all else: - fromlist_w = [] + w_fromlist = None # "from x import *" with x already imported and no x.__all__ # always succeeds without doing more imports. It will # just copy everything from x.__dict__ as it is now. - for w_name in fromlist_w: - if try_getattr(space, w_mod, w_name) is None: - return None + + if w_fromlist is not None: + for i in range(length): + w_name = space.getitem(w_fromlist, space.wrap(i)) + if try_getattr(space, w_mod, w_name) is None: + return None return w_mod return first -def _absolute_import(space, modulename, baselevel, fromlist_w, tentative): +def _absolute_import(space, modulename, baselevel, w_fromlist, tentative): w = space.wrap if '/' in modulename or '\\' in modulename: @@ -394,18 +400,23 @@ w_path = try_getattr(space, w_mod, w('__path__')) level += 1 - if fromlist_w is not None: + if w_fromlist is not None: if w_path is not None: - if len(fromlist_w) == 1 and space.eq_w(fromlist_w[0],w('*')): + length = space.len_w(w_fromlist) + if length == 1 and space.eq_w( + space.getitem(w_fromlist, space.wrap(0)), + space.wrap('*')): w_all = try_getattr(space, w_mod, w('__all__')) if w_all is not None: - fromlist_w = space.fixedview(w_all) + w_fromlist = w_all else: - fromlist_w = [] - for w_name in fromlist_w: - if try_getattr(space, w_mod, w_name) is None: - load_part(space, w_path, prefix, space.str0_w(w_name), - w_mod, tentative=1) + w_fromlist = None + if w_fromlist is not None: + for i in range(length): + w_name = space.getitem(w_fromlist, space.wrap(i)) + if try_getattr(space, w_mod, w_name) is None: + 
load_part(space, w_path, prefix, space.str0_w(w_name), + w_mod, tentative=1) return w_mod else: return first diff --git a/pypy/module/pypyjit/test_pypy_c/test_import.py b/pypy/module/pypyjit/test_pypy_c/test_import.py --- a/pypy/module/pypyjit/test_pypy_c/test_import.py +++ b/pypy/module/pypyjit/test_pypy_c/test_import.py @@ -38,3 +38,27 @@ # call_may_force(absolute_import_with_lock). for opname in log.opnames(loop.allops(opcode="IMPORT_NAME")): assert 'call' not in opname # no call-like opcode + + def test_import_fast_path(self, tmpdir): + print tmpdir + pkg = tmpdir.join('mypkg').ensure(dir=True) + subdir = pkg.join("sub").ensure(dir=True) + pkg.join('__init__.py').write("") + subdir.join('__init__.py').write("") + subdir.join('mod.py').write(str(py.code.Source(""" + def do_the_import(): + import sys + """))) + def main(path, n): + def do_the_import(): + from mypkg.sub import mod + import sys + sys.path.append(path) + for i in range(n): + do_the_import() + # + log = self.run(main, [str(tmpdir), 300]) + loop, = log.loops_by_filename(self.filepath) + # check that no string compares and other calls are there + for opname in log.opnames(loop.allops(opcode="IMPORT_NAME")): + assert 'call' not in opname # no call-like opcode diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py --- a/pypy/objspace/std/celldict.py +++ b/pypy/objspace/std/celldict.py @@ -64,6 +64,8 @@ def setitem_str(self, w_dict, key, w_value): cell = self.getdictvalue_no_unwrapping(w_dict, key) + #if (key == '__package__' or key == "__path__") and cell is not None and w_value is not cell: + # print "WARNING", key, w_value, cell, self return self._setitem_str_cell_known(cell, w_dict, key, w_value) def _setitem_str_cell_known(self, cell, w_dict, key, w_value): From pypy.commits at gmail.com Tue Sep 6 14:36:45 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 06 Sep 2016 11:36:45 -0700 (PDT) Subject: [pypy-commit] pypy.org extradoc: update the checksums Message-ID: 
<57cf0cbd.436ec20a.434cf.da57@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r786:42216d0bec0f Date: 2016-09-06 20:36 +0200 http://bitbucket.org/pypy/pypy.org/changeset/42216d0bec0f/ Log: update the checksums diff --git a/source/download.txt b/source/download.txt --- a/source/download.txt +++ b/source/download.txt @@ -450,18 +450,18 @@ pypy2.7-v5.4.1 md5:: - 70958c05af6628a66db5072ef1c72522 pypy2-v5.4.1-linux-armel.tar.bz2 - 5246b7b963689ec5b70291c2b104476a pypy2-v5.4.1-linux-armhf-raring.tar.bz2 - 106cfa49756df7ae3bf531ce6659d0ed pypy2-v5.4.1-linux-armhf-raspbian.tar.bz2 - f561bedb338fa09f011eaf1edac0faf1 pypy2-v5.4.1-linux32.tar.bz2 - df1327fc3cd41a1ff860e90b5a901899 pypy2-v5.4.1-linux64.tar.bz2 - b24ebe9f4825fc05afd76a9e8f47018c pypy2-v5.4.1-osx64.tar.bz2 - b8e90edf11639d1757413c8bf5a11d49 pypy2-v5.4.1-ppc64.tar.bz2 - 55f6747988d981699e52a7c60aed8a7f pypy2-v5.4.1-ppc64le.tar.bz2 - 082cca9bb948c8b1389c35db7174396a pypy2-v5.4.1-s390x.tar.bz2 - 129e730c84f55133b9694bc48e2d4812 pypy2-v5.4.1-src.tar.bz2 - f20a420d90475b72c6ef8b4ab90377f0 pypy2-v5.4.1-src.zip - 125874d61b4ac4e2fd7d0b7c2db3b041 pypy2-v5.4.1-win32.zip + 425ffedf0db4dd737d450aa064ae0e7a pypy2-v5.4.1-linux-armel.tar.bz2 + 15f41409cbadbde3ef22ee60ded8579a pypy2-v5.4.1-linux-armhf-raring.tar.bz2 + 5940ea0c1077e3bc2ba461bf55800abb pypy2-v5.4.1-linux-armhf-raspbian.tar.bz2 + 23cbad540abff48ea67f5b75368344be pypy2-v5.4.1-linux32.tar.bz2 + 2f4a82ae306f3d30d486fb945ca9c6be pypy2-v5.4.1-linux64.tar.bz2 + a18cbd990c97f75a08235a35e546a637 pypy2-v5.4.1-osx64.tar.bz2 + 90b0650dda1b5bf33f6119f20ba9c3b6 pypy2-v5.4.1-ppc64.tar.bz2 + 9a2c1b3124151b79e68f36eb2d4891d1 pypy2-v5.4.1-ppc64le.tar.bz2 + 826d4b48e43c84a50dc7e2adc2cb69d4 pypy2-v5.4.1-s390x.tar.bz2 + d1d197d16331aa23a7fd4b5d4c3c1717 pypy2-v5.4.1-src.tar.bz2 + 1aab9fe6e7c03e959cde466819034bab pypy2-v5.4.1-src.zip + b04aad943aac92862a73b1fd90157a00 pypy2-v5.4.1-win32.zip pypy3.3-v5.2-alpha md5:: @@ -483,18 +483,18 @@ pypy2.7-5.4.1 sha1:: - 
74fea5a7a0a3d8c899404ddc8c0296d4bf4ca3d0 pypy2-v5.4.1-linux-armel.tar.bz2 - 4c72d94325567d2079ca5021da3cba6cbc835744 pypy2-v5.4.1-linux-armhf-raring.tar.bz2 - 18486da20d2513c083be308f8222f83f80c74671 pypy2-v5.4.1-linux-armhf-raspbian.tar.bz2 - c38c40dfcbe9cba712a508fed0ea3dc6da5e2b7c pypy2-v5.4.1-linux32.tar.bz2 - 2c9a45c1bf67d8f2fac9d09e082f059ce892f291 pypy2-v5.4.1-linux64.tar.bz2 - 765bed9e45fa58a6f65ae20ff28b5e39beb56793 pypy2-v5.4.1-osx64.tar.bz2 - 6e73fa972ca01d58e95b36f133319bff0cc66876 pypy2-v5.4.1-ppc64.tar.bz2 - 4410c7514328d936084b00ac9d01af1aecdc7289 pypy2-v5.4.1-ppc64le.tar.bz2 - 8726412fcbaea859f21fb4b1c21fd5832e4c56d7 pypy2-v5.4.1-s390x.tar.bz2 - 0d865c16a3779f492b7f4687cd46c21bbfc05609 pypy2-v5.4.1-src.tar.bz2 - 8f898a052786d3b60e9effe162c15fa572a5f52d pypy2-v5.4.1-src.zip - 21958b782dc727a0be3bbc248e0ca9af18305654 pypy2-v5.4.1-win32.zip + a54b2a8b6def85663b10fd956d51fbd052954b83 pypy2-v5.4.1-linux-armel.tar.bz2 + 61c9a5269d6d414c4a1d7c41bbc6a45da318f138 pypy2-v5.4.1-linux-armhf-raring.tar.bz2 + ea48ffe40887e25adcf1969a6b0e25dbe42a2457 pypy2-v5.4.1-linux-armhf-raspbian.tar.bz2 + bf0ac4668323abead04dd0948a2e541a158e4a00 pypy2-v5.4.1-linux32.tar.bz2 + d16601b04987381c4922a19dacb0ca2591a3b566 pypy2-v5.4.1-linux64.tar.bz2 + 4084b7db8ee16a52e7ecb89d10765d961864c67c pypy2-v5.4.1-osx64.tar.bz2 + 98eac6f32f4fe8fce6eb0f0594163f3e01dccced pypy2-v5.4.1-ppc64.tar.bz2 + fc676af00b356ae48602b531675774f2badfc4fd pypy2-v5.4.1-ppc64le.tar.bz2 + 70b736f1fdb92ae9916630b7cc8954ed92490f64 pypy2-v5.4.1-s390x.tar.bz2 + 1143948f50b1b95b55465b246639b4c48251e38e pypy2-v5.4.1-src.tar.bz2 + a6a9ce0defb401d8777d8af5949d23393416a390 pypy2-v5.4.1-src.zip + 49c2ad75f1531730f7ee466d833d318333915ce0 pypy2-v5.4.1-win32.zip pypy3.3-v5.2-alpha sha1:: @@ -510,18 +510,18 @@ pypy2.7-5.4.1 sha256:: - 8925b76fe9ca6f960d8f914ed67f7a3c52ce2b4c65fa71a5ef7d4b285c2c3a36 pypy2-v5.4.1-linux-armel.tar.bz2 - 0213b0d948ae0afea8b4cb93f08e55b0562522b3ab8f2706c4e22ffe8cd86f84 
pypy2-v5.4.1-linux-armhf-raring.tar.bz2 - 2daee13ec1836c1041c89c18d9514134ff606dc3648fc6304611eb1ec0819289 pypy2-v5.4.1-linux-armhf-raspbian.tar.bz2 - 85bccf8679f908c08850115fe74325474fe1b2e1e793c147d1fa484b56472b12 pypy2-v5.4.1-linux32.tar.bz2 - 06c29d59565d9fdb618ed8aa730e05cf975da21158955591dff38d9e305af074 pypy2-v5.4.1-linux64.tar.bz2 - 507c81af9ca302c67a582255306529f88fba56760d353e17d667a114eee1f7e2 pypy2-v5.4.1-osx64.tar.bz2 - c11b37f5e97b647003426987e223d75a0dc0da1ecc35675ddad0af8a9add972d pypy2-v5.4.1-ppc64.tar.bz2 - 652d97fbd574d0349f7aa8b37c8c5a1238ed0cd3d6b68cf2ea8280b7ead4c7ad pypy2-v5.4.1-ppc64le.tar.bz2 - 987b3354dcbed5fd3f0d8d9d1484a259f0dff97da5d11a84b354c6e61a4af891 pypy2-v5.4.1-s390x.tar.bz2 - 92af82664ace96d721c66dbe8726d4f39c7d01f568d9df56c11149be2960238f pypy2-v5.4.1-src.tar.bz2 - 08148d1157dd16f402c7844fc0cdfde9e7d187c7fd1549a93e888e2fd13828bf pypy2-v5.4.1-src.zip - b703224af4e99243d090783a7b685063da7ba01ef28bb99a89cacfce2fb0dfc2 pypy2-v5.4.1-win32.zip + a1eb5f672aae62606176305e52a51b060ba974b6181ebefcd2c555ecf5f8614f pypy2-v5.4.1-linux-armel.tar.bz2 + 2c4befc4517adec874155a8b6fa0b9d18388943d4ffe778002072db7783e417a pypy2-v5.4.1-linux-armhf-raring.tar.bz2 + b38646519ee1a888c68f8f4713c122867b4b36693c8acabb38eb827a9d2d51f9 pypy2-v5.4.1-linux-armhf-raspbian.tar.bz2 + 6d1e2386ec1e05dffed493aa2d5e6db5cf5de18d7350d44b85f2e45aa5c9a774 pypy2-v5.4.1-linux32.tar.bz2 + 9c85319778224d7fb0c348f55fe3fada15bb579c5f3870a13ad63b42a737dd72 pypy2-v5.4.1-linux64.tar.bz2 + ae9329c8f0a6df431c6224c27c634f998688ac803e8d100cee9a774e6bba38b5 pypy2-v5.4.1-osx64.tar.bz2 + ff118f2dc2393c821595fa7872e4d7bdae2a9a16a854759e103608df383d765a pypy2-v5.4.1-ppc64.tar.bz2 + 684e862cdb281a22c95d73012f7d2693be9fd9cdd171784256da1514ae1c8164 pypy2-v5.4.1-ppc64le.tar.bz2 + 4004c86bf2dfdd1d92120f514d296af7602e0c5e2c6526a14ff8b5508c0fe8f7 pypy2-v5.4.1-s390x.tar.bz2 + 45dbc50c81498f6f1067201b8fc887074b43b84ee32cc47f15e7db17571e9352 pypy2-v5.4.1-src.tar.bz2 + 
54b23c11a92dd6328a58787c3d719189f0aa24c872b479acf50094dfb4be0a46 pypy2-v5.4.1-src.zip + ec729218a820bc2aa2cf1fcacf9de0fee9e04144fe138596198a6b4615505e03 pypy2-v5.4.1-win32.zip pypy3.3-v5.2-alpha sha256:: From pypy.commits at gmail.com Tue Sep 6 14:47:56 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 06 Sep 2016 11:47:56 -0700 (PDT) Subject: [pypy-commit] pypy default: remove one more pyobj force, rework documentation Message-ID: <57cf0f5c.02d31c0a.f7eda.09d5@mx.google.com> Author: Matti Picus Branch: Changeset: r86910:abbb7cdf30ab Date: 2016-09-06 21:46 +0300 http://bitbucket.org/pypy/pypy/changeset/abbb7cdf30ab/ Log: remove one more pyobj force, rework documentation diff --git a/pypy/doc/release-pypy2.7-v5.4.1.rst b/pypy/doc/release-pypy2.7-v5.4.1.rst --- a/pypy/doc/release-pypy2.7-v5.4.1.rst +++ b/pypy/doc/release-pypy2.7-v5.4.1.rst @@ -21,7 +21,7 @@ * Fix for and issue where `unicode.decode('utf8', 'custom_replace')` messed up the last byte of a unicode string sometimes - * Update built-in cffi_ to the soon-to-be-released 1.8.2 version + * Update built-in cffi_ to version 1.8.1 * Explicitly detect that we found as-yet-unsupported OpenSSL 1.1, and crash translation with a message asking for help porting it diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py --- a/pypy/module/cpyext/bytesobject.py +++ b/pypy/module/cpyext/bytesobject.py @@ -14,43 +14,31 @@ ## Implementation of PyBytesObject ## ================================ ## -## The problem -## ----------- +## PyBytesObject has its own ob_sval buffer, so we have two copies of a string; +## one in the PyBytesObject returned from various C-API functions and another +## in the corresponding RPython object. ## -## PyString_AsString() must return a (non-movable) pointer to the underlying -## ob_sval, whereas pypy strings are movable. C code may temporarily store -## this address and use it, as long as it owns a reference to the PyObject. 
-## There is no "release" function to specify that the pointer is not needed -## any more. +## The following calls can create a PyBytesObject without a correspoinding +## RPython object: + +## In any of the above PyBytesObject, the ob_sval buffer may be modified as +## long as the freshly allocated PyBytesObject is not "forced" via a call +## to any of the more sophisticated C-API functions. ## -## Also, the pointer may be used to fill the initial value of string. This is -## valid only when the string was just allocated, and is not used elsewhere. -## -## Solution -## -------- -## -## PyBytesObject contains two additional members: the ob_size and an array -## char ob_sval which holds a \x0 terminated string. -## -## - A string allocated by pypy will be converted into a PyBytesObject with a -## buffer holding \x0. The first time PyString_AsString() is called, the -## PyStringObject is reallocated, and the string copied into the buffer. The -## ob_size reflects the length of the string. -## -## - A string allocated with PyString_FromStringAndSize(NULL, size) will -## allocate a PyBytesObject structure, and a buffer with the specified -## size+1, as part of the object. The buffer is then supposed to be immutable. -## -##- A buffer obtained from PyString_AS_STRING() could be mutable iff -## there is no corresponding pypy object for the string -## -## - _PyString_Resize() works only on not-yet-pypy'd strings, and returns a -## similar object. -## -## - PyString_Size() doesn't need to force the object. +## Care has been taken in implementing the functions below, so that +## if they are called with a non-forced PyBytesObject, they will not +## unintentionally force the creation of a RPython object. 
As long as only these +## are used, the ob_sval buffer is still modifiable: +## +## PyBytes_AsString / PyString_AsString +## PyBytes_AS_STRING / PyString_AS_STRING +## PyBytes_AsStringAndSize / PyString_AsStringAndSize +## PyBytes_Size / PyString_Size +## PyBytes_Resize / PyString_Resize +## _PyBytes_Resize / _PyString_Resize (raises if called with a forced object) ## ## - There could be an (expensive!) check in from_ref() that the buffer still -## corresponds to the pypy gc-managed string. +## corresponds to the pypy gc-managed string, ## PyBytesObjectStruct = lltype.ForwardReference() @@ -177,9 +165,6 @@ raise oefmt(space.w_TypeError, "expected string or Unicode object, %T found", from_ref(space, ref)) - if not pyobj_has_w_obj(ref): - # force the ref - bytes_realize(space, ref) ref_str = rffi.cast(PyBytesObject, ref) data[0] = ref_str.c_ob_sval if length: From pypy.commits at gmail.com Tue Sep 6 15:02:14 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 06 Sep 2016 12:02:14 -0700 (PDT) Subject: [pypy-commit] pypy default: documentation Message-ID: <57cf12b6.a3a3c20a.2eacd.afda@mx.google.com> Author: Matti Picus Branch: Changeset: r86911:c1a85c2f66cf Date: 2016-09-06 22:00 +0300 http://bitbucket.org/pypy/pypy/changeset/c1a85c2f66cf/ Log: documentation diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py --- a/pypy/module/cpyext/bytesobject.py +++ b/pypy/module/cpyext/bytesobject.py @@ -20,8 +20,10 @@ ## ## The following calls can create a PyBytesObject without a correspoinding ## RPython object: - -## In any of the above PyBytesObject, the ob_sval buffer may be modified as +## +## PyBytes_FromStringAndSize(NULL, n) / PyString_FromStringAndSize(NULL, n) +## +## In the PyBytesObject returned, the ob_sval buffer may be modified as ## long as the freshly allocated PyBytesObject is not "forced" via a call ## to any of the more sophisticated C-API functions. 
## From pypy.commits at gmail.com Tue Sep 6 16:43:34 2016 From: pypy.commits at gmail.com (sbauman) Date: Tue, 06 Sep 2016 13:43:34 -0700 (PDT) Subject: [pypy-commit] pypy remove-getarrayitem-pure: Sanity checks Message-ID: <57cf2a76.c4f6c20a.c752e.51c1@mx.google.com> Author: Spenser Bauman Branch: remove-getarrayitem-pure Changeset: r86913:424b9e80e6a4 Date: 2016-04-05 21:54 -0400 http://bitbucket.org/pypy/pypy/changeset/424b9e80e6a4/ Log: Sanity checks diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -515,9 +515,24 @@ return self._do_getarrayitem_gc_any(rop.GETARRAYITEM_GC_F, arraybox, indexbox, arraydescr) - opimpl_getarrayitem_gc_i_pure = opimpl_getarrayitem_gc_i - opimpl_getarrayitem_gc_r_pure = opimpl_getarrayitem_gc_r - opimpl_getarrayitem_gc_f_pure = opimpl_getarrayitem_gc_f + @arguments("box", "box", "descr") + def opimpl_getarrayitem_gc_i_pure(self, arraybox, indexbox, arraydescr): + assert arraydescr.is_always_pure() + return self.opimpl_getarrayitem_gc_i(arraybox, indexbox, arraydescr) + + @arguments("box", "box", "descr") + def opimpl_getarrayitem_gc_r_pure(self, arraybox, indexbox, arraydescr): + assert arraydescr.is_always_pure() + return self.opimpl_getarrayitem_gc_r(arraybox, indexbox, arraydescr) + + @arguments("box", "box", "descr") + def opimpl_getarrayitem_gc_f_pure(self, arraybox, indexbox, arraydescr): + assert arraydescr.is_always_pure() + return self.opimpl_getarrayitem_gc_f(arraybox, indexbox, arraydescr) + + # opimpl_getarrayitem_gc_i_pure = opimpl_getarrayitem_gc_i + # opimpl_getarrayitem_gc_r_pure = opimpl_getarrayitem_gc_r + # opimpl_getarrayitem_gc_f_pure = opimpl_getarrayitem_gc_f @arguments("box", "box", "descr") def opimpl_getarrayitem_raw_i(self, arraybox, indexbox, arraydescr): From pypy.commits at gmail.com Tue Sep 6 16:43:32 2016 From: pypy.commits at gmail.com (sbauman) Date: Tue, 06 Sep 2016 13:43:32 -0700 
(PDT) Subject: [pypy-commit] pypy remove-getarrayitem-pure: Move code Message-ID: <57cf2a74.c50e1c0a.2dbfe.38f7@mx.google.com> Author: Spenser Bauman Branch: remove-getarrayitem-pure Changeset: r86912:2fb300020852 Date: 2016-04-03 13:16 -0400 http://bitbucket.org/pypy/pypy/changeset/2fb300020852/ Log: Move code diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -447,6 +447,8 @@ pass self.force_lazy_set(fielddescr, can_cache=False) for arraydescr in effectinfo.write_descrs_arrays: + if arraydescr.is_always_pure(): + continue self.force_lazy_setarrayitem(arraydescr, can_cache=False) if arraydescr in self.corresponding_array_descrs: dictdescr = self.corresponding_array_descrs.pop(arraydescr) @@ -552,10 +554,7 @@ cf.do_setfield(self, op) def optimize_GETARRAYITEM_GC_I(self, op): - arrayinfo = self.ensure_ptr_info_arg0(op) - indexb = self.getintbound(op.getarg(1)) arraydescr = op.getdescr() - if (arraydescr.is_always_pure() and self.get_constant_box(op.getarg(0)) is not None and self.get_constant_box(op.getarg(1)) is not None): @@ -563,6 +562,8 @@ self.optimizer.make_constant(op, resbox) return + arrayinfo = self.ensure_ptr_info_arg0(op) + indexb = self.getintbound(op.getarg(1)) cf = None if indexb.is_constant(): index = indexb.getint() From pypy.commits at gmail.com Tue Sep 6 16:43:36 2016 From: pypy.commits at gmail.com (sbauman) Date: Tue, 06 Sep 2016 13:43:36 -0700 (PDT) Subject: [pypy-commit] pypy force-virtual-state: Emit proper instruction sequence for checking raw pointers Message-ID: <57cf2a78.919a1c0a.eb019.3add@mx.google.com> Author: Spenser Bauman Branch: force-virtual-state Changeset: r86914:9c278b23ff9f Date: 2016-09-06 16:40 -0400 http://bitbucket.org/pypy/pypy/changeset/9c278b23ff9f/ Log: Emit proper instruction sequence for checking raw pointers diff --git a/rpython/jit/metainterp/optimizeopt/info.py 
b/rpython/jit/metainterp/optimizeopt/info.py --- a/rpython/jit/metainterp/optimizeopt/info.py +++ b/rpython/jit/metainterp/optimizeopt/info.py @@ -365,6 +365,13 @@ def visitor_dispatch_virtual_type(self, visitor): raise NotImplementedError("abstract") + def make_guards(self, op, short, optimizer): + from rpython.jit.metainterp.optimizeopt.optimizer import CONST_0 + op = ResOperation(rop.INT_EQ, [op, CONST_0]) + short.append(op) + op = ResOperation(rop.GUARD_FALSE, [op]) + short.append(op) + class RawBufferPtrInfo(AbstractRawPtrInfo): buffer = None From pypy.commits at gmail.com Tue Sep 6 18:02:08 2016 From: pypy.commits at gmail.com (stefanor) Date: Tue, 06 Sep 2016 15:02:08 -0700 (PDT) Subject: [pypy-commit] pypy default: If the expected exception was raised, the SSLContext can't be shut down yet Message-ID: <57cf3ce0.04141c0a.63e28.531f@mx.google.com> Author: Stefano Rivera Branch: Changeset: r86915:0dcc383e61e3 Date: 2016-09-06 15:01 -0700 http://bitbucket.org/pypy/pypy/changeset/0dcc383e61e3/ Log: If the expected exception was raised, the SSLContext can't be shut down yet diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py --- a/pypy/module/_ssl/test/test_ssl.py +++ b/pypy/module/_ssl/test/test_ssl.py @@ -450,7 +450,12 @@ # For compatibility assert exc.value.errno == _ssl.SSL_ERROR_WANT_READ finally: - c.shutdown() + try: + c.shutdown() + except _ssl.SSLError: + # If the expected exception was raised, the SSLContext + # can't be shut down yet + pass finally: s.close() From pypy.commits at gmail.com Wed Sep 7 00:16:01 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 06 Sep 2016 21:16:01 -0700 (PDT) Subject: [pypy-commit] pypy py3k: hg merge default Message-ID: <57cf9481.0575c20a.329f0.3186@mx.google.com> Author: Ronan Lamy Branch: py3k Changeset: r86916:37c0e5dcf737 Date: 2016-09-07 05:15 +0100 http://bitbucket.org/pypy/pypy/changeset/37c0e5dcf737/ Log: hg merge default diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ 
b/.hgtags @@ -30,3 +30,6 @@ 68bb3510d8212ae9efb687e12e58c09d29e74f87 release-pypy2.7-v5.4.0 68bb3510d8212ae9efb687e12e58c09d29e74f87 release-pypy2.7-v5.4.0 77392ad263504df011ccfcabf6a62e21d04086d0 release-pypy2.7-v5.4.0 +050d84dd78997f021acf0e133934275d63547cc0 release-pypy2.7-v5.4.1 +050d84dd78997f021acf0e133934275d63547cc0 release-pypy2.7-v5.4.1 +0e2d9a73f5a1818d0245d75daccdbe21b2d5c3ef release-pypy2.7-v5.4.1 diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO --- a/lib_pypy/cffi.egg-info/PKG-INFO +++ b/lib_pypy/cffi.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cffi -Version: 1.8.1 +Version: 1.8.2 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.8.1" -__version_info__ = (1, 8, 1) +__version__ = "1.8.2" +__version_info__ = (1, 8, 2) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. 
It may be older than __version__ diff --git a/lib_pypy/cffi/_embedding.h b/lib_pypy/cffi/_embedding.h --- a/lib_pypy/cffi/_embedding.h +++ b/lib_pypy/cffi/_embedding.h @@ -233,7 +233,7 @@ f = PySys_GetObject((char *)"stderr"); if (f != NULL && f != Py_None) { PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME - "\ncompiled with cffi version: 1.8.1" + "\ncompiled with cffi version: 1.8.2" "\n_cffi_backend module: ", f); modules = PyImport_GetModuleDict(); mod = PyDict_GetItemString(modules, "_cffi_backend"); diff --git a/lib_pypy/cffi/backend_ctypes.py b/lib_pypy/cffi/backend_ctypes.py --- a/lib_pypy/cffi/backend_ctypes.py +++ b/lib_pypy/cffi/backend_ctypes.py @@ -1009,6 +1009,12 @@ myref is not None and myref is other()) def __ne__(self, other): return not (self == other) + def __hash__(self): + try: + return self._hash + except AttributeError: + self._hash = hash(self()) + return self._hash self._weakref_cache_ref = {}, MyRef weak_cache, MyRef = self._weakref_cache_ref diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -6,6 +6,7 @@ .. toctree:: + release-pypy2.7-v5.4.1.rst release-pypy2.7-v5.4.0.rst release-pypy2.7-v5.3.1.rst release-pypy2.7-v5.3.0.rst diff --git a/pypy/doc/release-pypy2.7-v5.4.1.rst b/pypy/doc/release-pypy2.7-v5.4.1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-pypy2.7-v5.4.1.rst @@ -0,0 +1,64 @@ +========== +PyPy 5.4.1 +========== + +We have released a bugfix for PyPy2.7-v5.4.0, released last week, +due to the following issues: + + * Update list of contributors in documentation and LICENSE file, + this was unfortunately left out of 5.4.0. 
My apologies to the new + contributors + + * Allow tests run with `-A` to find `libm.so` even if it is a script not a + dynamically loadable file + + * Bump `sys.setrecursionlimit()` when translating PyPy, for translating with CPython + + * Tweak a float comparison with 0 in `backendopt.inline` to avoid rounding errors + + * Fix for an issue where os.access() accepted a float for mode + + * Fix for and issue where `unicode.decode('utf8', 'custom_replace')` messed up + the last byte of a unicode string sometimes + + * Update built-in cffi_ to version 1.8.1 + + * Explicitly detect that we found as-yet-unsupported OpenSSL 1.1, and crash + translation with a message asking for help porting it + + * Fix a regression where a PyBytesObject was forced (converted to a RPython + object) when not required, reported as issue #2395 + +Thanks to those who reported the issues. + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison) +due to its integrated tracing JIT compiler. + +We also welcome developers of other +`dynamic languages`_ to see what RPython can do for them. + +This release supports: + + * **x86** machines on most common operating systems + (Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD), + + * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux, + + * big- and little-endian variants of **PPC64** running Linux, + + * **s390x** running Linux + +.. _cffi: https://cffi.readthedocs.io +.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org +.. _`dynamic languages`: http://pypyjs.org + +Please update, and continue to help us make PyPy better. + +Cheers + +The PyPy Team + diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -7,3 +7,9 @@ .. branch: rpython-resync Backport rpython changes made directly on the py3k and py3.5 branches. + +.. 
branch: buffer-interface +Implement PyObject_GetBuffer, PyMemoryView_GET_BUFFER, and handles memoryviews +in numpypy + + diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1432,6 +1432,9 @@ BUF_FORMAT = 0x0004 BUF_ND = 0x0008 BUF_STRIDES = 0x0010 | BUF_ND + BUF_C_CONTIGUOUS = 0x0020 | BUF_STRIDES + BUF_F_CONTIGUOUS = 0x0040 | BUF_STRIDES + BUF_ANY_CONTIGUOUS = 0x0080 | BUF_STRIDES BUF_INDIRECT = 0x0100 | BUF_STRIDES BUF_CONTIG_RO = BUF_ND diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -3,7 +3,7 @@ from rpython.rlib import rdynload, clibffi, entrypoint from rpython.rtyper.lltypesystem import rffi -VERSION = "1.8.1" +VERSION = "1.8.2" FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI try: diff --git a/pypy/module/_cffi_backend/ctypestruct.py b/pypy/module/_cffi_backend/ctypestruct.py --- a/pypy/module/_cffi_backend/ctypestruct.py +++ b/pypy/module/_cffi_backend/ctypestruct.py @@ -105,9 +105,6 @@ return True return False - def _check_only_one_argument_for_union(self, w_ob): - pass - def convert_from_object(self, cdata, w_ob): if not self._copy_from_same(cdata, w_ob): self.convert_struct_from_object(cdata, w_ob, optvarsize=-1) @@ -117,19 +114,24 @@ ) def convert_struct_from_object(self, cdata, w_ob, optvarsize): self.force_lazy_struct() - self._check_only_one_argument_for_union(w_ob) space = self.space if (space.isinstance_w(w_ob, space.w_list) or space.isinstance_w(w_ob, space.w_tuple)): lst_w = space.listview(w_ob) - if len(lst_w) > len(self._fields_list): - raise oefmt(space.w_ValueError, - "too many initializers for '%s' (got %d)", - self.name, len(lst_w)) - for i in range(len(lst_w)): - optvarsize = self._fields_list[i].write_v(cdata, lst_w[i], + j = 0 + for w_obj in lst_w: + try: + while (self._fields_list[j].flags & + 
W_CField.BF_IGNORE_IN_CTOR): + j += 1 + except IndexError: + raise oefmt(space.w_ValueError, + "too many initializers for '%s' (got %d)", + self.name, len(lst_w)) + optvarsize = self._fields_list[j].write_v(cdata, w_obj, optvarsize) + j += 1 return optvarsize elif space.isinstance_w(w_ob, space.w_dict): @@ -185,14 +187,6 @@ class W_CTypeUnion(W_CTypeStructOrUnion): kind = "union" - def _check_only_one_argument_for_union(self, w_ob): - space = self.space - n = space.int_w(space.len(w_ob)) - if n > 1: - raise oefmt(space.w_ValueError, - "initializer for '%s': %d items given, but only one " - "supported (use a dict if needed)", self.name, n) - class W_CField(W_Root): _immutable_ = True @@ -200,18 +194,21 @@ BS_REGULAR = -1 BS_EMPTY_ARRAY = -2 - def __init__(self, ctype, offset, bitshift, bitsize): + BF_IGNORE_IN_CTOR = 0x01 + + def __init__(self, ctype, offset, bitshift, bitsize, flags): self.ctype = ctype self.offset = offset self.bitshift = bitshift # >= 0: bitshift; or BS_REGULAR/BS_EMPTY_ARRAY self.bitsize = bitsize + self.flags = flags # BF_xxx def is_bitfield(self): return self.bitshift >= 0 - def make_shifted(self, offset): + def make_shifted(self, offset, fflags): return W_CField(self.ctype, offset + self.offset, - self.bitshift, self.bitsize) + self.bitshift, self.bitsize, self.flags | fflags) def read(self, cdata): cdata = rffi.ptradd(cdata, self.offset) @@ -341,5 +338,6 @@ offset = interp_attrproperty('offset', W_CField), bitshift = interp_attrproperty('bitshift', W_CField), bitsize = interp_attrproperty('bitsize', W_CField), + flags = interp_attrproperty('flags', W_CField), ) W_CField.typedef.acceptable_as_base_class = False diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py --- a/pypy/module/_cffi_backend/newtype.py +++ b/pypy/module/_cffi_backend/newtype.py @@ -345,6 +345,11 @@ if alignment < falign and do_align: alignment = falign # + if is_union and i > 0: + fflags = ctypestruct.W_CField.BF_IGNORE_IN_CTOR + else: + 
fflags = 0 + # if fbitsize < 0: # not a bitfield: common case @@ -372,7 +377,7 @@ for name, srcfld in ftype._fields_dict.items(): srcfield2names[srcfld] = name for srcfld in ftype._fields_list: - fld = srcfld.make_shifted(boffset // 8) + fld = srcfld.make_shifted(boffset // 8, fflags) fields_list.append(fld) try: fields_dict[srcfield2names[srcfld]] = fld @@ -382,7 +387,8 @@ w_ctype._custom_field_pos = True else: # a regular field - fld = ctypestruct.W_CField(ftype, boffset // 8, bs_flag, -1) + fld = ctypestruct.W_CField(ftype, boffset // 8, bs_flag, -1, + fflags) fields_list.append(fld) fields_dict[fname] = fld @@ -489,7 +495,7 @@ bitshift = 8 * ftype.size - fbitsize- bitshift fld = ctypestruct.W_CField(ftype, field_offset_bytes, - bitshift, fbitsize) + bitshift, fbitsize, fflags) fields_list.append(fld) fields_dict[fname] = fld diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1,7 +1,7 @@ # ____________________________________________________________ import sys -assert __version__ == "1.8.1", ("This test_c.py file is for testing a version" +assert __version__ == "1.8.2", ("This test_c.py file is for testing a version" " of cffi that differs from the one that we" " get from 'import _cffi_backend'") if sys.version_info < (3,): @@ -2525,6 +2525,25 @@ assert d[2][1].bitshift == -1 assert d[2][1].bitsize == -1 +def test_nested_anonymous_struct_2(): + BInt = new_primitive_type("int") + BStruct = new_struct_type("struct foo") + BInnerUnion = new_union_type("union bar") + complete_struct_or_union(BInnerUnion, [('a1', BInt, -1), + ('a2', BInt, -1)]) + complete_struct_or_union(BStruct, [('b1', BInt, -1), + ('', BInnerUnion, -1), + ('b2', BInt, -1)]) + assert sizeof(BInnerUnion) == sizeof(BInt) + assert sizeof(BStruct) == sizeof(BInt) * 3 + fields = [(name, fld.offset, fld.flags) for (name, fld) 
in BStruct.fields] + assert fields == [ + ('b1', 0 * sizeof(BInt), 0), + ('a1', 1 * sizeof(BInt), 0), + ('a2', 1 * sizeof(BInt), 1), + ('b2', 2 * sizeof(BInt), 0), + ] + def test_sizeof_union(): # a union has the largest alignment of its members, and a total size # that is the largest of its items *possibly further aligned* if diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -121,7 +121,7 @@ METH_COEXIST METH_STATIC METH_CLASS Py_TPFLAGS_BASETYPE METH_NOARGS METH_VARARGS METH_KEYWORDS METH_O Py_TPFLAGS_HAVE_INPLACEOPS Py_TPFLAGS_HEAPTYPE Py_TPFLAGS_HAVE_CLASS Py_TPFLAGS_HAVE_NEWBUFFER -Py_LT Py_LE Py_EQ Py_NE Py_GT Py_GE Py_TPFLAGS_CHECKTYPES +Py_LT Py_LE Py_EQ Py_NE Py_GT Py_GE Py_TPFLAGS_CHECKTYPES Py_MAX_NDIMS Py_CLEANUP_SUPPORTED """.split() for name in constant_names: @@ -647,6 +647,9 @@ ('format', rffi.CCHARP), ('shape', Py_ssize_tP), ('strides', Py_ssize_tP), + ('_format', rffi.UCHAR), + ('_shape', rffi.CFixedArray(Py_ssize_t, Py_MAX_NDIMS)), + ('_strides', rffi.CFixedArray(Py_ssize_t, Py_MAX_NDIMS)), ('suboffsets', Py_ssize_tP), #('smalltable', rffi.CFixedArray(Py_ssize_t, 2)), ('internal', rffi.VOIDP) diff --git a/pypy/module/cpyext/buffer.py b/pypy/module/cpyext/buffer.py --- a/pypy/module/cpyext/buffer.py +++ b/pypy/module/cpyext/buffer.py @@ -1,20 +1,66 @@ from pypy.interpreter.error import oefmt from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rlib import buffer +from rpython.rlib.rarithmetic import widen from pypy.module.cpyext.api import ( cpython_api, CANNOT_FAIL, Py_buffer) from pypy.module.cpyext.pyobject import PyObject, Py_DecRef -# PyObject_GetBuffer has been removed, it is defined in abstract.c -# PyObject_CheckBuffer is also already defined +def _IsFortranContiguous(view): + ndim = widen(view.c_ndim) + if ndim == 0: + return 1 + if not view.c_strides: + return ndim == 1 + sd = view.c_itemsize + if ndim == 1: + return view.c_shape[0] == 1 or 
sd == view.c_strides[0] + for i in range(view.c_ndim): + dim = view.c_shape[i] + if dim == 0: + return 1 + if view.c_strides[i] != sd: + return 0 + sd *= dim + return 1 + +def _IsCContiguous(view): + ndim = widen(view.c_ndim) + if ndim == 0: + return 1 + if not view.c_strides: + return ndim == 1 + sd = view.c_itemsize + if ndim == 1: + return view.c_shape[0] == 1 or sd == view.c_strides[0] + for i in range(ndim - 1, -1, -1): + dim = view.c_shape[i] + if dim == 0: + return 1 + if view.c_strides[i] != sd: + return 0 + sd *= dim + return 1 + @cpython_api([lltype.Ptr(Py_buffer), lltype.Char], rffi.INT_real, error=CANNOT_FAIL) -def PyBuffer_IsContiguous(space, view, fortran): +def PyBuffer_IsContiguous(space, view, fort): """Return 1 if the memory defined by the view is C-style (fortran is 'C') or Fortran-style (fortran is 'F') contiguous or either one (fortran is 'A'). Return 0 otherwise.""" - # PyPy only supports contiguous Py_buffers for now. - return 1 + # traverse the strides, checking for consistent stride increases from + # right-to-left (c) or left-to-right (fortran). Copied from cpython + if not view.c_suboffsets: + return 0 + if (fort == 'C'): + return _IsCContiguous(view) + elif (fort == 'F'): + return _IsFortranContiguous(view) + elif (fort == 'A'): + return (_IsCContiguous(view) or _IsFortranContiguous(view)) + return 0 + + class CBuffer(buffer.Buffer): diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py --- a/pypy/module/cpyext/bytesobject.py +++ b/pypy/module/cpyext/bytesobject.py @@ -14,45 +14,33 @@ ## Implementation of PyBytesObject ## ================================ ## -## The problem -## ----------- +## PyBytesObject has its own ob_sval buffer, so we have two copies of a string; +## one in the PyBytesObject returned from various C-API functions and another +## in the corresponding RPython object. 
## -## PyBytes_AsString() must return a (non-movable) pointer to the underlying -## ob_sval, whereas pypy strings are movable. C code may temporarily store -## this address and use it, as long as it owns a reference to the PyObject. -## There is no "release" function to specify that the pointer is not needed -## any more. +## The following calls can create a PyBytesObject without a correspoinding +## RPython object: ## -## Also, the pointer may be used to fill the initial value of string. This is -## valid only when the string was just allocated, and is not used elsewhere. +## PyBytes_FromStringAndSize(NULL, n) / PyString_FromStringAndSize(NULL, n) ## -## Solution -## -------- +## In the PyBytesObject returned, the ob_sval buffer may be modified as +## long as the freshly allocated PyBytesObject is not "forced" via a call +## to any of the more sophisticated C-API functions. ## -## PyBytesObject contains two additional members: the ob_size and a pointer to a -## char ob_sval; it may be NULL. -## -## - A string allocated by pypy will be converted into a PyBytesObject with a -## NULL buffer. The first time PyBytes_AsString() is called, memory is -## allocated (with flavor='raw') and content is copied. -## -## - A string allocated with PyBytes_FromStringAndSize(NULL, size) will -## allocate a PyBytesObject structure, and a buffer with the specified -## size+1, but the reference won't be stored in the global map; there is no -## corresponding object in pypy. When from_ref() or Py_INCREF() is called, -## the pypy string is created, and added to the global map of tracked -## objects. The buffer is then supposed to be immutable. -## -##- A buffer obtained from PyBytes_AS_STRING() could be mutable iff -## there is no corresponding pypy object for the string -## -## - _PyBytes_Resize() works only on not-yet-pypy'd strings, and returns a -## similar object. -## -## - PyBytes_Size() doesn't need to force the object. 
+## Care has been taken in implementing the functions below, so that +## if they are called with a non-forced PyBytesObject, they will not +## unintentionally force the creation of a RPython object. As long as only these +## are used, the ob_sval buffer is still modifiable: +## +## PyBytes_AsString / PyString_AsString +## PyBytes_AS_STRING / PyString_AS_STRING +## PyBytes_AsStringAndSize / PyString_AsStringAndSize +## PyBytes_Size / PyString_Size +## PyBytes_Resize / PyString_Resize +## _PyBytes_Resize / _PyString_Resize (raises if called with a forced object) ## ## - There could be an (expensive!) check in from_ref() that the buffer still -## corresponds to the pypy gc-managed string. +## corresponds to the pypy gc-managed string, ## PyBytesObjectStruct = lltype.ForwardReference() @@ -150,9 +138,6 @@ raise oefmt(space.w_TypeError, "expected bytes, %T found", from_ref(space, ref)) ref_str = rffi.cast(PyBytesObject, ref) - if not pyobj_has_w_obj(ref): - # XXX Force the ref? - bytes_realize(space, ref) return ref_str.c_ob_sval @cpython_api([rffi.VOIDP], rffi.CCHARP, error=0) @@ -170,9 +155,6 @@ if not PyBytes_Check(space, ref): raise oefmt(space.w_TypeError, "expected bytes, %T found", from_ref(space, ref)) - if not pyobj_has_w_obj(ref): - # force the ref - bytes_realize(space, ref) ref_str = rffi.cast(PyBytesObject, ref) data[0] = ref_str.c_ob_sval if length: diff --git a/pypy/module/cpyext/include/object.h b/pypy/module/cpyext/include/object.h --- a/pypy/module/cpyext/include/object.h +++ b/pypy/module/cpyext/include/object.h @@ -131,7 +131,8 @@ typedef int(*objobjargproc)(PyObject *, PyObject *, PyObject *); -/* Py3k buffer interface */ +/* Py3k buffer interface, adapted for PyPy */ +#define Py_MAX_NDIMS 32 typedef struct bufferinfo { void *buf; PyObject *obj; /* owned reference */ @@ -145,12 +146,14 @@ char *format; Py_ssize_t *shape; Py_ssize_t *strides; - Py_ssize_t *suboffsets; - + Py_ssize_t *suboffsets; /* alway NULL for app-level objects*/ + unsigned char 
_format; + Py_ssize_t _strides[Py_MAX_NDIMS]; + Py_ssize_t _shape[Py_MAX_NDIMS]; /* static store for shape and strides of mono-dimensional buffers. */ /* Py_ssize_t smalltable[2]; */ - void *internal; + void *internal; /* always NULL for app-level objects */ } Py_buffer; diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -29,8 +29,8 @@ #define PY_VERSION "3.3.5" /* PyPy version as a string */ -#define PYPY_VERSION "5.4.1-alpha0" -#define PYPY_VERSION_NUM 0x05040100 +#define PYPY_VERSION "5.5.0-alpha0" +#define PYPY_VERSION_NUM 0x05050000 /* Defined to mean a PyPy where cpyext holds more regular references to PyObjects, e.g. staying alive as long as the internal PyPy object diff --git a/pypy/module/cpyext/memoryobject.py b/pypy/module/cpyext/memoryobject.py --- a/pypy/module/cpyext/memoryobject.py +++ b/pypy/module/cpyext/memoryobject.py @@ -1,7 +1,8 @@ from pypy.module.cpyext.api import (cpython_api, Py_buffer, CANNOT_FAIL, - build_type_checkers) -from pypy.module.cpyext.pyobject import PyObject -from rpython.rtyper.lltypesystem import lltype + Py_MAX_NDIMS, build_type_checkers, Py_ssize_tP) +from pypy.module.cpyext.pyobject import PyObject, make_ref, incref +from rpython.rtyper.lltypesystem import lltype, rffi +from pypy.objspace.std.memoryobject import W_MemoryView from pypy.interpreter.error import oefmt from pypy.module.cpyext.pyobject import PyObject, from_ref @@ -16,6 +17,7 @@ @cpython_api([PyObject], PyObject) def PyMemoryView_GET_BASE(space, w_obj): # return the obj field of the Py_buffer created by PyMemoryView_GET_BUFFER + # XXX needed for numpy on py3k raise NotImplementedError('PyMemoryView_GET_BUFFER') @cpython_api([PyObject], lltype.Ptr(Py_buffer), error=CANNOT_FAIL) @@ -24,24 +26,38 @@ object. 
The object must be a memoryview instance; this macro doesn't check its type, you must do it yourself or you will risk crashes.""" view = lltype.malloc(Py_buffer, flavor='raw', zero=True) - # TODO - fill in fields - ''' - view.c_buf = buf - view.c_len = length - view.c_obj = obj - Py_IncRef(space, obj) - view.c_itemsize = 1 - rffi.setintfield(view, 'c_readonly', readonly) - rffi.setintfield(view, 'c_ndim', 0) - view.c_format = lltype.nullptr(rffi.CCHARP.TO) - view.c_shape = lltype.nullptr(Py_ssize_tP.TO) - view.c_strides = lltype.nullptr(Py_ssize_tP.TO) + if not isinstance(w_obj, W_MemoryView): + return view + ndim = w_obj.buf.getndim() + if ndim >= Py_MAX_NDIMS: + # XXX warn? + return view + try: + view.c_buf = rffi.cast(rffi.VOIDP, w_obj.buf.get_raw_address()) + view.c_obj = make_ref(space, w_obj) + rffi.setintfield(view, 'c_readonly', w_obj.buf.readonly) + isstr = False + except ValueError: + w_s = w_obj.descr_tobytes(space) + view.c_obj = make_ref(space, w_s) + rffi.setintfield(view, 'c_readonly', 1) + isstr = True + view.c_len = w_obj.getlength() + view.c_itemsize = w_obj.buf.getitemsize() + rffi.setintfield(view, 'c_ndim', ndim) + view.c__format = rffi.cast(rffi.UCHAR, w_obj.buf.getformat()) + view.c_format = rffi.cast(rffi.CCHARP, view.c__format) + view.c_shape = rffi.cast(Py_ssize_tP, view.c__shape) + view.c_strides = rffi.cast(Py_ssize_tP, view.c__strides) + shape = w_obj.buf.getshape() + strides = w_obj.buf.getstrides() + for i in range(ndim): + view.c_shape[i] = shape[i] + view.c_strides[i] = strides[i] view.c_suboffsets = lltype.nullptr(Py_ssize_tP.TO) view.c_internal = lltype.nullptr(rffi.VOIDP.TO) - ''' return view - @cpython_api([lltype.Ptr(Py_buffer)], PyObject) def PyMemoryView_FromBuffer(space, view): """Create a memoryview object wrapping the given buffer structure view. 
diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -335,9 +335,15 @@ def getshape(self): return self.shape + def getstrides(self): + return self.strides + def getitemsize(self): return self.itemsize + def getndim(self): + return self.ndim + def wrap_getbuffer(space, w_self, w_args, func): func_target = rffi.cast(getbufferproc, func) with lltype.scoped_alloc(Py_buffer) as pybuf: diff --git a/pypy/module/cpyext/test/buffer_test.c b/pypy/module/cpyext/test/buffer_test.c --- a/pypy/module/cpyext/test/buffer_test.c +++ b/pypy/module/cpyext/test/buffer_test.c @@ -107,14 +107,11 @@ PyMyArray_getbuffer(PyObject *obj, Py_buffer *view, int flags) { PyMyArray* self = (PyMyArray*)obj; - fprintf(stdout, "in PyMyArray_getbuffer\n"); if (view == NULL) { - fprintf(stdout, "view is NULL\n"); PyErr_SetString(PyExc_ValueError, "NULL view in getbuffer"); return -1; } if (flags == 0) { - fprintf(stdout, "flags is 0\n"); PyErr_SetString(PyExc_ValueError, "flags == 0 in getbuffer"); return -1; } @@ -188,7 +185,131 @@ (initproc)PyMyArray_init, /* tp_init */ }; +static PyObject* +test_buffer(PyObject* self, PyObject* args) +{ + Py_buffer* view = NULL; + PyObject* obj = PyTuple_GetItem(args, 0); + PyObject* memoryview = PyMemoryView_FromObject(obj); + if (memoryview == NULL) + return PyInt_FromLong(-1); + view = PyMemoryView_GET_BUFFER(memoryview); + Py_DECREF(memoryview); + return PyInt_FromLong(view->len); +} + +/* Copied from numpy tests */ +/* + * Create python string from a FLAG and or the corresponding PyBuf flag + * for the use in get_buffer_info. 
+ */ +#define GET_PYBUF_FLAG(FLAG) \ + buf_flag = PyUnicode_FromString(#FLAG); \ + flag_matches = PyObject_RichCompareBool(buf_flag, tmp, Py_EQ); \ + Py_DECREF(buf_flag); \ + if (flag_matches == 1) { \ + Py_DECREF(tmp); \ + flags |= PyBUF_##FLAG; \ + continue; \ + } \ + else if (flag_matches == -1) { \ + Py_DECREF(tmp); \ + return NULL; \ + } + + +/* + * Get information for a buffer through PyBuf_GetBuffer with the + * corresponding flags or'ed. Note that the python caller has to + * make sure that or'ing those flags actually makes sense. + * More information should probably be returned for future tests. + */ +static PyObject * +get_buffer_info(PyObject *self, PyObject *args) +{ + PyObject *buffer_obj, *pyflags; + PyObject *tmp, *buf_flag; + Py_buffer buffer; + PyObject *shape, *strides; + Py_ssize_t i, n; + int flag_matches; + int flags = 0; + + if (!PyArg_ParseTuple(args, "OO", &buffer_obj, &pyflags)) { + return NULL; + } + + n = PySequence_Length(pyflags); + if (n < 0) { + return NULL; + } + + for (i=0; i < n; i++) { + tmp = PySequence_GetItem(pyflags, i); + if (tmp == NULL) { + return NULL; + } + + GET_PYBUF_FLAG(SIMPLE); + GET_PYBUF_FLAG(WRITABLE); + GET_PYBUF_FLAG(STRIDES); + GET_PYBUF_FLAG(ND); + GET_PYBUF_FLAG(C_CONTIGUOUS); + GET_PYBUF_FLAG(F_CONTIGUOUS); + GET_PYBUF_FLAG(ANY_CONTIGUOUS); + GET_PYBUF_FLAG(INDIRECT); + GET_PYBUF_FLAG(FORMAT); + GET_PYBUF_FLAG(STRIDED); + GET_PYBUF_FLAG(STRIDED_RO); + GET_PYBUF_FLAG(RECORDS); + GET_PYBUF_FLAG(RECORDS_RO); + GET_PYBUF_FLAG(FULL); + GET_PYBUF_FLAG(FULL_RO); + GET_PYBUF_FLAG(CONTIG); + GET_PYBUF_FLAG(CONTIG_RO); + + Py_DECREF(tmp); + + /* One of the flags must match */ + PyErr_SetString(PyExc_ValueError, "invalid flag used."); + return NULL; + } + + if (PyObject_GetBuffer(buffer_obj, &buffer, flags) < 0) { + return NULL; + } + + if (buffer.shape == NULL) { + Py_INCREF(Py_None); + shape = Py_None; + } + else { + shape = PyTuple_New(buffer.ndim); + for (i=0; i < buffer.ndim; i++) { + PyTuple_SET_ITEM(shape, i, 
PyLong_FromSsize_t(buffer.shape[i])); + } + } + + if (buffer.strides == NULL) { + Py_INCREF(Py_None); + strides = Py_None; + } + else { + strides = PyTuple_New(buffer.ndim); + for (i=0; i < buffer.ndim; i++) { + PyTuple_SET_ITEM(strides, i, PyLong_FromSsize_t(buffer.strides[i])); + } + } + + PyBuffer_Release(&buffer); + return Py_BuildValue("(NN)", shape, strides); +} + + + static PyMethodDef buffer_functions[] = { + {"test_buffer", (PyCFunction)test_buffer, METH_VARARGS, NULL}, + {"get_buffer_info", (PyCFunction)get_buffer_info, METH_VARARGS, NULL}, {NULL, NULL} /* Sentinel */ }; diff --git a/pypy/module/cpyext/test/test_bytesobject.py b/pypy/module/cpyext/test/test_bytesobject.py --- a/pypy/module/cpyext/test/test_bytesobject.py +++ b/pypy/module/cpyext/test/test_bytesobject.py @@ -179,8 +179,27 @@ Py_INCREF(Py_None); return Py_None; """), + ("c_only", "METH_NOARGS", + """ + int ret; + char * buf2; + PyObject * obj = PyBytes_FromStringAndSize(NULL, 1024); + if (!obj) + return NULL; + buf2 = PyBytes_AsString(obj); + if (!buf2) + return NULL; + /* buf should not have been forced, issue #2395 */ + ret = _PyBytes_Resize(&obj, 512); + if (ret < 0) + return NULL; + Py_DECREF(obj); + Py_INCREF(Py_None); + return Py_None; + """), ]) module.getbytes() + module.c_only() class TestBytes(BaseApiTest): diff --git a/pypy/module/cpyext/test/test_memoryobject.py b/pypy/module/cpyext/test/test_memoryobject.py --- a/pypy/module/cpyext/test/test_memoryobject.py +++ b/pypy/module/cpyext/test/test_memoryobject.py @@ -1,6 +1,6 @@ from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase - +from rpython.rlib.buffer import StringBuffer class TestMemoryViewObject(BaseApiTest): def test_fromobject(self, space, api): w_hello = space.newbytes("hello") @@ -11,6 +11,13 @@ w_bytes = space.call_method(w_view, "tobytes") assert space.unwrap(w_bytes) == "hello" + def test_frombuffer(self, space, api): + w_buf = 
space.newbuffer(StringBuffer("hello")) + w_memoryview = api.PyMemoryView_FromObject(w_buf) + w_view = api.PyMemoryView_GET_BUFFER(w_memoryview) + ndim = w_view.c_ndim + assert ndim == 1 + class AppTestPyBuffer_FillInfo(AppTestCpythonExtensionBase): def test_fillWithObject(self): module = self.import_extension('foo', [ @@ -62,6 +69,25 @@ y = memoryview(arr) assert y.format == 'i' assert y.shape == (10,) + assert len(y) == 10 s = y[3] assert len(s) == struct.calcsize('i') assert s == struct.pack('i', 3) + viewlen = module.test_buffer(arr) + assert viewlen == y.itemsize * len(y) + + def test_buffer_info(self): + from _numpypy import multiarray as np + module = self.import_module(name='buffer_test') + get_buffer_info = module.get_buffer_info + # test_export_flags from numpy test_multiarray + raises(ValueError, get_buffer_info, np.arange(5)[::2], ('SIMPLE',)) + # test_relaxed_strides from numpy test_multiarray + arr = np.zeros((1, 10)) + if arr.flags.f_contiguous: + shape, strides = get_buffer_info(arr, ['F_CONTIGUOUS']) + assert strides[0] == 8 + arr = np.ones((10, 1), order='F') + shape, strides = get_buffer_info(arr, ['C_CONTIGUOUS']) + assert strides[-1] == 8 + diff --git a/pypy/module/cpyext/test/test_version.py b/pypy/module/cpyext/test/test_version.py --- a/pypy/module/cpyext/test/test_version.py +++ b/pypy/module/cpyext/test/test_version.py @@ -41,9 +41,11 @@ assert module.py_minor_version == sys.version_info.minor assert module.py_micro_version == sys.version_info.micro - @pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='pypy only test') + #@pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='pypy only test') def test_pypy_versions(self): import sys + if '__pypy__' not in sys.builtin_module_names: + py.test.skip("pypy only test") init = """ if (Py_IsInitialized()) { PyObject *m = Py_InitModule("foo", NULL); diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- 
a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -293,6 +293,8 @@ STRUCT_TYPE = PyNumberMethods elif slot_names[0] == 'c_tp_as_sequence': STRUCT_TYPE = PySequenceMethods + elif slot_names[0] == 'c_tp_as_buffer': + STRUCT_TYPE = PyBufferProcs else: raise AssertionError( "Structure not allocated: %s" % (slot_names[0],)) diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -460,6 +460,9 @@ def getdictvalue(self, space, key): return self.items[key] + def descr_memoryview(self, space, buf): + raise oefmt(space.w_TypeError, "error") + class IterDictObject(W_Root): def __init__(self, space, w_dict): self.space = space diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -377,7 +377,25 @@ def __exit__(self, typ, value, traceback): keepalive_until_here(self) - def get_buffer(self, space, readonly): + def get_buffer(self, space, flags): + errtype = space.w_ValueError # should be BufferError, numpy does this instead + if ((flags & space.BUF_C_CONTIGUOUS) == space.BUF_C_CONTIGUOUS and + not self.flags & NPY.ARRAY_C_CONTIGUOUS): + raise oefmt(errtype, "ndarray is not C-contiguous") + if ((flags & space.BUF_F_CONTIGUOUS) == space.BUF_F_CONTIGUOUS and + not self.flags & NPY.ARRAY_F_CONTIGUOUS): + raise oefmt(errtype, "ndarray is not Fortran contiguous") + if ((flags & space.BUF_ANY_CONTIGUOUS) == space.BUF_ANY_CONTIGUOUS and + not (self.flags & NPY.ARRAY_F_CONTIGUOUS and + self.flags & NPY.ARRAY_C_CONTIGUOUS)): + raise oefmt(errtype, "ndarray is not contiguous") + if ((flags & space.BUF_STRIDES) != space.BUF_STRIDES and + not self.flags & NPY.ARRAY_C_CONTIGUOUS): + raise oefmt(errtype, "ndarray is not C-contiguous") + if ((flags & space.BUF_WRITABLE) == space.BUF_WRITABLE and + not self.flags & NPY.ARRAY_WRITEABLE): + raise 
oefmt(errtype, "buffer source array is read-only") + readonly = not (flags & space.BUF_WRITABLE) == space.BUF_WRITABLE return ArrayBuffer(self, readonly) def astype(self, space, dtype, order, copy=True): @@ -695,6 +713,7 @@ index + self.impl.start) def setitem(self, index, v): + # XXX what if self.readonly? raw_storage_setitem(self.impl.storage, index + self.impl.start, rffi.cast(lltype.Char, v)) diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -1,4 +1,5 @@ from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.baseobjspace import BufferInterfaceNotFound from pypy.interpreter.gateway import unwrap_spec, WrappedDefault from rpython.rlib.buffer import SubBuffer from rpython.rlib.rstring import strip_spaces @@ -42,7 +43,7 @@ raise oefmt(space.w_ValueError, "object __array__ method not producing an array") -def try_interface_method(space, w_object): +def try_interface_method(space, w_object, copy): try: w_interface = space.getattr(w_object, space.wrap("__array_interface__")) if w_interface is None: @@ -81,17 +82,20 @@ raise oefmt(space.w_ValueError, "__array_interface__ could not decode dtype %R", w_dtype ) - if w_data is not None and (space.isinstance_w(w_data, space.w_tuple) or space.isinstance_w(w_data, space.w_list)): + if w_data is not None and (space.isinstance_w(w_data, space.w_tuple) or + space.isinstance_w(w_data, space.w_list)): data_w = space.listview(w_data) - data = rffi.cast(RAW_STORAGE_PTR, space.int_w(data_w[0])) - read_only = True # XXX why not space.is_true(data_w[1]) + w_data = rffi.cast(RAW_STORAGE_PTR, space.int_w(data_w[0])) + read_only = space.is_true(data_w[1]) or copy offset = 0 - return W_NDimArray.from_shape_and_storage(space, shape, data, - dtype, strides=strides, start=offset), read_only + w_base = w_object + if read_only: + w_base = None + return W_NDimArray.from_shape_and_storage(space, shape, w_data, + 
dtype, w_base=w_base, strides=strides, + start=offset), read_only if w_data is None: - data = w_object - else: - data = w_data + w_data = w_object w_offset = space.finditem(w_interface, space.wrap('offset')) if w_offset is None: offset = 0 @@ -101,7 +105,7 @@ if strides is not None: raise oefmt(space.w_NotImplementedError, "__array_interface__ strides not fully supported yet") - arr = frombuffer(space, data, dtype, support.product(shape), offset) + arr = frombuffer(space, w_data, dtype, support.product(shape), offset) new_impl = arr.implementation.reshape(arr, shape) return W_NDimArray(new_impl), False @@ -110,6 +114,78 @@ return None, False raise +def _descriptor_from_pep3118_format(space, c_format): + descr = descriptor.decode_w_dtype(space, space.wrap(c_format)) + if descr: + return descr + msg = "invalid PEP 3118 format string: '%s'" % c_format + space.warn(space.wrap(msg), space.w_RuntimeWarning) + return None + +def _array_from_buffer_3118(space, w_object, dtype): + try: + w_buf = space.call_method(space.builtin, "memoryview", w_object) + except OperationError as e: + if e.match(space, space.w_TypeError): + # object does not have buffer interface + return w_object + raise + format = space.getattr(w_buf,space.newbytes('format')) + if format: + descr = _descriptor_from_pep3118_format(space, space.str_w(format)) + if not descr: + return w_object + if dtype and descr: + raise oefmt(space.w_NotImplementedError, + "creating an array from a memoryview while specifying dtype " + "not supported") + if descr.elsize != space.int_w(space.getattr(w_buf, space.newbytes('itemsize'))): + msg = ("Item size computed from the PEP 3118 buffer format " + "string does not match the actual item size.") + space.warn(space.wrap(msg), space.w_RuntimeWarning) + return w_object + dtype = descr + elif not dtype: + dtype = descriptor.get_dtype_cache(space).w_stringdtype + dtype.elsize = space.int_w(space.getattr(w_buf, space.newbytes('itemsize'))) + nd = space.int_w(space.getattr(w_buf, 
space.newbytes('ndim'))) + shape = [space.int_w(d) for d in space.listview( + space.getattr(w_buf, space.newbytes('shape')))] + strides = [] + buflen = space.len_w(w_buf) * dtype.elsize + if shape: + strides = [space.int_w(d) for d in space.listview( + space.getattr(w_buf, space.newbytes('strides')))] + if not strides: + d = buflen + strides = [0] * nd + for k in range(nd): + if shape[k] > 0: + d /= shape[k] + strides[k] = d + else: + if nd == 1: + shape = [buflen / dtype.elsize, ] + strides = [dtype.elsize, ] + elif nd > 1: + msg = ("ndim computed from the PEP 3118 buffer format " + "is greater than 1, but shape is NULL.") + space.warn(space.wrap(msg), space.w_RuntimeWarning) + return w_object + try: + w_data = rffi.cast(RAW_STORAGE_PTR, space.int_w(space.call_method(w_buf, '_pypy_raw_address'))) + except OperationError as e: + if e.match(space, space.w_ValueError): + return w_object + else: + raise e + writable = not space.bool_w(space.getattr(w_buf, space.newbytes('readonly'))) + w_ret = W_NDimArray.from_shape_and_storage(space, shape, w_data, + storage_bytes=buflen, dtype=dtype, w_base=w_object, + writable=writable, strides=strides) + if w_ret: + return w_ret + return w_object @unwrap_spec(ndmin=int, copy=bool, subok=bool) def array(space, w_object, w_dtype=None, copy=True, w_order=None, subok=False, @@ -127,6 +203,7 @@ def _array(space, w_object, w_dtype=None, copy=True, w_order=None, subok=False): + from pypy.module.micronumpy.boxes import W_GenericBox # numpy testing calls array(type(array([]))) and expects a ValueError if space.isinstance_w(w_object, space.w_type): raise oefmt(space.w_ValueError, "cannot create ndarray from type instance") @@ -134,13 +211,19 @@ dtype = descriptor.decode_w_dtype(space, w_dtype) if not isinstance(w_object, W_NDimArray): w_array = try_array_method(space, w_object, w_dtype) - if w_array is not None: + if w_array is None: + if ( not space.isinstance_w(w_object, space.w_str) and + not space.isinstance_w(w_object, space.w_unicode) 
and + not isinstance(w_object, W_GenericBox)): + # use buffer interface + w_object = _array_from_buffer_3118(space, w_object, dtype) + else: # continue with w_array, but do further operations in place w_object = w_array copy = False dtype = w_object.get_dtype() if not isinstance(w_object, W_NDimArray): - w_array, _copy = try_interface_method(space, w_object) + w_array, _copy = try_interface_method(space, w_object, copy) if w_array is not None: w_object = w_array copy = _copy diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -806,10 +806,10 @@ def buffer_w(self, space, flags): # XXX format isn't always 'B' probably - return self.implementation.get_buffer(space, True) + return self.implementation.get_buffer(space, flags) def descr_get_data(self, space): - return space.newbuffer(self.implementation.get_buffer(space, False)) + return space.newbuffer(self.implementation.get_buffer(space, space.BUF_FULL)) @unwrap_spec(offset=int, axis1=int, axis2=int) def descr_diagonal(self, space, offset=0, axis1=0, axis2=1): diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -3206,7 +3206,9 @@ raises(TypeError, array, Dummy({'version': 3, 'typestr': 'f8', 'shape': ('a', 3)})) a = array([1, 2, 3]) - b = array(Dummy(a.__array_interface__)) + d = Dummy(a.__array_interface__) + b = array(d) + assert b.base is None b[1] = 200 assert a[1] == 2 # upstream compatibility, is this a bug? 
interface_a = a.__array_interface__ @@ -3217,6 +3219,8 @@ interface_b.pop('data') interface_a.pop('data') assert interface_a == interface_b + b = array(d, copy=False) + assert b.base is d b = array(Dummy({'version':3, 'shape': (50,), 'typestr': 'u1', 'data': 'a'*100})) @@ -3585,6 +3589,7 @@ cls.w_float32val = cls.space.wrap(struct.pack('f', 5.2)) cls.w_float64val = cls.space.wrap(struct.pack('d', 300.4)) cls.w_ulongval = cls.space.wrap(struct.pack('L', 12)) + cls.w_one = cls.space.wrap(struct.pack('i', 1)) def test_frombuffer(self): import numpy as np @@ -3636,8 +3641,6 @@ else: EMPTY = None x = np.array([1, 2, 3, 4, 5], dtype='i') - y = memoryview('abc') - assert y.format == 'B' y = memoryview(x) assert y.format == 'i' assert y.shape == (5,) @@ -3645,6 +3648,16 @@ assert y.strides == (4,) assert y.suboffsets == EMPTY assert y.itemsize == 4 + assert isinstance(y, memoryview) + assert y[0] == self.one + assert (np.array(y) == x).all() + + x = np.array([0, 0, 0, 0], dtype='O') + y = memoryview(x) + # handles conversion of address to pinned object? + z = np.array(y) + assert z.dtype == 'O' + assert (z == x).all() def test_fromstring(self): import sys diff --git a/pypy/module/micronumpy/test/test_subtype.py b/pypy/module/micronumpy/test/test_subtype.py --- a/pypy/module/micronumpy/test/test_subtype.py +++ b/pypy/module/micronumpy/test/test_subtype.py @@ -702,3 +702,32 @@ ret = obj.sum() print type(ret) assert ret.info == 'spam' + + def test_ndarray_subclass_assigns_base(self): + import numpy as np + init_called = [] + class _DummyArray(object): + """ Dummy object that just exists to hang __array_interface__ dictionaries + and possibly keep alive a reference to a base array. 
+ """ + def __init__(self, interface, base=None): + self.__array_interface__ = interface + init_called.append(1) + self.base = base + + x = np.zeros(10) + d = _DummyArray(x.__array_interface__, base=x) + y = np.array(d, copy=False) + assert sum(init_called) == 1 + assert y.base is d + + x = np.zeros((0,), dtype='float32') + intf = x.__array_interface__.copy() + intf["strides"] = x.strides + x.__array_interface__["strides"] = x.strides + d = _DummyArray(x.__array_interface__, base=x) + y = np.array(d, copy=False) + assert sum(init_called) == 2 + assert y.base is d + + diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1851,7 +1851,7 @@ arr.gcstruct) def read(self, arr, i, offset, dtype): - if arr.gcstruct is V_OBJECTSTORE: + if arr.gcstruct is V_OBJECTSTORE and not arr.base(): raise oefmt(self.space.w_NotImplementedError, "cannot read object from array with no gc hook") return self.box(self._read(arr.storage, i, offset)) diff --git a/pypy/module/pypyjit/test_pypy_c/test_import.py b/pypy/module/pypyjit/test_pypy_c/test_import.py --- a/pypy/module/pypyjit/test_pypy_c/test_import.py +++ b/pypy/module/pypyjit/test_pypy_c/test_import.py @@ -38,3 +38,27 @@ # call_may_force(absolute_import_with_lock). 
for opname in log.opnames(loop.allops(opcode="IMPORT_NAME")): assert 'call' not in opname # no call-like opcode + + def test_import_fast_path(self, tmpdir): + print tmpdir + pkg = tmpdir.join('mypkg').ensure(dir=True) + subdir = pkg.join("sub").ensure(dir=True) + pkg.join('__init__.py').write("") + subdir.join('__init__.py').write("") + subdir.join('mod.py').write(str(py.code.Source(""" + def do_the_import(): + import sys + """))) + def main(path, n): + def do_the_import(): + from mypkg.sub import mod + import sys + sys.path.append(path) + for i in range(n): + do_the_import() + # + log = self.run(main, [str(tmpdir), 300]) + loop, = log.loops_by_filename(self.filepath) + # check that no string compares and other calls are there + for opname in log.opnames(loop.allops(opcode="IMPORT_NAME")): + assert 'call' not in opname # no call-like opcode diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -10,7 +10,7 @@ #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h -PYPY_VERSION = (5, 4, 1, "alpha", 0) #XXX # sync patchlevel.h +PYPY_VERSION = (5, 5, 0, "alpha", 0) #XXX # sync patchlevel.h import pypy diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py @@ -1415,6 +1415,7 @@ assert p.b == 12 assert p.c == 14 assert p.d == 14 + py.test.raises(ValueError, ffi.new, "struct foo_s *", [0, 0, 0, 0]) def test_nested_field_offset_align(self): ffi = FFI(backend=self.Backend()) @@ -1454,14 +1455,42 @@ assert p.b == 0 assert p.c == 14 assert p.d == 14 - p = ffi.new("union foo_u *", {'b': 12}) - assert p.a == 0 + p = ffi.new("union foo_u *", {'a': -63, 'b': 12}) + assert p.a == -63 assert p.b == 12 - assert p.c == 0 - 
assert p.d == 0 - # we cannot specify several items in the dict, even though - # in theory in this particular case it would make sense - # to give both 'a' and 'b' + assert p.c == -63 + assert p.d == -63 + p = ffi.new("union foo_u *", [123, 456]) + assert p.a == 123 + assert p.b == 456 + assert p.c == 123 + assert p.d == 123 + py.test.raises(ValueError, ffi.new, "union foo_u *", [0, 0, 0]) + + def test_nested_anonymous_struct_2(self): + ffi = FFI(backend=self.Backend()) + ffi.cdef(""" + struct foo_s { + int a; + union { int b; union { int c, d; }; }; + int e; + }; + """) + assert ffi.sizeof("struct foo_s") == 3 * SIZE_OF_INT + p = ffi.new("struct foo_s *", [11, 22, 33]) + assert p.a == 11 + assert p.b == p.c == p.d == 22 + assert p.e == 33 + py.test.raises(ValueError, ffi.new, "struct foo_s *", [11, 22, 33, 44]) + FOO = ffi.typeof("struct foo_s") + fields = [(name, fld.offset, fld.flags) for (name, fld) in FOO.fields] + assert fields == [ + ('a', 0 * SIZE_OF_INT, 0), + ('b', 1 * SIZE_OF_INT, 0), + ('c', 1 * SIZE_OF_INT, 1), + ('d', 1 * SIZE_OF_INT, 1), + ('e', 2 * SIZE_OF_INT, 0), + ] def test_cast_to_array_type(self): ffi = FFI(backend=self.Backend()) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ctypes.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ctypes.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ctypes.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ctypes.py @@ -35,6 +35,9 @@ def test_nested_anonymous_union(self): py.test.skip("ctypes backend: not supported: nested anonymous union") + def test_nested_anonymous_struct_2(self): + py.test.skip("ctypes backend: not supported: nested anonymous union") + def test_CData_CType_2(self): if sys.version_info >= (3,): py.test.skip("ctypes backend: not supported in Python 3: CType") diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py --- 
a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py @@ -154,7 +154,10 @@ from cffi.setuptools_ext import _set_py_limited_api try: import setuptools - orig_version = setuptools.__version__ + except ImportError as e: + py.test.skip(str(e)) + orig_version = setuptools.__version__ + try: setuptools.__version__ = '26.0.0' from setuptools import Extension diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py --- a/pypy/objspace/std/celldict.py +++ b/pypy/objspace/std/celldict.py @@ -64,6 +64,8 @@ def setitem_str(self, w_dict, key, w_value): cell = self.getdictvalue_no_unwrapping(w_dict, key) + #if (key == '__package__' or key == "__path__") and cell is not None and w_value is not cell: + # print "WARNING", key, w_value, cell, self return self._setitem_str_cell_known(cell, w_dict, key, w_value) def _setitem_str_cell_known(self, cell, w_dict, key, w_value): diff --git a/pypy/objspace/std/memoryobject.py b/pypy/objspace/std/memoryobject.py --- a/pypy/objspace/std/memoryobject.py +++ b/pypy/objspace/std/memoryobject.py @@ -26,6 +26,7 @@ """Implement the built-in 'memoryview' type as a wrapper around an interp-level buffer. 
""" + _attrs_ = ['buf'] def __init__(self, buf, format=None, itemsize=1, ndim=-1, shape=None, strides=None, suboffsets=None): diff --git a/pypy/tool/release/repackage.sh b/pypy/tool/release/repackage.sh --- a/pypy/tool/release/repackage.sh +++ b/pypy/tool/release/repackage.sh @@ -1,7 +1,7 @@ # Edit these appropriately before running this script maj=5 min=4 -rev=0 +rev=1 branchname=release-$maj.x # ==OR== release-$maj.$min.x tagname=release-pypy2.7-v$maj.$min.$rev # ==OR== release-$maj.$min diff --git a/rpython/rlib/ropenssl.py b/rpython/rlib/ropenssl.py --- a/rpython/rlib/ropenssl.py +++ b/rpython/rlib/ropenssl.py @@ -97,6 +97,21 @@ OPENSSL_VERSION_NUMBER = cconfig["OPENSSL_VERSION_NUMBER"] HAVE_TLSv1_2 = OPENSSL_VERSION_NUMBER >= 0x10001000 +if OPENSSL_VERSION_NUMBER >= 0x10100000: + eci.pre_include_bits = () + eci.post_include_bits = () + raise Exception("""OpenSSL version >= 1.1 not supported yet. + + This program requires OpenSSL version 1.0.x, and may also + work with LibreSSL or OpenSSL 0.9.x. OpenSSL 1.1 is quite + some work to update to; contributions are welcome. Sorry, + you need to install an older version of OpenSSL for now. + Make sure this older version is the one picked up by this + program when it runs the compiler. 
+ + This is the configuration used: %r""" % (eci,)) + + class CConfig: _compilation_info_ = eci diff --git a/rpython/rlib/runicode.py b/rpython/rlib/runicode.py --- a/rpython/rlib/runicode.py +++ b/rpython/rlib/runicode.py @@ -1,5 +1,5 @@ import sys -from rpython.rlib.objectmodel import specialize, we_are_translated +from rpython.rlib.objectmodel import specialize, we_are_translated, enforceargs from rpython.rlib.rstring import StringBuilder, UnicodeBuilder from rpython.rlib.rarithmetic import r_uint, intmask, widen from rpython.rlib.unicodedata import unicodedb @@ -145,19 +145,21 @@ _invalid_byte_3_of_4 = _invalid_cont_byte _invalid_byte_4_of_4 = _invalid_cont_byte - at specialize.arg(2) + at enforceargs(allow_surrogates=bool) def _invalid_byte_2_of_3(ordch1, ordch2, allow_surrogates): return (ordch2>>6 != 0x2 or # 0b10 (ordch1 == 0xe0 and ordch2 < 0xa0) # surrogates shouldn't be valid UTF-8! - or (not allow_surrogates and ordch1 == 0xed and ordch2 > 0x9f)) + or (ordch1 == 0xed and ordch2 > 0x9f and not allow_surrogates)) def _invalid_byte_2_of_4(ordch1, ordch2): return (ordch2>>6 != 0x2 or # 0b10 (ordch1 == 0xf0 and ordch2 < 0x90) or (ordch1 == 0xf4 and ordch2 > 0x8f)) - at specialize.arg(5) +# note: this specialize() is here for rtyper/rstr.py, which calls this +# function too but with its own fixed errorhandler + at specialize.arg_or_var(4) def str_decode_utf_8_impl(s, size, errors, final, errorhandler, allow_surrogates, result): if size == 0: @@ -330,6 +332,9 @@ return unicode_encode_utf_8_impl(s, size, errors, errorhandler, allow_surrogates=allow_surrogates) +# note: this specialize() is here for rtyper/rstr.py, which calls this +# function too but with its own fixed errorhandler + at specialize.arg_or_var(3) def unicode_encode_utf_8_impl(s, size, errors, errorhandler, allow_surrogates=False): assert(size >= 0) diff --git a/rpython/rlib/test/test_runicode.py b/rpython/rlib/test/test_runicode.py --- a/rpython/rlib/test/test_runicode.py +++ 
b/rpython/rlib/test/test_runicode.py @@ -55,7 +55,7 @@ s = s.encode(encoding) except LookupError as e: py.test.skip(e) - result, consumed = decoder(s, len(s), True) + result, consumed = decoder(s, len(s), 'strict', final=True) assert consumed == len(s) self.typeequals(trueresult, result) @@ -69,7 +69,7 @@ s = s.decode(encoding) except LookupError as e: py.test.skip(e) - result = encoder(s, len(s), True) + result = encoder(s, len(s), 'strict') self.typeequals(trueresult, result) def checkencodeerror(self, s, encoding, start, stop): @@ -823,9 +823,15 @@ def f(x): s1 = "".join(["\xd7\x90\xd6\x96\xeb\x96\x95\xf0\x90\x91\x93"] * x) - u, consumed = runicode.str_decode_utf_8(s1, len(s1), True) - s2 = runicode.unicode_encode_utf_8(u, len(u), True) - return s1 == s2 + u, consumed = runicode.str_decode_utf_8(s1, len(s1), 'strict', + allow_surrogates=True) + s2 = runicode.unicode_encode_utf_8(u, len(u), 'strict', + allow_surrogates=True) + u3, consumed3 = runicode.str_decode_utf_8(s1, len(s1), 'strict', + allow_surrogates=False) + s3 = runicode.unicode_encode_utf_8(u3, len(u3), 'strict', + allow_surrogates=False) + return s1 == s2 == s3 res = interpret(f, [2]) assert res diff --git a/rpython/rtyper/rstr.py b/rpython/rtyper/rstr.py --- a/rpython/rtyper/rstr.py +++ b/rpython/rtyper/rstr.py @@ -30,12 +30,13 @@ assert value is not None result = UnicodeBuilder(len(value)) self.rstr_decode_utf_8( - value, len(value), 'strict', final=False, + value, len(value), 'strict', final=True, errorhandler=self.ll_raise_unicode_exception_decode, allow_surrogates=False, result=result) return self.ll.llunicode(result.build()) - def ll_raise_unicode_exception_decode(self, errors, encoding, msg, s, + @staticmethod + def ll_raise_unicode_exception_decode(errors, encoding, msg, s, startingpos, endingpos): raise UnicodeDecodeError(encoding, s, startingpos, endingpos, msg) @@ -411,7 +412,8 @@ allow_surrogates=False) return self.ll.llstr(bytes) - def ll_raise_unicode_exception_encode(self, errors, 
encoding, msg, u, + @staticmethod + def ll_raise_unicode_exception_encode(errors, encoding, msg, u, startingpos, endingpos): raise UnicodeEncodeError(encoding, u, startingpos, endingpos, msg) diff --git a/rpython/rtyper/test/test_runicode.py b/rpython/rtyper/test/test_runicode.py --- a/rpython/rtyper/test/test_runicode.py +++ b/rpython/rtyper/test/test_runicode.py @@ -162,6 +162,18 @@ assert self.ll_to_string(self.interpret(f, [0])) == f(0) + def test_unicode_decode_final(self): + strings = ['\xc3', ''] + def f(n): + try: + strings[n].decode('utf-8') + except UnicodeDecodeError: + return True + return False + + assert f(0) + assert self.interpret(f, [0]) + def test_utf_8_decoding_annotation(self): from rpython.rlib.runicode import str_decode_utf_8 def errorhandler(errors, encoding, msg, s, From pypy.commits at gmail.com Wed Sep 7 03:00:01 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 07 Sep 2016 00:00:01 -0700 (PDT) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <57cfbaf1.c398c20a.9932f.bb14@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r787:e957cbe8c298 Date: 2016-09-07 08:59 +0200 http://bitbucket.org/pypy/pypy.org/changeset/e957cbe8c298/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -9,13 +9,13 @@ - $64959 of $105000 (61.9%) + $65097 of $105000 (62.0%)
    @@ -23,7 +23,7 @@
  • Read proposal
  • diff --git a/don4.html b/don4.html --- a/don4.html +++ b/don4.html @@ -17,7 +17,7 @@ 2nd call: - $58959 of $80000 (73.7%) + $58969 of $80000 (73.7%)
    @@ -25,7 +25,7 @@
  • From pypy.commits at gmail.com Wed Sep 7 03:25:40 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 07 Sep 2016 00:25:40 -0700 (PDT) Subject: [pypy-commit] pypy default: fix formatting Message-ID: <57cfc0f4.121a1c0a.c5d01.d3e4@mx.google.com> Author: Matti Picus Branch: Changeset: r86917:053c990217cb Date: 2016-09-07 10:21 +0300 http://bitbucket.org/pypy/pypy/changeset/053c990217cb/ Log: fix formatting diff --git a/pypy/doc/release-pypy2.7-v5.4.1.rst b/pypy/doc/release-pypy2.7-v5.4.1.rst --- a/pypy/doc/release-pypy2.7-v5.4.1.rst +++ b/pypy/doc/release-pypy2.7-v5.4.1.rst @@ -9,16 +9,16 @@ this was unfortunately left out of 5.4.0. My apologies to the new contributors - * Allow tests run with `-A` to find `libm.so` even if it is a script not a + * Allow tests run with ``-A`` to find ``libm.so`` even if it is a script not a dynamically loadable file - * Bump `sys.setrecursionlimit()` when translating PyPy, for translating with CPython + * Bump ``sys.setrecursionlimit()`` when translating PyPy, for translating with CPython - * Tweak a float comparison with 0 in `backendopt.inline` to avoid rounding errors + * Tweak a float comparison with 0 in ``backendopt.inline`` to avoid rounding errors * Fix for an issue where os.access() accepted a float for mode - * Fix for and issue where `unicode.decode('utf8', 'custom_replace')` messed up + * Fix for and issue where ``unicode.decode('utf8', 'custom_replace')`` messed up the last byte of a unicode string sometimes * Update built-in cffi_ to version 1.8.1 From pypy.commits at gmail.com Wed Sep 7 03:28:41 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 07 Sep 2016 00:28:41 -0700 (PDT) Subject: [pypy-commit] pypy.org extradoc: update for 5.4.1 Message-ID: <57cfc1a9.88711c0a.4e105.d5d9@mx.google.com> Author: Matti Picus Branch: extradoc Changeset: r788:34944c992b22 Date: 2016-09-07 10:26 +0300 http://bitbucket.org/pypy/pypy.org/changeset/34944c992b22/ Log: update for 5.4.1 diff --git a/download.html 
b/download.html --- a/download.html +++ b/download.html @@ -74,7 +74,8 @@ performance improvements.

    We provide binaries for x86, ARM, and PPC Linux, Mac OS/X and Windows for:

    @@ -114,22 +115,22 @@
  • or translate your own PyPy.
  • -
    -

    Python2.7 compatible PyPy 5.4.0

    +
    +

    Python2.7 compatible PyPy 5.4.1

    @@ -198,7 +199,7 @@ uncompressed, they run in-place. For now you can uncompress them either somewhere in your home directory or, say, in /opt, and if you want, put a symlink from somewhere like -/usr/local/bin/pypy to /path/to/pypy2-5.4.0/bin/pypy. Do +/usr/local/bin/pypy to /path/to/pypy2-5.4.1/bin/pypy. Do not move or copy the executable pypy outside the tree – put a symlink to it, otherwise it will not find its libraries.

    @@ -263,7 +264,7 @@
  • Get the source code. The following packages contain the source at the same revision as the above binaries:

    Or you can checkout the current trunk using Mercurial (the trunk usually works and is of course more up-to-date):

    @@ -403,20 +404,20 @@

    Checksums

    Here are the checksums for each of the downloads

    -

    pypy2.7-v5.4.0 md5:

    +

    pypy2.7-v5.4.1 md5:

    -50ea504e66f4d9297f5228d7a3b026ec  pypy2-v5.4.0-linux-armel.tar.bz2
    -e838ba554bc53c793f23c378a898fa0f  pypy2-v5.4.0-linux-armhf-raring.tar.bz2
    -b1b9b755631ef85d400d7690ece50210  pypy2-v5.4.0-linux-armhf-raspbian.tar.bz2
    -df7180d5070ac19a234fc6c39b88f420  pypy2-v5.4.0-linux32.tar.bz2
    -5e228ba05b6eaa0af37321fd3f425891  pypy2-v5.4.0-linux64.tar.bz2
    -b32d4c97275901665945f1f2813b6f26  pypy2-v5.4.0-osx64.tar.bz2
    -1d32ef8036a9fe718f397813bd070be8  pypy2-v5.4.0-ppc64.tar.bz2
    -d8abb09416b4370ea40c51a710d12b18  pypy2-v5.4.0-ppc64le.tar.bz2
    -b560c2811a3089f22b21db9beea7f273  pypy2-v5.4.0-s390x.tar.bz2
    -c806bea7ecbb999fffeea3a06e6462e8  pypy2-v5.4.0-src.tar.bz2
    -26c2ab1c891651eb620dbde499088c1f  pypy2-v5.4.0-src.zip
    -bd25b15c0d6c0f7c7f6fa75f1da35014  pypy2-v5.4.0-win32.zip
    +425ffedf0db4dd737d450aa064ae0e7a  pypy2-v5.4.1-linux-armel.tar.bz2
    +15f41409cbadbde3ef22ee60ded8579a  pypy2-v5.4.1-linux-armhf-raring.tar.bz2
    +5940ea0c1077e3bc2ba461bf55800abb  pypy2-v5.4.1-linux-armhf-raspbian.tar.bz2
    +23cbad540abff48ea67f5b75368344be  pypy2-v5.4.1-linux32.tar.bz2
    +2f4a82ae306f3d30d486fb945ca9c6be  pypy2-v5.4.1-linux64.tar.bz2
    +a18cbd990c97f75a08235a35e546a637  pypy2-v5.4.1-osx64.tar.bz2
    +90b0650dda1b5bf33f6119f20ba9c3b6  pypy2-v5.4.1-ppc64.tar.bz2
    +9a2c1b3124151b79e68f36eb2d4891d1  pypy2-v5.4.1-ppc64le.tar.bz2
    +826d4b48e43c84a50dc7e2adc2cb69d4  pypy2-v5.4.1-s390x.tar.bz2
    +d1d197d16331aa23a7fd4b5d4c3c1717  pypy2-v5.4.1-src.tar.bz2
    +1aab9fe6e7c03e959cde466819034bab  pypy2-v5.4.1-src.zip
    +b04aad943aac92862a73b1fd90157a00  pypy2-v5.4.1-win32.zip
     

    pypy3.3-v5.2-alpha md5:

    @@ -435,20 +436,20 @@
     2c9f0054f3b93a6473f10be35277825a  pypy-1.8-sandbox-linux64.tar.bz2
     009c970b5fa75754ae4c32a5d108a8d4  pypy-1.8-sandbox-linux.tar.bz2
     
    -

    pypy2.7-5.4.0 sha1:

    +

    pypy2.7-5.4.1 sha1:

    -c50062a83e4bb9fc59b76901c92e7bf1ecd0351f  pypy2-v5.4.0-linux-armel.tar.bz2
    -f4ebad7a9a31dfa55b35cc01b0533ef8e31ab7c4  pypy2-v5.4.0-linux-armhf-raring.tar.bz2
    -c0becdcb7f44e09947afab9df759313ec94563ef  pypy2-v5.4.0-linux-armhf-raspbian.tar.bz2
    -63be7254bdecd4f3272bcc47f0da7f5db82435a0  pypy2-v5.4.0-linux32.tar.bz2
    -b0e0405ca8f3b143e16122767eb5605d3388af0c  pypy2-v5.4.0-linux64.tar.bz2
    -9c97f54d492886fcaae8611733bcc40a625c8245  pypy2-v5.4.0-osx64.tar.bz2
    -4a263167bbc89447e5adc2ed687ed44798bbca08  pypy2-v5.4.0-ppc64.tar.bz2
    -b3554db74a826fd8e86f1132e9c2cb2e49caac1c  pypy2-v5.4.0-ppc64le.tar.bz2
    -165920a2d0eeda83e8808e7fce93f2a9db7f736a  pypy2-v5.4.0-s390x.tar.bz2
    -95163f8f3c8e9e52e126fc1807d8d94e3d224aec  pypy2-v5.4.0-src.tar.bz2
    -b26546821836cb4bfda0160d37d4dd31fd3aace8  pypy2-v5.4.0-src.zip
    -5ec0ca235cc68b557770b8cf5e1e49bd7b1a0aad  pypy2-v5.4.0-win32.zip
    +a54b2a8b6def85663b10fd956d51fbd052954b83  pypy2-v5.4.1-linux-armel.tar.bz2
    +61c9a5269d6d414c4a1d7c41bbc6a45da318f138  pypy2-v5.4.1-linux-armhf-raring.tar.bz2
    +ea48ffe40887e25adcf1969a6b0e25dbe42a2457  pypy2-v5.4.1-linux-armhf-raspbian.tar.bz2
    +bf0ac4668323abead04dd0948a2e541a158e4a00  pypy2-v5.4.1-linux32.tar.bz2
    +d16601b04987381c4922a19dacb0ca2591a3b566  pypy2-v5.4.1-linux64.tar.bz2
    +4084b7db8ee16a52e7ecb89d10765d961864c67c  pypy2-v5.4.1-osx64.tar.bz2
    +98eac6f32f4fe8fce6eb0f0594163f3e01dccced  pypy2-v5.4.1-ppc64.tar.bz2
    +fc676af00b356ae48602b531675774f2badfc4fd  pypy2-v5.4.1-ppc64le.tar.bz2
    +70b736f1fdb92ae9916630b7cc8954ed92490f64  pypy2-v5.4.1-s390x.tar.bz2
    +1143948f50b1b95b55465b246639b4c48251e38e  pypy2-v5.4.1-src.tar.bz2
    +a6a9ce0defb401d8777d8af5949d23393416a390  pypy2-v5.4.1-src.zip
    +49c2ad75f1531730f7ee466d833d318333915ce0  pypy2-v5.4.1-win32.zip
     

    pypy3.3-v5.2-alpha sha1:

    @@ -462,20 +463,20 @@
     4b31ab492716ea375dd090bbacdf3d7c2d483059  pypy3.3-v5.2.0-alpha1-src.tar.bz2
     d9f5b64f144ebec1a200156809fbbe04fdf7eb7e  pypy3.3-v5.2.0-alpha1-src.zip
     
    -

    pypy2.7-5.4.0 sha256:

    +

    pypy2.7-5.4.1 sha256:

    -04509044f21bb41ee6d3fafcf637fc0c586c248d4cdae6ac3357606a7b660fdb  pypy2-v5.4.0-linux-armel.tar.bz2
    -95c690bcae6771ebce6cf06c7c2842e0662e007e35162afc963337aa597b471a  pypy2-v5.4.0-linux-armhf-raring.tar.bz2
    -839b08db89b7e20cb670b8cf02596e033ea0b76fb8336af7bedfbb04b6b502da  pypy2-v5.4.0-linux-armhf-raspbian.tar.bz2
    -ce581270464b14cdecd13dedb9bd7bf98232f767ac4ac282229a405d8e807af1  pypy2-v5.4.0-linux32.tar.bz2
    -bdfea513d59dcd580970cb6f79f3a250d00191fd46b68133d5327e924ca845f8  pypy2-v5.4.0-linux64.tar.bz2
    -3adf21c2bf3432759c99123f21240d71a72aba81d73129e48ef912c34631b723  pypy2-v5.4.0-osx64.tar.bz2
    -dc09a057264dafb7e4bceca57b6a6ba3b0a5273e125a9b29da32b8439f980270  pypy2-v5.4.0-ppc64.tar.bz2
    -4feb0711e7c235b247f8ea0b22e8a676f89e8831488b7a4e9c7f3a6943d07052  pypy2-v5.4.0-ppc64le.tar.bz2
    -6bceb2760b1c7d6105d20207102862160ddddfd9b1a2707b3a8d866ac29e08d3  pypy2-v5.4.0-s390x.tar.bz2
    -d9568ebe9a14d0eaefde887d78f3cba63d665e95c0d234bb583932341f55a655  pypy2-v5.4.0-src.tar.bz2
    -3c165676be8df3b482727438836a9a240ea641392ddd60593f825e1d50029022  pypy2-v5.4.0-src.zip
    -442c0a917781b6155bf78d2648f1ccd9a36c321926a043f83efcea22a99960b4  pypy2-v5.4.0-win32.zip
    +a1eb5f672aae62606176305e52a51b060ba974b6181ebefcd2c555ecf5f8614f  pypy2-v5.4.1-linux-armel.tar.bz2
    +2c4befc4517adec874155a8b6fa0b9d18388943d4ffe778002072db7783e417a  pypy2-v5.4.1-linux-armhf-raring.tar.bz2
    +b38646519ee1a888c68f8f4713c122867b4b36693c8acabb38eb827a9d2d51f9  pypy2-v5.4.1-linux-armhf-raspbian.tar.bz2
    +6d1e2386ec1e05dffed493aa2d5e6db5cf5de18d7350d44b85f2e45aa5c9a774  pypy2-v5.4.1-linux32.tar.bz2
    +9c85319778224d7fb0c348f55fe3fada15bb579c5f3870a13ad63b42a737dd72  pypy2-v5.4.1-linux64.tar.bz2
    +ae9329c8f0a6df431c6224c27c634f998688ac803e8d100cee9a774e6bba38b5  pypy2-v5.4.1-osx64.tar.bz2
    +ff118f2dc2393c821595fa7872e4d7bdae2a9a16a854759e103608df383d765a  pypy2-v5.4.1-ppc64.tar.bz2
    +684e862cdb281a22c95d73012f7d2693be9fd9cdd171784256da1514ae1c8164  pypy2-v5.4.1-ppc64le.tar.bz2
    +4004c86bf2dfdd1d92120f514d296af7602e0c5e2c6526a14ff8b5508c0fe8f7  pypy2-v5.4.1-s390x.tar.bz2
    +45dbc50c81498f6f1067201b8fc887074b43b84ee32cc47f15e7db17571e9352  pypy2-v5.4.1-src.tar.bz2
    +54b23c11a92dd6328a58787c3d719189f0aa24c872b479acf50094dfb4be0a46  pypy2-v5.4.1-src.zip
    +ec729218a820bc2aa2cf1fcacf9de0fee9e04144fe138596198a6b4615505e03  pypy2-v5.4.1-win32.zip
     

    pypy3.3-v5.2-alpha sha256:

    diff --git a/source/download.txt b/source/download.txt
    --- a/source/download.txt
    +++ b/source/download.txt
    @@ -14,12 +14,14 @@
     
     We provide binaries for x86, ARM, and PPC Linux, Mac OS/X and Windows for:
     
    -* the Python2.7 compatible release — **PyPy2.7 v5.4.0** — (`what's new in PyPy2.7?`_ )
    +* the Python2.7 compatible release — **PyPy2.7 v5.4.1** — (`what's new in PyPy2.7?`_
    +  and `bugfix release`_)
     * the Python3.3 compatible release — **PyPy3.3 v5.2-alpha** — (`what's new in PyPy3.3?`_).
     
     * the Python2.7 Software Transactional Memory special release — **PyPy-STM 2.5.1** (Linux x86-64 only)
     
     .. _what's new in PyPy2.7?: http://doc.pypy.org/en/latest/release-pypy2.7-v5.4.0.html
    +.. _bugfix release: http://doc.pypy.org/en/latest/release-pypy2.7-v5.4.1.html
     .. _what's new in PyPy3.3?: http://doc.pypy.org/en/latest/release-pypy3.3-v5.2-alpha1.html
     
     
    @@ -75,7 +77,7 @@
     .. _`portable Linux binaries`: https://github.com/squeaky-pl/portable-pypy#portable-pypy-distribution-for-linux
     
     
    -Python2.7 compatible PyPy 5.4.0
    +Python2.7 compatible PyPy 5.4.1
     -----------------------------------
     
     * `Linux x86 binary (32bit, tar.bz2 built on Ubuntu 12.04 - 14.04)`__ (see ``[1]`` below)
    @@ -94,18 +96,18 @@
     * `All our downloads,`__ including previous versions.  We also have a
       mirror_, but please use only if you have troubles accessing the links above
     
    -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy2-v5.4.0-linux32.tar.bz2
    -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy2-v5.4.0-linux64.tar.bz2
    -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy2-v5.4.0-linux-armhf-raspbian.tar.bz2
    -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy2-v5.4.0-linux-armhf-raring.tar.bz2
    -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy2-v5.4.0-linux-armel.tar.bz2
    -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy2-v5.4.0-osx64.tar.bz2
    -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy2-v5.4.0-win32.zip
    -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy2-v5.4.0-ppc64.tar.bz2
    -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy2-v5.4.0-ppc64le.tar.bz2
    -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy2-v5.4.0-s390x.tar.bz2
    -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy2-v5.4.0-src.tar.bz2
    -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy2-v5.4.0-src.zip
    +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy2-v5.4.1-linux32.tar.bz2
    +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy2-v5.4.1-linux64.tar.bz2
    +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy2-v5.4.1-linux-armhf-raspbian.tar.bz2
    +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy2-v5.4.1-linux-armhf-raring.tar.bz2
    +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy2-v5.4.1-linux-armel.tar.bz2
    +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy2-v5.4.1-osx64.tar.bz2
    +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy2-v5.4.1-win32.zip
    +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy2-v5.4.1-ppc64.tar.bz2
    +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy2-v5.4.1-ppc64le.tar.bz2
    +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy2-v5.4.1-s390x.tar.bz2
    +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy2-v5.4.1-src.tar.bz2
    +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy2-v5.4.1-src.zip
     .. _`vcredist_x86.exe`: http://www.microsoft.com/en-us/download/details.aspx?id=5582
     .. __: https://bitbucket.org/pypy/pypy/downloads
     .. _mirror: http://buildbot.pypy.org/mirror/
    @@ -205,7 +207,7 @@
     uncompressed, they run in-place.  For now you can uncompress them
     either somewhere in your home directory or, say, in ``/opt``, and
     if you want, put a symlink from somewhere like
    -``/usr/local/bin/pypy`` to ``/path/to/pypy2-5.4.0/bin/pypy``.  Do
    +``/usr/local/bin/pypy`` to ``/path/to/pypy2-5.4.1/bin/pypy``.  Do
     not move or copy the executable ``pypy`` outside the tree --- put
     a symlink to it, otherwise it will not find its libraries.
     
    @@ -291,9 +293,9 @@
     1. Get the source code.  The following packages contain the source at
        the same revision as the above binaries:
     
    -   * `pypy2-v5.4.0-src.tar.bz2`__ (sources)
    +   * `pypy2-v5.4.1-src.tar.bz2`__ (sources)
     
    -   .. __: https://bitbucket.org/pypy/pypy/downloads/pypy2-v5.4.0-src.tar.bz2
    +   .. __: https://bitbucket.org/pypy/pypy/downloads/pypy2-v5.4.1-src.tar.bz2
     
        Or you can checkout the current trunk using Mercurial_ (the trunk
        usually works and is of course more up-to-date)::
    
    From pypy.commits at gmail.com  Wed Sep  7 03:33:25 2016
    From: pypy.commits at gmail.com (arigo)
    Date: Wed, 07 Sep 2016 00:33:25 -0700 (PDT)
    Subject: [pypy-commit] pypy default: os.access() and float arguments is a
     translation crash with sandboxing
    Message-ID: <57cfc2c5.04e21c0a.5fe1.d1f0@mx.google.com>
    
    Author: Armin Rigo 
    Branch: 
    Changeset: r86918:2f7389e85d58
    Date: 2016-09-07 09:32 +0200
    http://bitbucket.org/pypy/pypy/changeset/2f7389e85d58/
    
    Log:	os.access() and float arguments is a translation crash with
    	sandboxing
    
    diff --git a/pypy/doc/release-pypy2.7-v5.4.1.rst b/pypy/doc/release-pypy2.7-v5.4.1.rst
    --- a/pypy/doc/release-pypy2.7-v5.4.1.rst
    +++ b/pypy/doc/release-pypy2.7-v5.4.1.rst
    @@ -16,7 +16,7 @@
     
       * Tweak a float comparison with 0 in ``backendopt.inline`` to avoid rounding errors
     
    -  * Fix for an issue where os.access() accepted a float for mode
    +  * Fix for an issue for translating the sandbox
     
       * Fix for and issue where ``unicode.decode('utf8', 'custom_replace')`` messed up
         the last byte of a unicode string sometimes
    
    From pypy.commits at gmail.com  Wed Sep  7 04:01:09 2016
    From: pypy.commits at gmail.com (arigo)
    Date: Wed, 07 Sep 2016 01:01:09 -0700 (PDT)
    Subject: [pypy-commit] pypy boehm-rawrefcount: A branch to implement
     rawrefcount for Boehm (needed by revdb)
    Message-ID: <57cfc945.c19d1c0a.89e5a.da1f@mx.google.com>
    
    Author: Armin Rigo 
    Branch: boehm-rawrefcount
    Changeset: r86919:97b95319d48d
    Date: 2016-09-07 10:00 +0200
    http://bitbucket.org/pypy/pypy/changeset/97b95319d48d/
    
    Log:	A branch to implement rawrefcount for Boehm (needed by revdb)
    
    
    From pypy.commits at gmail.com  Wed Sep  7 05:00:48 2016
    From: pypy.commits at gmail.com (arigo)
    Date: Wed, 07 Sep 2016 02:00:48 -0700 (PDT)
    Subject: [pypy-commit] pypy boehm-rawrefcount: Add missing operation
    Message-ID: <57cfd740.4177c20a.ff41e.3f1a@mx.google.com>
    
    Author: Armin Rigo 
    Branch: boehm-rawrefcount
    Changeset: r86920:8910c2dbe6a0
    Date: 2016-09-07 10:01 +0200
    http://bitbucket.org/pypy/pypy/changeset/8910c2dbe6a0/
    
    Log:	Add missing operation
    
    diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py
    --- a/rpython/rtyper/lltypesystem/lloperation.py
    +++ b/rpython/rtyper/lltypesystem/lloperation.py
    @@ -491,6 +491,7 @@
         'gc_rawrefcount_create_link_pyobj': LLOp(),
         'gc_rawrefcount_from_obj':          LLOp(sideeffects=False),
         'gc_rawrefcount_to_obj':            LLOp(sideeffects=False),
    +    'gc_rawrefcount_next_dead':         LLOp(),
     
         # ------- JIT & GC interaction, only for some GCs ----------
     
    
    From pypy.commits at gmail.com  Wed Sep  7 05:00:50 2016
    From: pypy.commits at gmail.com (arigo)
    Date: Wed, 07 Sep 2016 02:00:50 -0700 (PDT)
    Subject: [pypy-commit] pypy boehm-rawrefcount: Import the C code from
     https://bitbucket.org/arigo/arigo/src/default/hack/pypy-hack/boehm-rawrefcount/
    Message-ID: <57cfd742.88711c0a.4e105.fab3@mx.google.com>
    
    Author: Armin Rigo 
    Branch: boehm-rawrefcount
    Changeset: r86921:5706f541d3da
    Date: 2016-09-07 11:00 +0200
    http://bitbucket.org/pypy/pypy/changeset/5706f541d3da/
    
    Log:	Import the C code from
    	https://bitbucket.org/arigo/arigo/src/default/hack/pypy-hack/boehm-
    	rawrefcount/
    
    diff --git a/rpython/rlib/src/boehm-rawrefcount.c b/rpython/rlib/src/boehm-rawrefcount.c
    new file mode 100644
    --- /dev/null
    +++ b/rpython/rlib/src/boehm-rawrefcount.c
    @@ -0,0 +1,258 @@
    +#include 
    +#include 
    +#include 
    +#include 
    +#include 
    +#include 
    +#include 
    +
    +
    +#define REFCNT_FROM_PYPY  (LONG_MAX / 4 + 1)
    +
    +typedef struct pypy_header0 gcobj_t;    /* opaque here */
    +
    +#ifndef _WIN32
    +typedef intptr_t Py_ssize_t;
    +#else
    +typedef long Py_ssize_t;
    +#endif
    +
    +/* this is the first two words of the PyObject structure used in
    +   pypy/module/cpyext */
    +typedef struct {
    +    Py_ssize_t ob_refcnt;
    +    Py_ssize_t ob_pypy_link;
    +} pyobj_t;
    +
    +struct link_s {
    +    pyobj_t *pyobj;    /* NULL if entry unused */
    +    uintptr_t gcenc;
    +    struct link_s *next_in_bucket;
    +};
    +
    +#define MARKER_LIST_START  ((pyobj_t *)-1)
    +
    +static struct link_s **hash_buckets, *hash_list, *hash_free_list;
    +static uintptr_t hash_mask_bucket;
    +static intptr_t hash_list_walk_next = -1;
    +
    +static uintptr_t hash_get_hash(gcobj_t *gcobj)
    +{
    +    assert(gcobj != NULL);
    +    uintptr_t h = (uintptr_t)gcobj;
    +    assert((h & 1) == 0);
    +    h -= (h >> 6);
    +    return h & hash_mask_bucket;
    +}
    +
    +static gcobj_t *decode_gcenc(uintptr_t gcenc)
    +{
    +    if (gcenc & 1)
    +        gcenc = ~gcenc;
    +    return (gcobj_t *)gcenc;
    +}
    +
    +static void hash_link(struct link_s *lnk)
    +{
    +    uintptr_t h = hash_get_hash(decode_gcenc(lnk->gcenc));
    +    lnk->next_in_bucket = hash_buckets[h];
    +    hash_buckets[h] = lnk;
    +}
    +
    +static void boehm_is_about_to_collect(void);
    +
    +static void hash_grow_table(void)
    +{
    +    static int rec = 0;
    +    assert(!rec);   /* recursive hash_grow_table() */
    +    rec = 1;
    +
    +    if (hash_buckets == NULL)
    +        GC_set_start_callback(boehm_is_about_to_collect);
    +
    +    uintptr_t i, num_buckets = (hash_mask_bucket + 1) * 2;
    +    if (num_buckets < 16) num_buckets = 16;
    +    assert((num_buckets & (num_buckets - 1)) == 0);  /* power of two */
    +
    +    /* The new hash_buckets: an array of pointers to struct link_s, of
    +       length a power of two, used as a dictionary hash table.  It is
    +       not allocated with Boehm because there is no point in Boehm looking
    +       in it.
    +     */
    +    struct link_s **new_buckets = calloc(num_buckets, sizeof(struct link_s *));
    +    assert(new_buckets);
    +
    +    /* The new hash_list: the array of all struct link_s.  Their order
    +       is irrelevant.  There is a GC_register_finalizer() on the 'gcenc'
    +       field, so we don't move the array; instead we allocate a new array
    +       to use in addition to the old one.  There are a total of 2 to 4
    +       times as many 'struct link_s' as the length of 'buckets'.
    +     */
    +    uintptr_t num_list = num_buckets * 2;
    +    struct link_s *new_list = GC_MALLOC(num_list * sizeof(struct link_s));
    +    for (i = num_list; i-- > 1; ) {
    +        new_list[i].next_in_bucket = hash_free_list;
    +        hash_free_list = &new_list[i];
    +    }
    +    /* list[0] is abused to store a pointer to the previous list and
    +       the length of the current list */
    +    struct link_s *old_list = hash_list;
    +    new_list[0].next_in_bucket = old_list;
    +    new_list[0].gcenc = num_list;
    +    new_list[0].pyobj = MARKER_LIST_START;
    +
    +    hash_list = new_list;
    +    free(hash_buckets);
    +    hash_buckets = new_buckets;
    +    hash_mask_bucket = num_buckets - 1;
    +    hash_list_walk_next = hash_mask_bucket;
    +
    +    /* re-add all old 'struct link_s' to the hash_buckets */
    +    struct link_s *plist = old_list;
    +    while (plist != NULL) {
    +        uintptr_t count = plist[0].gcenc;
    +        for (i = 1; i < count; i++) {
    +            if (plist[i].gcenc != 0)
    +                hash_link(&plist[i]);
    +        }
    +        plist = plist[0].next_in_bucket;
    +    }
    +    GC_reachable_here(old_list);
    +
    +    rec = 0;
    +}
    +
    +static void hash_add_entry(gcobj_t *gcobj, pyobj_t *pyobj)
    +{
    +    if (hash_free_list == NULL) {
    +        hash_grow_table();
    +    }
    +    assert(pyobj->ob_pypy_link == 0);
    +
    +    struct link_s *lnk = hash_free_list;
    +    hash_free_list = lnk->next_in_bucket;
    +    lnk->pyobj = pyobj;
    +    lnk->gcenc = (uintptr_t)gcobj;
    +    pyobj->ob_pypy_link = (Py_ssize_t)lnk;
    +
    +    hash_link(lnk);
    +
    +    int j = GC_general_register_disappearing_link((void **)&lnk->gcenc, gcobj);
    +    assert(j == GC_SUCCESS);
    +}
    +
    +static pyobj_t *hash_get_entry(gcobj_t *gcobj)
    +{
    +    if (hash_buckets == NULL)
    +        return NULL;
    +    uintptr_t h = hash_get_hash(gcobj);
    +    struct link_s *lnk = hash_buckets[h];
    +    while (lnk != NULL) {
    +        assert(lnk->pyobj != NULL);
    +        if (decode_gcenc(lnk->gcenc) == gcobj)
    +            return lnk->pyobj;
    +        lnk = lnk->next_in_bucket;
    +    }
    +    return NULL;
    +}
    +
    +
    +RPY_EXTERN
    +/*pyobj_t*/void *gc_rawrefcount_next_dead(void)
    +{
    +    while (hash_list_walk_next >= 0) {
    +        struct link_s *p, **pp = &hash_buckets[hash_list_walk_next];
    +        while (1) {
    +            p = *pp;
    +            if (p == NULL)
    +                break;
    +            assert(p->pyobj != NULL);
    +            if (p->gcenc == 0) {
    +                /* quadratic time on the number of links from the same
    +                   bucket chain, but it should be small with very high
    +                   probability */
    +                pyobj_t *result = p->pyobj;
    +                printf("next_dead: %p\n", result);
    +                assert(result->ob_refcnt == REFCNT_FROM_PYPY);
    +                p->pyobj = NULL;
    +                *pp = p->next_in_bucket;
    +                p->next_in_bucket = hash_free_list;
    +                hash_free_list = p;
    +                return result;
    +            }
    +            else {
    +                assert(p->gcenc != ~(uintptr_t)0);
    +                pp = &p->next_in_bucket;
    +            }
    +        }
    +        hash_list_walk_next--;
    +    }
    +    return NULL;
    +}
    +
    +RPY_EXTERN
    +void gc_rawrefcount_create_link_pypy(/*gcobj_t*/void *gcobj, 
    +                                     /*pyobj_t*/void *pyobj)
    +{
    +    gcobj_t *gcobj1 = (gcobj_t *)gcobj;
    +    pyobj_t *pyobj1 = (pyobj_t *)pyobj;
    +
    +    assert(pyobj1->ob_pypy_link == 0);
    +    assert(pyobj1->ob_refcnt >= REFCNT_FROM_PYPY);
    +
    +    hash_add_entry(gcobj1, pyobj1);
    +}
    +
    +RPY_EXTERN
    +/*pyobj_t*/void *gc_rawrefcount_from_obj(/*gcobj_t*/void *gcobj)
    +{
    +    return hash_get_entry((gcobj_t *)gcobj);
    +}
    +
    +RPY_EXTERN
    +/*gcobj_t*/void *gc_rawrefcount_to_obj(/*pyobj_t*/void *pyobj)
    +{
    +    pyobj_t *pyobj1 = (pyobj_t *)pyobj;
    +
    +    if (pyobj1->ob_pypy_link == 0)
    +        return NULL;
    +
    +    struct link_s *lnk = (struct link_s *)pyobj1->ob_pypy_link;
    +    assert(lnk->pyobj == pyobj1);
    +    
    +    gcobj_t *g = decode_gcenc(lnk->gcenc);
    +    assert(g != NULL);
    +    return g;
    +}
    +
    +static void boehm_is_about_to_collect(void)
    +{
    +    struct link_s *plist = hash_list;
    +    while (plist != NULL) {
    +        uintptr_t i, count = plist[0].gcenc;
    +        for (i = 1; i < count; i++) {
    +            if (plist[i].gcenc == 0)
    +                continue;
    +
    +            pyobj_t *p = plist[i].pyobj;
    +            assert(p != NULL);
    +            assert(p->ob_refcnt >= REFCNT_FROM_PYPY);
    +
    +            printf("plist[%d].gcenc: %p ", (int)i, plist[i].gcenc);
    +
    +            if ((plist[i].gcenc & 1) ^ (p->ob_refcnt == REFCNT_FROM_PYPY)) {
    +                /* ob_refcnt > FROM_PYPY: non-zero regular refcnt, 
    +                   the gc obj must stay alive.  decode gcenc.
    +                   ---OR---
    +                   ob_refcnt == FROM_PYPY: no refs from C code, the
    +                   gc obj must not (necessarily) stay alive.  encode gcenc.
    +                */
    +                plist[i].gcenc = ~plist[i].gcenc;
    +            }
    +            printf("-> %p\n", plist[i].gcenc);
    +        }
    +        plist = plist[0].next_in_bucket;
    +    }
    +    if (hash_mask_bucket > 0)
    +        hash_list_walk_next = hash_mask_bucket;
    +}
    diff --git a/rpython/rlib/src/boehm-rawrefcount.h b/rpython/rlib/src/boehm-rawrefcount.h
    new file mode 100644
    --- /dev/null
    +++ b/rpython/rlib/src/boehm-rawrefcount.h
    @@ -0,0 +1,22 @@
    +#include "common_header.h"
    +
    +#define OP_GC_RAWREFCOUNT_INIT(callback, r)   /* nothing */
    +
    +#define OP_GC_RAWREFCOUNT_CREATE_LINK_PYPY(gcobj, pyobj, r)   \
    +    gc_rawrefcount_create_link_pypy(gcobj, pyobj)
    +
    +#define OP_GC_RAWREFCOUNT_FROM_OBJ(gcobj, r)   \
    +    r = gc_rawrefcount_from_obj(gcobj)
    +
    +#define OP_GC_RAWREFCOUNT_TO_OBJ(pyobj, r)   \
    +    r = gc_rawrefcount_to_obj(pyobj)
    +
    +#define OP_GC_RAWREFCOUNT_NEXT_DEAD(r)   \
    +    r = gc_rawrefcount_next()
    +
    +
    +RPY_EXTERN void gc_rawrefcount_create_link_pypy(/*gcobj_t*/void *gcobj, 
    +                                                /*pyobj_t*/void *pyobj);
    +RPY_EXTERN /*pyobj_t*/void *gc_rawrefcount_from_obj(/*gcobj_t*/void *gcobj);
    +RPY_EXTERN /*gcobj_t*/void *gc_rawrefcount_to_obj(/*pyobj_t*/void *pyobj);
    +RPY_EXTERN /*pyobj_t*/void *gc_rawrefcount_next_dead(void);
    diff --git a/rpython/rlib/test/test_rawrefcount_boehm.py b/rpython/rlib/test/test_rawrefcount_boehm.py
    new file mode 100644
    --- /dev/null
    +++ b/rpython/rlib/test/test_rawrefcount_boehm.py
    @@ -0,0 +1,230 @@
    +import itertools, os, subprocess
    +from hypothesis import given, strategies
    +from rpython.tool.udir import udir
    +
    +
    +TEST_CODE = r"""
    +#define RPY_EXTERN  /* nothing */
    +#include "boehm-rawrefcount.c"
    +
    +static gcobj_t *alloc_gcobj(void)   /* for tests */
    +{
    +    gcobj_t *g = GC_MALLOC(1000);
    +    printf("gc obj: %p\n", g);
    +    return g;
    +}
    +
    +static pyobj_t *alloc_pyobj(void)   /* for tests */
    +{
    +    pyobj_t *p = malloc(1000);
    +    p->ob_refcnt = 1;
    +    p->ob_pypy_link = 0;
    +    printf("py obj: %p\n", p);
    +    return p;
    +}
    +
    +static void decref(pyobj_t *p)      /* for tests */
    +{
    +    p->ob_refcnt--;
    +    if (p->ob_refcnt == 0) {
    +        printf("decref to zero: %p\n", p);
    +        free(p);
    +    }
    +    assert(p->ob_refcnt >= REFCNT_FROM_PYPY ||
    +           p->ob_refcnt < REFCNT_FROM_PYPY * 0.99);
    +}
    +
    +void run_test(void);     /* forward declaration, produced by the test */
    +
    +int main(void)
    +{
    +    run_test();
    +    while (gc_rawrefcount_next_dead() != NULL)
    +        ;
    +    return 0;
    +}
    +"""
    +
    +
    +operations = strategies.sampled_from([
    +    'new_pyobj',
    +    'new_gcobj',
    +    'create_link',
    +    'from_obj',
    +    'to_obj',
    +    'forget_pyobj',
    +    'forget_gcobj',
    +    'collect',
    +    'dead',
    +    ])
    +
    +
    +@strategies.composite
    +def make_code(draw):
    +    code = []
    +    pyobjs = []
    +    gcobjs = []
    +    num_gcobj = itertools.count()
    +    num_pyobj = itertools.count()
    +    links_g2p = {}
    +    links_p2g = {}
    +
    +    def new_gcobj():
    +        varname = 'g%d' % next(num_gcobj)
    +        code.append('gcobj_t *volatile %s = alloc_gcobj();' % varname)
    +        gcobjs.append(varname)
    +        return varname
    +
    +    def new_pyobj():
    +        varname = 'p%d' % next(num_pyobj)
    +        code.append('pyobj_t *%s = alloc_pyobj();' % varname)
    +        pyobjs.append(varname)
    +        return varname
    +
    +    for op in draw(strategies.lists(operations, average_size=250)):
    +        if op == 'new_gcobj':
    +            new_gcobj()
    +        elif op == 'new_pyobj':
    +            new_pyobj()
    +        elif op == 'create_link':
    +            gvars = [varname for varname in gcobjs if varname not in links_g2p]
    +            if gvars == []:
    +                gvars.append(new_gcobj())
    +            pvars = [varname for varname in pyobjs if varname not in links_p2g]
    +            if pvars == []:
    +                pvars.append(new_pyobj())
    +            gvar = draw(strategies.sampled_from(gvars))
    +            pvar = draw(strategies.sampled_from(pvars))
    +            code.append(r'printf("create_link %%p-%%p\n", %s, %s); '
    +                            % (gvar, pvar) +
    +                        "%s->ob_refcnt += REFCNT_FROM_PYPY; " % pvar +
    +                        "gc_rawrefcount_create_link_pypy(%s, %s);"
    +                            % (gvar, pvar))
    +            links_g2p[gvar] = pvar
    +            links_p2g[pvar] = gvar
    +        elif op == 'from_obj':
    +            if gcobjs:
    +                prnt = False
    +                gvar = draw(strategies.sampled_from(gcobjs))
    +                if gvar not in links_g2p:
    +                    check = "== NULL"
    +                elif links_g2p[gvar] in pyobjs:
    +                    check = "== %s" % (links_g2p[gvar],)
    +                else:
    +                    check = "!= NULL"
    +                    prnt = True
    +                code.append("assert(gc_rawrefcount_from_obj(%s) %s);"
    +                            % (gvar, check))
    +                if prnt:
    +                    code.append(r'printf("link %%p-%%p\n", %s, '
    +                        'gc_rawrefcount_from_obj(%s));' % (gvar, gvar))
    +        elif op == 'to_obj':
    +            if pyobjs:
    +                prnt = False
    +                pvar = draw(strategies.sampled_from(pyobjs))
    +                if pvar not in links_p2g:
    +                    check = "== NULL"
    +                elif links_p2g[pvar] in gcobjs:
    +                    check = "== %s" % (links_p2g[pvar],)
    +                else:
    +                    check = "!= NULL"
    +                    prnt = True
    +                code.append("assert(gc_rawrefcount_to_obj(%s) %s);"
    +                            % (pvar, check))
    +                if prnt:
    +                    code.append(r'printf("link %%p-%%p\n", '
    +                        'gc_rawrefcount_to_obj(%s), %s);' % (pvar, pvar))
    +        elif op == 'forget_pyobj':
    +            if pyobjs:
    +                index = draw(strategies.sampled_from(range(len(pyobjs))))
    +                pvar = pyobjs.pop(index)
    +                code.append(r'printf("-p%%p\n", %s); ' % pvar +
    +                            "decref(%s); %s = NULL;" % (pvar, pvar))
    +        elif op == 'forget_gcobj':
    +            if gcobjs:
    +                index = draw(strategies.sampled_from(range(len(gcobjs))))
    +                gvar = gcobjs.pop(index)
    +                code.append(r'printf("-g%%p\n", %s); ' % gvar +
    +                            "%s = NULL;" % (gvar,))
    +        elif op == 'collect':
    +            code.append("GC_gcollect();")
    +        elif op == 'dead':
    +            code.append('gc_rawrefcount_next_dead();')
    +        else:
    +            assert False, op
    +
    +    return '\n'.join(code)
    +
    +
    +@given(make_code())
    +def test_random(code):
    +    filename = str(udir.join("test-rawrefcount-boehm.c"))
    +    with open(filename, "w") as f:
    +        print >> f, TEST_CODE
    +        print >> f, 'void run_test(void) {'
    +        print >> f, code
    +        print >> f, '}'
    +
    +    srcdir = os.path.dirname(os.path.dirname(
    +        os.path.abspath(os.path.join(__file__))))
    +    srcdir = os.path.join(srcdir, 'src')
    +
    +    err = os.system("cd '%s' && gcc -Werror -lgc -I%s -o test-rawrefcount-boehm"
    +                    " test-rawrefcount-boehm.c" % (udir, srcdir))
    +    assert err == 0
    +    p = subprocess.Popen("./test-rawrefcount-boehm", stdout=subprocess.PIPE,
    +                         cwd=str(udir))
    +    stdout, _ = p.communicate()
    +    assert p.wait() == 0
    +
    +    gcobjs = {}
    +    pyobjs = {}
    +    links_p2g = {}
    +    links_g2p = {}
    +    for line in stdout.splitlines():
    +        if line.startswith('py obj: '):
    +            p = line[8:]
    +            assert not pyobjs.get(p)
    +            pyobjs[p] = True
    +            assert p not in links_p2g
    +        elif line.startswith('gc obj: '):
    +            g = line[8:]
    +            assert not gcobjs.get(g)
    +            gcobjs[g] = True
    +            if g in links_g2p: del links_g2p[g]
    +        elif line.startswith('-p'):
    +            p = line[2:]
    +            assert pyobjs[p] == True
    +            pyobjs[p] = False
    +        elif line.startswith('-g'):
    +            g = line[2:]
    +            assert gcobjs[g] == True
    +            gcobjs[g] = False
    +        elif line.startswith('decref to zero: '):
    +            p = line[16:]
    +            assert pyobjs[p] == False
    +            assert p not in links_p2g
    +            del pyobjs[p]
    +        elif line.startswith('create_link '):
    +            g, p = line[12:].split('-')
    +            assert g in gcobjs
    +            assert p in pyobjs
    +            assert g not in links_g2p
    +            assert p not in links_p2g
    +            links_g2p[g] = p
    +            links_p2g[p] = g
    +        elif line.startswith('link '):
    +            g, p = line[5:].split('-')
    +            assert g in gcobjs
    +            assert p in pyobjs
    +            assert links_g2p[g] == p
    +            assert links_p2g[p] == g
    +        elif line.startswith('plist['):
    +            pass
    +        elif line.startswith('next_dead: '):
    +            p = line[11:]
    +            assert pyobjs[p] == False
    +            del pyobjs[p]
    +            del links_p2g[p]
    +        else:
    +            assert False, repr(line)
    
    From pypy.commits at gmail.com  Wed Sep  7 05:28:31 2016
    From: pypy.commits at gmail.com (arigo)
    Date: Wed, 07 Sep 2016 02:28:31 -0700 (PDT)
    Subject: [pypy-commit] pypy boehm-rawrefcount: Integration in-progress
    Message-ID: <57cfddbf.0cce1c0a.30a6.fd8e@mx.google.com>
    
    Author: Armin Rigo <arigo@tunes.org>
    Branch: boehm-rawrefcount
    Changeset: r86922:a6e4b73b6de0
    Date: 2016-09-07 11:27 +0200
    http://bitbucket.org/pypy/pypy/changeset/a6e4b73b6de0/
    
    Log:	Integration in-progress
    
    diff --git a/rpython/rlib/rawrefcount.py b/rpython/rlib/rawrefcount.py
    --- a/rpython/rlib/rawrefcount.py
    +++ b/rpython/rlib/rawrefcount.py
    @@ -4,10 +4,11 @@
     #  This is meant for pypy's cpyext module, but is a generally
     #  useful interface over our GC.  XXX "pypy" should be removed here
     #
    -import sys, weakref
    -from rpython.rtyper.lltypesystem import lltype, llmemory
    +import sys, weakref, py
    +from rpython.rtyper.lltypesystem import lltype, llmemory, rffi
     from rpython.rlib.objectmodel import we_are_translated, specialize
     from rpython.rtyper.extregistry import ExtRegistryEntry
    +from rpython.translator.tool.cbuild import ExternalCompilationInfo
     from rpython.rlib import rgc
     
     
    @@ -229,6 +230,11 @@
             v_p, v_ob = hop.inputargs(*hop.args_r)
             hop.exception_cannot_occur()
             hop.genop(name, [_unspec_p(hop, v_p), _unspec_ob(hop, v_ob)])
    +        #
    +        if hop.rtyper.annotator.translator.config.translation.gc == "boehm":
    +            c_func = hop.inputconst(lltype.typeOf(func_boehm_eci),
    +                                    func_boehm_eci)
    +            hop.genop('direct_call', [c_func])
     
     
     class Entry(ExtRegistryEntry):
    @@ -281,3 +287,10 @@
             v_ob = hop.genop('gc_rawrefcount_next_dead', [],
                              resulttype = llmemory.Address)
             return _spec_ob(hop, v_ob)
    +
    +src_dir = py.path.local(__file__).dirpath() / 'src'
    +boehm_eci = ExternalCompilationInfo(
    +    post_include_bits     = [(src_dir / 'boehm-rawrefcount.h').read()],
    +    separate_module_files = [(src_dir / 'boehm-rawrefcount.c')],
    +)
    +func_boehm_eci = rffi.llexternal_use_eci(boehm_eci)
    diff --git a/rpython/rlib/src/boehm-rawrefcount.c b/rpython/rlib/src/boehm-rawrefcount.c
    --- a/rpython/rlib/src/boehm-rawrefcount.c
    +++ b/rpython/rlib/src/boehm-rawrefcount.c
    @@ -6,6 +6,12 @@
     #include <limits.h>
     #include <gc/gc.h>
     
    +#ifdef TEST_BOEHM_RAWREFCOUNT
    +#  define RPY_EXTERN  /* nothing */
    +#else
    +#  include "common_header.h"
    +#endif
    +
     
     #define REFCNT_FROM_PYPY  (LONG_MAX / 4 + 1)
     
    @@ -172,7 +178,9 @@
                        bucket chain, but it should be small with very high
                        probability */
                     pyobj_t *result = p->pyobj;
    +#ifdef TEST_BOEHM_RAWREFCOUNT
                     printf("next_dead: %p\n", result);
    +#endif
                     assert(result->ob_refcnt == REFCNT_FROM_PYPY);
                     p->pyobj = NULL;
                     *pp = p->next_in_bucket;
    @@ -238,7 +246,9 @@
                 assert(p != NULL);
                 assert(p->ob_refcnt >= REFCNT_FROM_PYPY);
     
    +#ifdef TEST_BOEHM_RAWREFCOUNT
                 printf("plist[%d].gcenc: %p ", (int)i, plist[i].gcenc);
    +#endif
     
                 if ((plist[i].gcenc & 1) ^ (p->ob_refcnt == REFCNT_FROM_PYPY)) {
                     /* ob_refcnt > FROM_PYPY: non-zero regular refcnt, 
    @@ -249,8 +259,10 @@
                     */
                     plist[i].gcenc = ~plist[i].gcenc;
                 }
    +#ifdef TEST_BOEHM_RAWREFCOUNT
                 printf("-> %p\n", plist[i].gcenc);
    -        }
    +#endif
    +    }
             plist = plist[0].next_in_bucket;
         }
         if (hash_mask_bucket > 0)
    diff --git a/rpython/rlib/src/boehm-rawrefcount.h b/rpython/rlib/src/boehm-rawrefcount.h
    --- a/rpython/rlib/src/boehm-rawrefcount.h
    +++ b/rpython/rlib/src/boehm-rawrefcount.h
    @@ -1,6 +1,8 @@
    -#include "common_header.h"
     
    -#define OP_GC_RAWREFCOUNT_INIT(callback, r)   /* nothing */
    +/* Missing:
    +   OP_GC_RAWREFCOUNT_INIT(callback, r): the callback is not supported here
    +   OP_GC_RAWREFCOUNT_CREATE_LINK_PYOBJ(): not implemented, maybe not needed
    +*/
     
     #define OP_GC_RAWREFCOUNT_CREATE_LINK_PYPY(gcobj, pyobj, r)   \
         gc_rawrefcount_create_link_pypy(gcobj, pyobj)
    @@ -12,7 +14,7 @@
         r = gc_rawrefcount_to_obj(pyobj)
     
     #define OP_GC_RAWREFCOUNT_NEXT_DEAD(r)   \
    -    r = gc_rawrefcount_next()
    +    r = gc_rawrefcount_next_dead()
     
     
     RPY_EXTERN void gc_rawrefcount_create_link_pypy(/*gcobj_t*/void *gcobj, 
    diff --git a/rpython/rlib/test/test_rawrefcount.py b/rpython/rlib/test/test_rawrefcount.py
    --- a/rpython/rlib/test/test_rawrefcount.py
    +++ b/rpython/rlib/test/test_rawrefcount.py
    @@ -214,6 +214,7 @@
     
     
     class TestTranslated(StandaloneTests):
    +    _GC = 'incminimark'
     
         def test_full_translation(self):
             class State:
    @@ -226,29 +227,37 @@
             def make_p():
                 p = W_Root(42)
                 ob = lltype.malloc(PyObjectS, flavor='raw', zero=True)
    +            ob.c_ob_refcnt += REFCNT_FROM_PYPY
                 rawrefcount.create_link_pypy(p, ob)
    -            ob.c_ob_refcnt += REFCNT_FROM_PYPY
                 assert rawrefcount.from_obj(PyObject, p) == ob
                 assert rawrefcount.to_obj(W_Root, ob) == p
                 return ob, p
     
             FTYPE = rawrefcount.RAWREFCOUNT_DEALLOC_TRIGGER
    +        has_callback = (self._GC != "boehm")
     
             def entry_point(argv):
    -            ll_dealloc_trigger_callback = llhelper(FTYPE, dealloc_trigger)
    -            rawrefcount.init(ll_dealloc_trigger_callback)
    +            if has_callback:
    +                ll_dealloc_trigger_callback = llhelper(FTYPE, dealloc_trigger)
    +                rawrefcount.init(ll_dealloc_trigger_callback)
                 ob, p = make_p()
                 if state.seen != []:
                     print "OB COLLECTED REALLY TOO SOON"
                     return 1
    +            if rawrefcount.next_dead(PyObject) != lltype.nullptr(PyObjectS):
    +                print "got a next_dead() really too soon"
    +                return 1
                 rgc.collect()
                 if state.seen != []:
                     print "OB COLLECTED TOO SOON"
                     return 1
    +            if rawrefcount.next_dead(PyObject) != lltype.nullptr(PyObjectS):
    +                print "got a next_dead() too soon"
    +                return 1
                 objectmodel.keepalive_until_here(p)
                 p = None
                 rgc.collect()
    -            if state.seen != [1]:
    +            if has_callback and state.seen != [1]:
                     print "OB NOT COLLECTED"
                     return 1
                 if rawrefcount.next_dead(PyObject) != ob:
    @@ -262,7 +271,11 @@
                 return 0
     
             self.config = get_combined_translation_config(translating=True)
    -        self.config.translation.gc = "incminimark"
    +        self.config.translation.gc = self._GC
             t, cbuilder = self.compile(entry_point)
             data = cbuilder.cmdexec('hi there')
             assert data.startswith('OK!\n')
    +
    +
    +class TestBoehm(TestTranslated):
    +    _GC = "boehm"
    diff --git a/rpython/rlib/test/test_rawrefcount_boehm.py b/rpython/rlib/test/test_rawrefcount_boehm.py
    --- a/rpython/rlib/test/test_rawrefcount_boehm.py
    +++ b/rpython/rlib/test/test_rawrefcount_boehm.py
    @@ -4,7 +4,7 @@
     
     
     TEST_CODE = r"""
    -#define RPY_EXTERN  /* nothing */
    +#define TEST_BOEHM_RAWREFCOUNT
     #include "boehm-rawrefcount.c"
     
     static gcobj_t *alloc_gcobj(void)   /* for tests */
    
    From pypy.commits at gmail.com  Wed Sep  7 05:43:20 2016
    From: pypy.commits at gmail.com (arigo)
    Date: Wed, 07 Sep 2016 02:43:20 -0700 (PDT)
    Subject: [pypy-commit] pypy boehm-rawrefcount: Stop trying to use the same
     test, and wrote a boehm-specific test
    Message-ID: <57cfe138.ec02c20a.bd1d.87bc@mx.google.com>
    
    Author: Armin Rigo <arigo@tunes.org>
    Branch: boehm-rawrefcount
    Changeset: r86923:c8bdf1398ada
    Date: 2016-09-07 11:42 +0200
    http://bitbucket.org/pypy/pypy/changeset/c8bdf1398ada/
    
    Log:	Stop trying to use the same test, and wrote a boehm-specific test
    
    diff --git a/rpython/rlib/src/boehm-rawrefcount.c b/rpython/rlib/src/boehm-rawrefcount.c
    --- a/rpython/rlib/src/boehm-rawrefcount.c
    +++ b/rpython/rlib/src/boehm-rawrefcount.c
    @@ -206,7 +206,8 @@
         pyobj_t *pyobj1 = (pyobj_t *)pyobj;
     
         assert(pyobj1->ob_pypy_link == 0);
    -    assert(pyobj1->ob_refcnt >= REFCNT_FROM_PYPY);
    +    /*assert(pyobj1->ob_refcnt >= REFCNT_FROM_PYPY);*/
    +    /*^^^ could also be fixed just after the call to create_link_pypy()*/
     
         hash_add_entry(gcobj1, pyobj1);
     }
    diff --git a/rpython/rlib/test/test_rawrefcount.py b/rpython/rlib/test/test_rawrefcount.py
    --- a/rpython/rlib/test/test_rawrefcount.py
    +++ b/rpython/rlib/test/test_rawrefcount.py
    @@ -214,7 +214,6 @@
     
     
     class TestTranslated(StandaloneTests):
    -    _GC = 'incminimark'
     
         def test_full_translation(self):
             class State:
    @@ -227,37 +226,29 @@
             def make_p():
                 p = W_Root(42)
                 ob = lltype.malloc(PyObjectS, flavor='raw', zero=True)
    +            rawrefcount.create_link_pypy(p, ob)
                 ob.c_ob_refcnt += REFCNT_FROM_PYPY
    -            rawrefcount.create_link_pypy(p, ob)
                 assert rawrefcount.from_obj(PyObject, p) == ob
                 assert rawrefcount.to_obj(W_Root, ob) == p
                 return ob, p
     
             FTYPE = rawrefcount.RAWREFCOUNT_DEALLOC_TRIGGER
    -        has_callback = (self._GC != "boehm")
     
             def entry_point(argv):
    -            if has_callback:
    -                ll_dealloc_trigger_callback = llhelper(FTYPE, dealloc_trigger)
    -                rawrefcount.init(ll_dealloc_trigger_callback)
    +            ll_dealloc_trigger_callback = llhelper(FTYPE, dealloc_trigger)
    +            rawrefcount.init(ll_dealloc_trigger_callback)
                 ob, p = make_p()
                 if state.seen != []:
                     print "OB COLLECTED REALLY TOO SOON"
                     return 1
    -            if rawrefcount.next_dead(PyObject) != lltype.nullptr(PyObjectS):
    -                print "got a next_dead() really too soon"
    -                return 1
                 rgc.collect()
                 if state.seen != []:
                     print "OB COLLECTED TOO SOON"
                     return 1
    -            if rawrefcount.next_dead(PyObject) != lltype.nullptr(PyObjectS):
    -                print "got a next_dead() too soon"
    -                return 1
                 objectmodel.keepalive_until_here(p)
                 p = None
                 rgc.collect()
    -            if has_callback and state.seen != [1]:
    +            if state.seen != [1]:
                     print "OB NOT COLLECTED"
                     return 1
                 if rawrefcount.next_dead(PyObject) != ob:
    @@ -271,11 +262,50 @@
                 return 0
     
             self.config = get_combined_translation_config(translating=True)
    -        self.config.translation.gc = self._GC
    +        self.config.translation.gc = "incminimark"
             t, cbuilder = self.compile(entry_point)
             data = cbuilder.cmdexec('hi there')
             assert data.startswith('OK!\n')
     
     
    -class TestBoehm(TestTranslated):
    -    _GC = "boehm"
    +class TestBoehmTranslated(StandaloneTests):
    +
    +    def test_full_translation(self):
    +
    +        def make_ob():
    +            p = W_Root(42)
    +            ob = lltype.malloc(PyObjectS, flavor='raw', zero=True)
    +            rawrefcount.create_link_pypy(p, ob)
    +            ob.c_ob_refcnt += REFCNT_FROM_PYPY
    +            assert rawrefcount.from_obj(PyObject, p) == ob
    +            assert rawrefcount.to_obj(W_Root, ob) == p
    +            return ob
    +
    +        def entry_point(argv):
    +            oblist = [make_ob() for i in range(50)]
    +            rgc.collect()
    +            deadlist = []
    +            while True:
    +                ob = rawrefcount.next_dead(PyObject)
    +                if not ob: break
    +                deadlist.append(ob)
    +            if len(deadlist) == 0:
    +                print "no dead object"
    +                return 1
    +            if len(deadlist) < 30:
    +                print "not enough dead objects"
    +                return 1
    +            for ob in deadlist:
    +                if ob not in oblist:
    +                    print "unexpected value for dead pointer"
    +                    return 1
    +                oblist.remove(ob)
    +            print "OK!"
    +            lltype.free(ob, flavor='raw')
    +            return 0
    +
    +        self.config = get_combined_translation_config(translating=True)
    +        self.config.translation.gc = "boehm"
    +        t, cbuilder = self.compile(entry_point)
    +        data = cbuilder.cmdexec('hi there')
    +        assert data.startswith('OK!\n')
    
    From pypy.commits at gmail.com  Wed Sep  7 07:05:07 2016
    From: pypy.commits at gmail.com (arigo)
    Date: Wed, 07 Sep 2016 04:05:07 -0700 (PDT)
    Subject: [pypy-commit] pypy boehm-rawrefcount: For Boehm,
     we can't pass the callback to rawrefcount.init() and we need
    Message-ID: <57cff463.93841c0a.48e69.302a@mx.google.com>
    
    Author: Armin Rigo <arigo@tunes.org>
    Branch: boehm-rawrefcount
    Changeset: r86924:c4bc3be86d15
    Date: 2016-09-07 13:04 +0200
    http://bitbucket.org/pypy/pypy/changeset/c4bc3be86d15/
    
    Log:	For Boehm, we can't pass the callback to rawrefcount.init() and we
    	need instead to regularly check ourselves for dead objects.
    
    diff --git a/pypy/module/cpyext/state.py b/pypy/module/cpyext/state.py
    --- a/pypy/module/cpyext/state.py
    +++ b/pypy/module/cpyext/state.py
    @@ -1,7 +1,7 @@
     from rpython.rlib.objectmodel import we_are_translated
     from rpython.rtyper.lltypesystem import rffi, lltype
     from pypy.interpreter.error import OperationError, oefmt
    -from pypy.interpreter.executioncontext import AsyncAction
    +from pypy.interpreter import executioncontext
     from rpython.rtyper.lltypesystem import lltype
     from rpython.rtyper.annlowlevel import llhelper
     from rpython.rlib.rdynload import DLLHANDLE
    @@ -14,8 +14,9 @@
             self.reset()
             self.programname = lltype.nullptr(rffi.CCHARP.TO)
             self.version = lltype.nullptr(rffi.CCHARP.TO)
    -        pyobj_dealloc_action = PyObjDeallocAction(space)
    -        self.dealloc_trigger = lambda: pyobj_dealloc_action.fire()
    +        if space.config.translation.gc != "boehm":
    +            pyobj_dealloc_action = PyObjDeallocAction(space)
    +            self.dealloc_trigger = lambda: pyobj_dealloc_action.fire()
     
         def reset(self):
             from pypy.module.cpyext.modsupport import PyMethodDef
    @@ -67,6 +68,11 @@
                 state.api_lib = str(api.build_bridge(self.space))
             else:
                 api.setup_library(self.space)
    +            #
    +            if self.space.config.translation.gc == "boehm":
    +                action = BoehmPyObjDeallocAction(self.space)
    +                self.space.actionflag.register_periodic_action(action,
    +                    use_bytecode_counter=True)
     
         def install_dll(self, eci):
             """NOT_RPYTHON
    @@ -84,8 +90,10 @@
             from pypy.module.cpyext.api import init_static_data_translated
     
             if we_are_translated():
    -            rawrefcount.init(llhelper(rawrefcount.RAWREFCOUNT_DEALLOC_TRIGGER,
    -                                      self.dealloc_trigger))
    +            if space.config.translation.gc != "boehm":
    +                rawrefcount.init(
    +                    llhelper(rawrefcount.RAWREFCOUNT_DEALLOC_TRIGGER,
    +                    self.dealloc_trigger))
                 init_static_data_translated(space)
     
             setup_new_method_def(space)
    @@ -143,15 +151,23 @@
             self.extensions[path] = w_copy
     
     
    -class PyObjDeallocAction(AsyncAction):
    +def _rawrefcount_perform(space):
    +    from pypy.module.cpyext.pyobject import PyObject, decref
    +    while True:
    +        py_obj = rawrefcount.next_dead(PyObject)
    +        if not py_obj:
    +            break
    +        decref(space, py_obj)
    +
    +class PyObjDeallocAction(executioncontext.AsyncAction):
         """An action that invokes _Py_Dealloc() on the dying PyObjects.
         """
    +    def perform(self, executioncontext, frame):
    +        _rawrefcount_perform(self.space)
     
    +class BoehmPyObjDeallocAction(executioncontext.PeriodicAsyncAction):
    +    # This variant is used with Boehm, which doesn't have the explicit
    +    # callback.  Instead we must periodically check ourselves.
         def perform(self, executioncontext, frame):
    -        from pypy.module.cpyext.pyobject import PyObject, decref
    -
    -        while True:
    -            py_obj = rawrefcount.next_dead(PyObject)
    -            if not py_obj:
    -                break
    -            decref(self.space, py_obj)
    +        if we_are_translated():
    +            _rawrefcount_perform(self.space)
    
    From pypy.commits at gmail.com  Wed Sep  7 08:23:29 2016
    From: pypy.commits at gmail.com (arigo)
    Date: Wed, 07 Sep 2016 05:23:29 -0700 (PDT)
    Subject: [pypy-commit] pypy boehm-rawrefcount: tweak
    Message-ID: <57d006c1.82ddc20a.52249.28d8@mx.google.com>
    
    Author: Armin Rigo 
    Branch: boehm-rawrefcount
    Changeset: r86925:9a75c25f25c3
    Date: 2016-09-07 14:22 +0200
    http://bitbucket.org/pypy/pypy/changeset/9a75c25f25c3/
    
    Log:	tweak
    
    diff --git a/rpython/rlib/src/boehm-rawrefcount.c b/rpython/rlib/src/boehm-rawrefcount.c
    --- a/rpython/rlib/src/boehm-rawrefcount.c
    +++ b/rpython/rlib/src/boehm-rawrefcount.c
    @@ -237,6 +237,7 @@
     static void boehm_is_about_to_collect(void)
     {
         struct link_s *plist = hash_list;
    +    uintptr_t gcenc_union = 0;
         while (plist != NULL) {
             uintptr_t i, count = plist[0].gcenc;
             for (i = 1; i < count; i++) {
    @@ -260,12 +261,13 @@
                     */
                     plist[i].gcenc = ~plist[i].gcenc;
                 }
    +            gcenc_union |= plist[i].gcenc;
     #ifdef TEST_BOEHM_RAWREFCOUNT
                 printf("-> %p\n", plist[i].gcenc);
     #endif
         }
             plist = plist[0].next_in_bucket;
         }
    -    if (hash_mask_bucket > 0)
    +    if (gcenc_union & 1)   /* if there is at least one item potentially dead */
             hash_list_walk_next = hash_mask_bucket;
     }
    
    From pypy.commits at gmail.com  Wed Sep  7 08:23:31 2016
    From: pypy.commits at gmail.com (arigo)
    Date: Wed, 07 Sep 2016 05:23:31 -0700 (PDT)
    Subject: [pypy-commit] pypy boehm-rawrefcount: Test and fix
    Message-ID: <57d006c3.a6a5c20a.90c9b.2be8@mx.google.com>
    
    Author: Armin Rigo 
    Branch: boehm-rawrefcount
    Changeset: r86926:43fec2a94ecc
    Date: 2016-09-07 14:22 +0200
    http://bitbucket.org/pypy/pypy/changeset/43fec2a94ecc/
    
    Log:	Test and fix
    
    diff --git a/rpython/rlib/src/boehm-rawrefcount.c b/rpython/rlib/src/boehm-rawrefcount.c
    --- a/rpython/rlib/src/boehm-rawrefcount.c
    +++ b/rpython/rlib/src/boehm-rawrefcount.c
    @@ -143,8 +143,16 @@
     
         hash_link(lnk);
     
    -    int j = GC_general_register_disappearing_link((void **)&lnk->gcenc, gcobj);
    -    assert(j == GC_SUCCESS);
    +    if (GC_base(gcobj) == NULL) {
    +        /* 'gcobj' is probably a prebuilt object - it makes no */
    +        /* sense to register it then, and it crashes Boehm in */
    +        /* quite obscure ways */
    +    }
    +    else {
    +        int j = GC_general_register_disappearing_link(
    +                                    (void **)&lnk->gcenc, gcobj);
    +        assert(j == GC_SUCCESS);
    +    }
     }
     
     static pyobj_t *hash_get_entry(gcobj_t *gcobj)
    diff --git a/rpython/rlib/test/test_rawrefcount.py b/rpython/rlib/test/test_rawrefcount.py
    --- a/rpython/rlib/test/test_rawrefcount.py
    +++ b/rpython/rlib/test/test_rawrefcount.py
    @@ -281,7 +281,13 @@
                 assert rawrefcount.to_obj(W_Root, ob) == p
                 return ob
     
    +        prebuilt_p = W_Root(-42)
    +        prebuilt_ob = lltype.malloc(PyObjectS, flavor='raw', zero=True,
    +                                    immortal=True)
    +
             def entry_point(argv):
    +            rawrefcount.create_link_pypy(prebuilt_p, prebuilt_ob)
    +            prebuilt_ob.c_ob_refcnt += REFCNT_FROM_PYPY
                 oblist = [make_ob() for i in range(50)]
                 rgc.collect()
                 deadlist = []
    
    From pypy.commits at gmail.com  Wed Sep  7 08:44:51 2016
    From: pypy.commits at gmail.com (arigo)
    Date: Wed, 07 Sep 2016 05:44:51 -0700 (PDT)
    Subject: [pypy-commit] pypy reverse-debugger: hg merge boehm-rawrefcount
    Message-ID: <57d00bc3.8f081c0a.57fa7.53f8@mx.google.com>
    
    Author: Armin Rigo 
    Branch: reverse-debugger
    Changeset: r86928:ce52ed81e2ba
    Date: 2016-09-07 14:43 +0200
    http://bitbucket.org/pypy/pypy/changeset/ce52ed81e2ba/
    
    Log:	hg merge boehm-rawrefcount
    
    diff --git a/pypy/module/cpyext/state.py b/pypy/module/cpyext/state.py
    --- a/pypy/module/cpyext/state.py
    +++ b/pypy/module/cpyext/state.py
    @@ -1,7 +1,7 @@
     from rpython.rlib.objectmodel import we_are_translated
     from rpython.rtyper.lltypesystem import rffi, lltype
     from pypy.interpreter.error import OperationError, oefmt
    -from pypy.interpreter.executioncontext import AsyncAction
    +from pypy.interpreter import executioncontext
     from rpython.rtyper.lltypesystem import lltype
     from rpython.rtyper.annlowlevel import llhelper
     from rpython.rlib.rdynload import DLLHANDLE
    @@ -14,8 +14,9 @@
             self.reset()
             self.programname = lltype.nullptr(rffi.CCHARP.TO)
             self.version = lltype.nullptr(rffi.CCHARP.TO)
    -        pyobj_dealloc_action = PyObjDeallocAction(space)
    -        self.dealloc_trigger = lambda: pyobj_dealloc_action.fire()
    +        if space.config.translation.gc != "boehm":
    +            pyobj_dealloc_action = PyObjDeallocAction(space)
    +            self.dealloc_trigger = lambda: pyobj_dealloc_action.fire()
     
         def reset(self):
             from pypy.module.cpyext.modsupport import PyMethodDef
    @@ -67,6 +68,11 @@
                 state.api_lib = str(api.build_bridge(self.space))
             else:
                 api.setup_library(self.space)
    +            #
    +            if self.space.config.translation.gc == "boehm":
    +                action = BoehmPyObjDeallocAction(self.space)
    +                self.space.actionflag.register_periodic_action(action,
    +                    use_bytecode_counter=True)
     
         def install_dll(self, eci):
             """NOT_RPYTHON
    @@ -84,8 +90,10 @@
             from pypy.module.cpyext.api import init_static_data_translated
     
             if we_are_translated():
    -            rawrefcount.init(llhelper(rawrefcount.RAWREFCOUNT_DEALLOC_TRIGGER,
    -                                      self.dealloc_trigger))
    +            if space.config.translation.gc != "boehm":
    +                rawrefcount.init(
    +                    llhelper(rawrefcount.RAWREFCOUNT_DEALLOC_TRIGGER,
    +                    self.dealloc_trigger))
                 init_static_data_translated(space)
     
             setup_new_method_def(space)
    @@ -143,15 +151,23 @@
             self.extensions[path] = w_copy
     
     
    -class PyObjDeallocAction(AsyncAction):
    +def _rawrefcount_perform(space):
    +    from pypy.module.cpyext.pyobject import PyObject, decref
    +    while True:
    +        py_obj = rawrefcount.next_dead(PyObject)
    +        if not py_obj:
    +            break
    +        decref(space, py_obj)
    +
    +class PyObjDeallocAction(executioncontext.AsyncAction):
         """An action that invokes _Py_Dealloc() on the dying PyObjects.
         """
    +    def perform(self, executioncontext, frame):
    +        _rawrefcount_perform(self.space)
     
    +class BoehmPyObjDeallocAction(executioncontext.PeriodicAsyncAction):
    +    # This variant is used with Boehm, which doesn't have the explicit
    +    # callback.  Instead we must periodically check ourselves.
         def perform(self, executioncontext, frame):
    -        from pypy.module.cpyext.pyobject import PyObject, decref
    -
    -        while True:
    -            py_obj = rawrefcount.next_dead(PyObject)
    -            if not py_obj:
    -                break
    -            decref(self.space, py_obj)
    +        if we_are_translated():
    +            _rawrefcount_perform(self.space)
    diff --git a/rpython/rlib/rawrefcount.py b/rpython/rlib/rawrefcount.py
    --- a/rpython/rlib/rawrefcount.py
    +++ b/rpython/rlib/rawrefcount.py
    @@ -4,10 +4,11 @@
     #  This is meant for pypy's cpyext module, but is a generally
     #  useful interface over our GC.  XXX "pypy" should be removed here
     #
    -import sys, weakref
    -from rpython.rtyper.lltypesystem import lltype, llmemory
    +import sys, weakref, py
    +from rpython.rtyper.lltypesystem import lltype, llmemory, rffi
     from rpython.rlib.objectmodel import we_are_translated, specialize
     from rpython.rtyper.extregistry import ExtRegistryEntry
    +from rpython.translator.tool.cbuild import ExternalCompilationInfo
     from rpython.rlib import rgc
     
     
    @@ -229,6 +230,11 @@
             v_p, v_ob = hop.inputargs(*hop.args_r)
             hop.exception_cannot_occur()
             hop.genop(name, [_unspec_p(hop, v_p), _unspec_ob(hop, v_ob)])
    +        #
    +        if hop.rtyper.annotator.translator.config.translation.gc == "boehm":
    +            c_func = hop.inputconst(lltype.typeOf(func_boehm_eci),
    +                                    func_boehm_eci)
    +            hop.genop('direct_call', [c_func])
     
     
     class Entry(ExtRegistryEntry):
    @@ -281,3 +287,10 @@
             v_ob = hop.genop('gc_rawrefcount_next_dead', [],
                              resulttype = llmemory.Address)
             return _spec_ob(hop, v_ob)
    +
    +src_dir = py.path.local(__file__).dirpath() / 'src'
    +boehm_eci = ExternalCompilationInfo(
    +    post_include_bits     = [(src_dir / 'boehm-rawrefcount.h').read()],
    +    separate_module_files = [(src_dir / 'boehm-rawrefcount.c')],
    +)
    +func_boehm_eci = rffi.llexternal_use_eci(boehm_eci)
    diff --git a/rpython/rlib/src/boehm-rawrefcount.c b/rpython/rlib/src/boehm-rawrefcount.c
    new file mode 100644
    --- /dev/null
    +++ b/rpython/rlib/src/boehm-rawrefcount.c
    @@ -0,0 +1,281 @@
     +#include <stdio.h>
     +#include <stdlib.h>
     +#include <stdint.h>
     +#include <string.h>
     +#include <assert.h>
     +#include <limits.h>
     +#include <gc/gc.h>
    +
    +#ifdef TEST_BOEHM_RAWREFCOUNT
    +#  define RPY_EXTERN  /* nothing */
    +#else
    +#  include "common_header.h"
    +#endif
    +
    +
    +#define REFCNT_FROM_PYPY  (LONG_MAX / 4 + 1)
    +
    +typedef struct pypy_header0 gcobj_t;    /* opaque here */
    +
    +#ifndef _WIN32
    +typedef intptr_t Py_ssize_t;
    +#else
    +typedef long Py_ssize_t;
    +#endif
    +
    +/* this is the first two words of the PyObject structure used in
    +   pypy/module/cpyext */
    +typedef struct {
    +    Py_ssize_t ob_refcnt;
    +    Py_ssize_t ob_pypy_link;
    +} pyobj_t;
    +
    +struct link_s {
    +    pyobj_t *pyobj;    /* NULL if entry unused */
    +    uintptr_t gcenc;
    +    struct link_s *next_in_bucket;
    +};
    +
    +#define MARKER_LIST_START  ((pyobj_t *)-1)
    +
    +static struct link_s **hash_buckets, *hash_list, *hash_free_list;
    +static uintptr_t hash_mask_bucket;
    +static intptr_t hash_list_walk_next = -1;
    +
    +static uintptr_t hash_get_hash(gcobj_t *gcobj)
    +{
    +    assert(gcobj != NULL);
    +    uintptr_t h = (uintptr_t)gcobj;
    +    assert((h & 1) == 0);
    +    h -= (h >> 6);
    +    return h & hash_mask_bucket;
    +}
    +
    +static gcobj_t *decode_gcenc(uintptr_t gcenc)
    +{
    +    if (gcenc & 1)
    +        gcenc = ~gcenc;
    +    return (gcobj_t *)gcenc;
    +}
    +
    +static void hash_link(struct link_s *lnk)
    +{
    +    uintptr_t h = hash_get_hash(decode_gcenc(lnk->gcenc));
    +    lnk->next_in_bucket = hash_buckets[h];
    +    hash_buckets[h] = lnk;
    +}
    +
    +static void boehm_is_about_to_collect(void);
    +
    +static void hash_grow_table(void)
    +{
    +    static int rec = 0;
    +    assert(!rec);   /* recursive hash_grow_table() */
    +    rec = 1;
    +
    +    if (hash_buckets == NULL)
    +        GC_set_start_callback(boehm_is_about_to_collect);
    +
    +    uintptr_t i, num_buckets = (hash_mask_bucket + 1) * 2;
    +    if (num_buckets < 16) num_buckets = 16;
    +    assert((num_buckets & (num_buckets - 1)) == 0);  /* power of two */
    +
    +    /* The new hash_buckets: an array of pointers to struct link_s, of
    +       length a power of two, used as a dictionary hash table.  It is
    +       not allocated with Boehm because there is no point in Boehm looking
    +       in it.
    +     */
    +    struct link_s **new_buckets = calloc(num_buckets, sizeof(struct link_s *));
    +    assert(new_buckets);
    +
    +    /* The new hash_list: the array of all struct link_s.  Their order
    +       is irrelevant.  There is a GC_register_finalizer() on the 'gcenc'
    +       field, so we don't move the array; instead we allocate a new array
    +       to use in addition to the old one.  There are a total of 2 to 4
    +       times as many 'struct link_s' as the length of 'buckets'.
    +     */
    +    uintptr_t num_list = num_buckets * 2;
    +    struct link_s *new_list = GC_MALLOC(num_list * sizeof(struct link_s));
    +    for (i = num_list; i-- > 1; ) {
    +        new_list[i].next_in_bucket = hash_free_list;
    +        hash_free_list = &new_list[i];
    +    }
    +    /* list[0] is abused to store a pointer to the previous list and
    +       the length of the current list */
    +    struct link_s *old_list = hash_list;
    +    new_list[0].next_in_bucket = old_list;
    +    new_list[0].gcenc = num_list;
    +    new_list[0].pyobj = MARKER_LIST_START;
    +
    +    hash_list = new_list;
    +    free(hash_buckets);
    +    hash_buckets = new_buckets;
    +    hash_mask_bucket = num_buckets - 1;
    +    hash_list_walk_next = hash_mask_bucket;
    +
    +    /* re-add all old 'struct link_s' to the hash_buckets */
    +    struct link_s *plist = old_list;
    +    while (plist != NULL) {
    +        uintptr_t count = plist[0].gcenc;
    +        for (i = 1; i < count; i++) {
    +            if (plist[i].gcenc != 0)
    +                hash_link(&plist[i]);
    +        }
    +        plist = plist[0].next_in_bucket;
    +    }
    +    GC_reachable_here(old_list);
    +
    +    rec = 0;
    +}
    +
    +static void hash_add_entry(gcobj_t *gcobj, pyobj_t *pyobj)
    +{
    +    if (hash_free_list == NULL) {
    +        hash_grow_table();
    +    }
    +    assert(pyobj->ob_pypy_link == 0);
    +
    +    struct link_s *lnk = hash_free_list;
    +    hash_free_list = lnk->next_in_bucket;
    +    lnk->pyobj = pyobj;
    +    lnk->gcenc = (uintptr_t)gcobj;
    +    pyobj->ob_pypy_link = (Py_ssize_t)lnk;
    +
    +    hash_link(lnk);
    +
    +    if (GC_base(gcobj) == NULL) {
    +        /* 'gcobj' is probably a prebuilt object - it makes no */
    +        /* sense to register it then, and it crashes Boehm in */
    +        /* quite obscure ways */
    +    }
    +    else {
    +        int j = GC_general_register_disappearing_link(
    +                                    (void **)&lnk->gcenc, gcobj);
    +        assert(j == GC_SUCCESS);
    +    }
    +}
    +
    +static pyobj_t *hash_get_entry(gcobj_t *gcobj)
    +{
    +    if (hash_buckets == NULL)
    +        return NULL;
    +    uintptr_t h = hash_get_hash(gcobj);
    +    struct link_s *lnk = hash_buckets[h];
    +    while (lnk != NULL) {
    +        assert(lnk->pyobj != NULL);
    +        if (decode_gcenc(lnk->gcenc) == gcobj)
    +            return lnk->pyobj;
    +        lnk = lnk->next_in_bucket;
    +    }
    +    return NULL;
    +}
    +
    +
    +RPY_EXTERN
    +/*pyobj_t*/void *gc_rawrefcount_next_dead(void)
    +{
    +    while (hash_list_walk_next >= 0) {
    +        struct link_s *p, **pp = &hash_buckets[hash_list_walk_next];
    +        while (1) {
    +            p = *pp;
    +            if (p == NULL)
    +                break;
    +            assert(p->pyobj != NULL);
    +            if (p->gcenc == 0) {
    +                /* quadratic time on the number of links from the same
    +                   bucket chain, but it should be small with very high
    +                   probability */
    +                pyobj_t *result = p->pyobj;
    +#ifdef TEST_BOEHM_RAWREFCOUNT
    +                printf("next_dead: %p\n", result);
    +#endif
    +                assert(result->ob_refcnt == REFCNT_FROM_PYPY);
    +                p->pyobj = NULL;
    +                *pp = p->next_in_bucket;
    +                p->next_in_bucket = hash_free_list;
    +                hash_free_list = p;
    +                return result;
    +            }
    +            else {
    +                assert(p->gcenc != ~(uintptr_t)0);
    +                pp = &p->next_in_bucket;
    +            }
    +        }
    +        hash_list_walk_next--;
    +    }
    +    return NULL;
    +}
    +
    +RPY_EXTERN
    +void gc_rawrefcount_create_link_pypy(/*gcobj_t*/void *gcobj, 
    +                                     /*pyobj_t*/void *pyobj)
    +{
    +    gcobj_t *gcobj1 = (gcobj_t *)gcobj;
    +    pyobj_t *pyobj1 = (pyobj_t *)pyobj;
    +
    +    assert(pyobj1->ob_pypy_link == 0);
    +    /*assert(pyobj1->ob_refcnt >= REFCNT_FROM_PYPY);*/
    +    /*^^^ could also be fixed just after the call to create_link_pypy()*/
    +
    +    hash_add_entry(gcobj1, pyobj1);
    +}
    +
    +RPY_EXTERN
    +/*pyobj_t*/void *gc_rawrefcount_from_obj(/*gcobj_t*/void *gcobj)
    +{
    +    return hash_get_entry((gcobj_t *)gcobj);
    +}
    +
    +RPY_EXTERN
    +/*gcobj_t*/void *gc_rawrefcount_to_obj(/*pyobj_t*/void *pyobj)
    +{
    +    pyobj_t *pyobj1 = (pyobj_t *)pyobj;
    +
    +    if (pyobj1->ob_pypy_link == 0)
    +        return NULL;
    +
    +    struct link_s *lnk = (struct link_s *)pyobj1->ob_pypy_link;
    +    assert(lnk->pyobj == pyobj1);
    +    
    +    gcobj_t *g = decode_gcenc(lnk->gcenc);
    +    assert(g != NULL);
    +    return g;
    +}
    +
    +static void boehm_is_about_to_collect(void)
    +{
    +    struct link_s *plist = hash_list;
    +    uintptr_t gcenc_union = 0;
    +    while (plist != NULL) {
    +        uintptr_t i, count = plist[0].gcenc;
    +        for (i = 1; i < count; i++) {
    +            if (plist[i].gcenc == 0)
    +                continue;
    +
    +            pyobj_t *p = plist[i].pyobj;
    +            assert(p != NULL);
    +            assert(p->ob_refcnt >= REFCNT_FROM_PYPY);
    +
    +#ifdef TEST_BOEHM_RAWREFCOUNT
    +            printf("plist[%d].gcenc: %p ", (int)i, plist[i].gcenc);
    +#endif
    +
    +            if ((plist[i].gcenc & 1) ^ (p->ob_refcnt == REFCNT_FROM_PYPY)) {
    +                /* ob_refcnt > FROM_PYPY: non-zero regular refcnt, 
    +                   the gc obj must stay alive.  decode gcenc.
    +                   ---OR---
    +                   ob_refcnt == FROM_PYPY: no refs from C code, the
    +                   gc obj must not (necessarily) stay alive.  encode gcenc.
    +                */
    +                plist[i].gcenc = ~plist[i].gcenc;
    +            }
    +            gcenc_union |= plist[i].gcenc;
    +#ifdef TEST_BOEHM_RAWREFCOUNT
    +            printf("-> %p\n", plist[i].gcenc);
    +#endif
    +    }
    +        plist = plist[0].next_in_bucket;
    +    }
    +    if (gcenc_union & 1)   /* if there is at least one item potentially dead */
    +        hash_list_walk_next = hash_mask_bucket;
    +}
    diff --git a/rpython/rlib/src/boehm-rawrefcount.h b/rpython/rlib/src/boehm-rawrefcount.h
    new file mode 100644
    --- /dev/null
    +++ b/rpython/rlib/src/boehm-rawrefcount.h
    @@ -0,0 +1,24 @@
    +
    +/* Missing:
    +   OP_GC_RAWREFCOUNT_INIT(callback, r): the callback is not supported here
    +   OP_GC_RAWREFCOUNT_CREATE_LINK_PYOBJ(): not implemented, maybe not needed
    +*/
    +
    +#define OP_GC_RAWREFCOUNT_CREATE_LINK_PYPY(gcobj, pyobj, r)   \
    +    gc_rawrefcount_create_link_pypy(gcobj, pyobj)
    +
    +#define OP_GC_RAWREFCOUNT_FROM_OBJ(gcobj, r)   \
    +    r = gc_rawrefcount_from_obj(gcobj)
    +
    +#define OP_GC_RAWREFCOUNT_TO_OBJ(pyobj, r)   \
    +    r = gc_rawrefcount_to_obj(pyobj)
    +
    +#define OP_GC_RAWREFCOUNT_NEXT_DEAD(r)   \
    +    r = gc_rawrefcount_next_dead()
    +
    +
    +RPY_EXTERN void gc_rawrefcount_create_link_pypy(/*gcobj_t*/void *gcobj, 
    +                                                /*pyobj_t*/void *pyobj);
    +RPY_EXTERN /*pyobj_t*/void *gc_rawrefcount_from_obj(/*gcobj_t*/void *gcobj);
    +RPY_EXTERN /*gcobj_t*/void *gc_rawrefcount_to_obj(/*pyobj_t*/void *pyobj);
    +RPY_EXTERN /*pyobj_t*/void *gc_rawrefcount_next_dead(void);
    diff --git a/rpython/rlib/test/test_rawrefcount.py b/rpython/rlib/test/test_rawrefcount.py
    --- a/rpython/rlib/test/test_rawrefcount.py
    +++ b/rpython/rlib/test/test_rawrefcount.py
    @@ -266,3 +266,52 @@
             t, cbuilder = self.compile(entry_point)
             data = cbuilder.cmdexec('hi there')
             assert data.startswith('OK!\n')
    +
    +
    +class TestBoehmTranslated(StandaloneTests):
    +
    +    def test_full_translation(self):
    +
    +        def make_ob():
    +            p = W_Root(42)
    +            ob = lltype.malloc(PyObjectS, flavor='raw', zero=True)
    +            rawrefcount.create_link_pypy(p, ob)
    +            ob.c_ob_refcnt += REFCNT_FROM_PYPY
    +            assert rawrefcount.from_obj(PyObject, p) == ob
    +            assert rawrefcount.to_obj(W_Root, ob) == p
    +            return ob
    +
    +        prebuilt_p = W_Root(-42)
    +        prebuilt_ob = lltype.malloc(PyObjectS, flavor='raw', zero=True,
    +                                    immortal=True)
    +
    +        def entry_point(argv):
    +            rawrefcount.create_link_pypy(prebuilt_p, prebuilt_ob)
    +            prebuilt_ob.c_ob_refcnt += REFCNT_FROM_PYPY
    +            oblist = [make_ob() for i in range(50)]
    +            rgc.collect()
    +            deadlist = []
    +            while True:
    +                ob = rawrefcount.next_dead(PyObject)
    +                if not ob: break
    +                deadlist.append(ob)
    +            if len(deadlist) == 0:
    +                print "no dead object"
    +                return 1
    +            if len(deadlist) < 30:
    +                print "not enough dead objects"
    +                return 1
    +            for ob in deadlist:
    +                if ob not in oblist:
    +                    print "unexpected value for dead pointer"
    +                    return 1
    +                oblist.remove(ob)
    +            print "OK!"
    +            lltype.free(ob, flavor='raw')
    +            return 0
    +
    +        self.config = get_combined_translation_config(translating=True)
    +        self.config.translation.gc = "boehm"
    +        t, cbuilder = self.compile(entry_point)
    +        data = cbuilder.cmdexec('hi there')
    +        assert data.startswith('OK!\n')
    diff --git a/rpython/rlib/test/test_rawrefcount_boehm.py b/rpython/rlib/test/test_rawrefcount_boehm.py
    new file mode 100644
    --- /dev/null
    +++ b/rpython/rlib/test/test_rawrefcount_boehm.py
    @@ -0,0 +1,230 @@
    +import itertools, os, subprocess
    +from hypothesis import given, strategies
    +from rpython.tool.udir import udir
    +
    +
    +TEST_CODE = r"""
    +#define TEST_BOEHM_RAWREFCOUNT
    +#include "boehm-rawrefcount.c"
    +
    +static gcobj_t *alloc_gcobj(void)   /* for tests */
    +{
    +    gcobj_t *g = GC_MALLOC(1000);
    +    printf("gc obj: %p\n", g);
    +    return g;
    +}
    +
    +static pyobj_t *alloc_pyobj(void)   /* for tests */
    +{
    +    pyobj_t *p = malloc(1000);
    +    p->ob_refcnt = 1;
    +    p->ob_pypy_link = 0;
    +    printf("py obj: %p\n", p);
    +    return p;
    +}
    +
    +static void decref(pyobj_t *p)      /* for tests */
    +{
    +    p->ob_refcnt--;
    +    if (p->ob_refcnt == 0) {
    +        printf("decref to zero: %p\n", p);
    +        free(p);
    +    }
    +    assert(p->ob_refcnt >= REFCNT_FROM_PYPY ||
    +           p->ob_refcnt < REFCNT_FROM_PYPY * 0.99);
    +}
    +
    +void run_test(void);     /* forward declaration, produced by the test */
    +
    +int main(void)
    +{
    +    run_test();
    +    while (gc_rawrefcount_next_dead() != NULL)
    +        ;
    +    return 0;
    +}
    +"""
    +
    +
    +operations = strategies.sampled_from([
    +    'new_pyobj',
    +    'new_gcobj',
    +    'create_link',
    +    'from_obj',
    +    'to_obj',
    +    'forget_pyobj',
    +    'forget_gcobj',
    +    'collect',
    +    'dead',
    +    ])
    +
    +
     +@strategies.composite
    +def make_code(draw):
    +    code = []
    +    pyobjs = []
    +    gcobjs = []
    +    num_gcobj = itertools.count()
    +    num_pyobj = itertools.count()
    +    links_g2p = {}
    +    links_p2g = {}
    +
    +    def new_gcobj():
    +        varname = 'g%d' % next(num_gcobj)
    +        code.append('gcobj_t *volatile %s = alloc_gcobj();' % varname)
    +        gcobjs.append(varname)
    +        return varname
    +
    +    def new_pyobj():
    +        varname = 'p%d' % next(num_pyobj)
    +        code.append('pyobj_t *%s = alloc_pyobj();' % varname)
    +        pyobjs.append(varname)
    +        return varname
    +
    +    for op in draw(strategies.lists(operations, average_size=250)):
    +        if op == 'new_gcobj':
    +            new_gcobj()
    +        elif op == 'new_pyobj':
    +            new_pyobj()
    +        elif op == 'create_link':
    +            gvars = [varname for varname in gcobjs if varname not in links_g2p]
    +            if gvars == []:
    +                gvars.append(new_gcobj())
    +            pvars = [varname for varname in pyobjs if varname not in links_p2g]
    +            if pvars == []:
    +                pvars.append(new_pyobj())
    +            gvar = draw(strategies.sampled_from(gvars))
    +            pvar = draw(strategies.sampled_from(pvars))
    +            code.append(r'printf("create_link %%p-%%p\n", %s, %s); '
    +                            % (gvar, pvar) +
    +                        "%s->ob_refcnt += REFCNT_FROM_PYPY; " % pvar +
    +                        "gc_rawrefcount_create_link_pypy(%s, %s);"
    +                            % (gvar, pvar))
    +            links_g2p[gvar] = pvar
    +            links_p2g[pvar] = gvar
    +        elif op == 'from_obj':
    +            if gcobjs:
    +                prnt = False
    +                gvar = draw(strategies.sampled_from(gcobjs))
    +                if gvar not in links_g2p:
    +                    check = "== NULL"
    +                elif links_g2p[gvar] in pyobjs:
    +                    check = "== %s" % (links_g2p[gvar],)
    +                else:
    +                    check = "!= NULL"
    +                    prnt = True
    +                code.append("assert(gc_rawrefcount_from_obj(%s) %s);"
    +                            % (gvar, check))
    +                if prnt:
    +                    code.append(r'printf("link %%p-%%p\n", %s, '
    +                        'gc_rawrefcount_from_obj(%s));' % (gvar, gvar))
    +        elif op == 'to_obj':
    +            if pyobjs:
    +                prnt = False
    +                pvar = draw(strategies.sampled_from(pyobjs))
    +                if pvar not in links_p2g:
    +                    check = "== NULL"
    +                elif links_p2g[pvar] in gcobjs:
    +                    check = "== %s" % (links_p2g[pvar],)
    +                else:
    +                    check = "!= NULL"
    +                    prnt = True
    +                code.append("assert(gc_rawrefcount_to_obj(%s) %s);"
    +                            % (pvar, check))
    +                if prnt:
    +                    code.append(r'printf("link %%p-%%p\n", '
    +                        'gc_rawrefcount_to_obj(%s), %s);' % (pvar, pvar))
    +        elif op == 'forget_pyobj':
    +            if pyobjs:
    +                index = draw(strategies.sampled_from(range(len(pyobjs))))
    +                pvar = pyobjs.pop(index)
    +                code.append(r'printf("-p%%p\n", %s); ' % pvar +
    +                            "decref(%s); %s = NULL;" % (pvar, pvar))
    +        elif op == 'forget_gcobj':
    +            if gcobjs:
    +                index = draw(strategies.sampled_from(range(len(gcobjs))))
    +                gvar = gcobjs.pop(index)
    +                code.append(r'printf("-g%%p\n", %s); ' % gvar +
    +                            "%s = NULL;" % (gvar,))
    +        elif op == 'collect':
    +            code.append("GC_gcollect();")
    +        elif op == 'dead':
    +            code.append('gc_rawrefcount_next_dead();')
    +        else:
    +            assert False, op
    +
    +    return '\n'.join(code)
    +
    +
    + at given(make_code())
    +def test_random(code):
    +    filename = str(udir.join("test-rawrefcount-boehm.c"))
    +    with open(filename, "w") as f:
    +        print >> f, TEST_CODE
    +        print >> f, 'void run_test(void) {'
    +        print >> f, code
    +        print >> f, '}'
    +
    +    srcdir = os.path.dirname(os.path.dirname(
    +        os.path.abspath(os.path.join(__file__))))
    +    srcdir = os.path.join(srcdir, 'src')
    +
    +    err = os.system("cd '%s' && gcc -Werror -lgc -I%s -o test-rawrefcount-boehm"
    +                    " test-rawrefcount-boehm.c" % (udir, srcdir))
    +    assert err == 0
    +    p = subprocess.Popen("./test-rawrefcount-boehm", stdout=subprocess.PIPE,
    +                         cwd=str(udir))
    +    stdout, _ = p.communicate()
    +    assert p.wait() == 0
    +
    +    gcobjs = {}
    +    pyobjs = {}
    +    links_p2g = {}
    +    links_g2p = {}
    +    for line in stdout.splitlines():
    +        if line.startswith('py obj: '):
    +            p = line[8:]
    +            assert not pyobjs.get(p)
    +            pyobjs[p] = True
    +            assert p not in links_p2g
    +        elif line.startswith('gc obj: '):
    +            g = line[8:]
    +            assert not gcobjs.get(g)
    +            gcobjs[g] = True
    +            if g in links_g2p: del links_g2p[g]
    +        elif line.startswith('-p'):
    +            p = line[2:]
    +            assert pyobjs[p] == True
    +            pyobjs[p] = False
    +        elif line.startswith('-g'):
    +            g = line[2:]
    +            assert gcobjs[g] == True
    +            gcobjs[g] = False
    +        elif line.startswith('decref to zero: '):
    +            p = line[16:]
    +            assert pyobjs[p] == False
    +            assert p not in links_p2g
    +            del pyobjs[p]
    +        elif line.startswith('create_link '):
    +            g, p = line[12:].split('-')
    +            assert g in gcobjs
    +            assert p in pyobjs
    +            assert g not in links_g2p
    +            assert p not in links_p2g
    +            links_g2p[g] = p
    +            links_p2g[p] = g
    +        elif line.startswith('link '):
    +            g, p = line[5:].split('-')
    +            assert g in gcobjs
    +            assert p in pyobjs
    +            assert links_g2p[g] == p
    +            assert links_p2g[p] == g
    +        elif line.startswith('plist['):
    +            pass
    +        elif line.startswith('next_dead: '):
    +            p = line[11:]
    +            assert pyobjs[p] == False
    +            del pyobjs[p]
    +            del links_p2g[p]
    +        else:
    +            assert False, repr(line)
    diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py
    --- a/rpython/rtyper/lltypesystem/lloperation.py
    +++ b/rpython/rtyper/lltypesystem/lloperation.py
    @@ -497,6 +497,7 @@
         'gc_rawrefcount_create_link_pyobj': LLOp(),
         'gc_rawrefcount_from_obj':          LLOp(sideeffects=False),
         'gc_rawrefcount_to_obj':            LLOp(sideeffects=False),
    +    'gc_rawrefcount_next_dead':         LLOp(),
     
         # ------- JIT & GC interaction, only for some GCs ----------
     
    
    From pypy.commits at gmail.com  Wed Sep  7 08:44:49 2016
    From: pypy.commits at gmail.com (arigo)
    Date: Wed, 07 Sep 2016 05:44:49 -0700 (PDT)
    Subject: [pypy-commit] pypy reverse-debugger: hg merge default
    Message-ID: <57d00bc1.e97ac20a.1eabd.3260@mx.google.com>
    
    Author: Armin Rigo 
    Branch: reverse-debugger
    Changeset: r86927:b13abdc2e1b3
    Date: 2016-09-07 14:43 +0200
    http://bitbucket.org/pypy/pypy/changeset/b13abdc2e1b3/
    
    Log:	hg merge default
    
    diff too long, truncating to 2000 out of 212749 lines
    
    diff --git a/.hgtags b/.hgtags
    --- a/.hgtags
    +++ b/.hgtags
    @@ -27,3 +27,9 @@
     40497617ae91caa1a394d8be6f9cd2de31cb0628 release-pypy3.3-v5.2
     c09c19272c990a0611b17569a0085ad1ab00c8ff release-pypy2.7-v5.3
     7e8df3df96417c16c2d55b41352ec82c9c69c978 release-pypy2.7-v5.3.1
    +68bb3510d8212ae9efb687e12e58c09d29e74f87 release-pypy2.7-v5.4.0
    +68bb3510d8212ae9efb687e12e58c09d29e74f87 release-pypy2.7-v5.4.0
    +77392ad263504df011ccfcabf6a62e21d04086d0 release-pypy2.7-v5.4.0
    +050d84dd78997f021acf0e133934275d63547cc0 release-pypy2.7-v5.4.1
    +050d84dd78997f021acf0e133934275d63547cc0 release-pypy2.7-v5.4.1
    +0e2d9a73f5a1818d0245d75daccdbe21b2d5c3ef release-pypy2.7-v5.4.1
    diff --git a/LICENSE b/LICENSE
    --- a/LICENSE
    +++ b/LICENSE
    @@ -74,6 +74,7 @@
       Seo Sanghyeon
       Ronny Pfannschmidt
       Justin Peel
    +  Raffael Tfirst
       David Edelsohn
       Anders Hammarquist
       Jakub Gustak
    @@ -117,7 +118,6 @@
       Wenzhu Man
       John Witulski
       Laurence Tratt
    -  Raffael Tfirst
       Ivan Sichmann Freitas
       Greg Price
       Dario Bertini
    @@ -141,6 +141,7 @@
       tav
       Taavi Burns
       Georg Brandl
    +  Nicolas Truessel
       Bert Freudenberg
       Stian Andreassen
       Wanja Saatkamp
    @@ -211,6 +212,7 @@
       Vaibhav Sood
       Alan McIntyre
       Alexander Sedov
    +  p_zieschang at yahoo.de
       Attila Gobi
       Jasper.Schulz
       Christopher Pope
    @@ -221,6 +223,7 @@
       Arjun Naik
       Valentina Mukhamedzhanova
       Stefano Parmesan
    +  touilleMan
       Alexis Daboville
       Jens-Uwe Mager
       Carl Meyer
    @@ -229,12 +232,14 @@
       Gabriel
       Lukas Vacek
       Kunal Grover
    +  Aaron Gallagher
       Andrew Dalke
       Sylvain Thenault
       Jakub Stasiak
       Nathan Taylor
       Vladimir Kryachko
       Omer Katz
    +  Mark Williams
       Jacek Generowicz
       Alejandro J. Cura
       Jacob Oscarson
    @@ -355,12 +360,15 @@
       yasirs
       Michael Chermside
       Anna Ravencroft
    +  pizi
       Andrey Churin
       Dan Crosta
    +  Eli Stevens
       Tobias Diaz
       Julien Phalip
       Roman Podoliaka
       Dan Loewenherz
    +  werat
     
       Heinrich-Heine University, Germany 
       Open End AB (formerly AB Strakt), Sweden
    diff --git a/_pytest/python.py b/_pytest/python.py
    --- a/_pytest/python.py
    +++ b/_pytest/python.py
    @@ -498,7 +498,10 @@
         """ Collector for test methods. """
         def collect(self):
             if hasinit(self.obj):
    -            pytest.skip("class %s.%s with __init__ won't get collected" % (
    +            # XXX used to be skip(), but silently skipping classes
    +            # XXX just because they have been written long ago is
    +            # XXX imho a very, very, very bad idea
    +            pytest.fail("class %s.%s with __init__ won't get collected" % (
                     self.obj.__module__,
                     self.obj.__name__,
                 ))
    diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py
    --- a/lib-python/2.7/distutils/sysconfig_pypy.py
    +++ b/lib-python/2.7/distutils/sysconfig_pypy.py
    @@ -122,22 +122,24 @@
         """Dummy method to let some easy_install packages that have
         optional C speedup components.
         """
    +    def customize(executable, flags):
    +        command = compiler.executables[executable] + flags
    +        setattr(compiler, executable, command)
    +
         if compiler.compiler_type == "unix":
             compiler.compiler_so.extend(['-O2', '-fPIC', '-Wimplicit'])
             compiler.shared_lib_extension = get_config_var('SO')
             if "CPPFLAGS" in os.environ:
                 cppflags = shlex.split(os.environ["CPPFLAGS"])
    -            compiler.compiler.extend(cppflags)
    -            compiler.compiler_so.extend(cppflags)
    -            compiler.linker_so.extend(cppflags)
    +            for executable in ('compiler', 'compiler_so', 'linker_so'):
    +                customize(executable, cppflags)
             if "CFLAGS" in os.environ:
                 cflags = shlex.split(os.environ["CFLAGS"])
    -            compiler.compiler.extend(cflags)
    -            compiler.compiler_so.extend(cflags)
    -            compiler.linker_so.extend(cflags)
    +            for executable in ('compiler', 'compiler_so', 'linker_so'):
    +                customize(executable, cflags)
             if "LDFLAGS" in os.environ:
                 ldflags = shlex.split(os.environ["LDFLAGS"])
    -            compiler.linker_so.extend(ldflags)
    +            customize('linker_so', ldflags)
     
     
     from sysconfig_cpython import (
    diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py
    --- a/lib_pypy/_ctypes/function.py
    +++ b/lib_pypy/_ctypes/function.py
    @@ -342,7 +342,7 @@
                 thisarg = cast(thisvalue, POINTER(POINTER(c_void_p)))
                 keepalives, newargs, argtypes, outargs, errcheckargs = (
                     self._convert_args(argtypes, args[1:], kwargs))
    -            newargs.insert(0, thisvalue.value)
    +            newargs.insert(0, thisarg)
                 argtypes.insert(0, c_void_p)
             else:
                 thisarg = None
    diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO
    --- a/lib_pypy/cffi.egg-info/PKG-INFO
    +++ b/lib_pypy/cffi.egg-info/PKG-INFO
    @@ -1,6 +1,6 @@
     Metadata-Version: 1.1
     Name: cffi
    -Version: 1.8.0
    +Version: 1.8.2
     Summary: Foreign Function Interface for Python calling C code.
     Home-page: http://cffi.readthedocs.org
     Author: Armin Rigo, Maciej Fijalkowski
    diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py
    --- a/lib_pypy/cffi/__init__.py
    +++ b/lib_pypy/cffi/__init__.py
    @@ -4,8 +4,8 @@
     from .api import FFI, CDefError, FFIError
     from .ffiplatform import VerificationError, VerificationMissing
     
    -__version__ = "1.8.0"
    -__version_info__ = (1, 8, 0)
    +__version__ = "1.8.2"
    +__version_info__ = (1, 8, 2)
     
     # The verifier module file names are based on the CRC32 of a string that
     # contains the following version number.  It may be older than __version__
    diff --git a/lib_pypy/cffi/_cffi_include.h b/lib_pypy/cffi/_cffi_include.h
    --- a/lib_pypy/cffi/_cffi_include.h
    +++ b/lib_pypy/cffi/_cffi_include.h
    @@ -1,4 +1,20 @@
     #define _CFFI_
    +
    +/* We try to define Py_LIMITED_API before including Python.h.
    +
    +   Mess: we can only define it if Py_DEBUG, Py_TRACE_REFS and
    +   Py_REF_DEBUG are not defined.  This is a best-effort approximation:
    +   we can learn about Py_DEBUG from pyconfig.h, but it is unclear if
    +   the same works for the other two macros.  Py_DEBUG implies them,
    +   but not the other way around.
    +*/
    +#ifndef _CFFI_USE_EMBEDDING
    +#  include 
    +#  if !defined(Py_DEBUG) && !defined(Py_TRACE_REFS) && !defined(Py_REF_DEBUG)
    +#    define Py_LIMITED_API
    +#  endif
    +#endif
    +
     #include 
     #ifdef __cplusplus
     extern "C" {
    diff --git a/lib_pypy/cffi/_embedding.h b/lib_pypy/cffi/_embedding.h
    --- a/lib_pypy/cffi/_embedding.h
    +++ b/lib_pypy/cffi/_embedding.h
    @@ -233,7 +233,7 @@
             f = PySys_GetObject((char *)"stderr");
             if (f != NULL && f != Py_None) {
                 PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME
    -                               "\ncompiled with cffi version: 1.8.0"
    +                               "\ncompiled with cffi version: 1.8.2"
                                    "\n_cffi_backend module: ", f);
                 modules = PyImport_GetModuleDict();
                 mod = PyDict_GetItemString(modules, "_cffi_backend");
    diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py
    --- a/lib_pypy/cffi/api.py
    +++ b/lib_pypy/cffi/api.py
    @@ -652,7 +652,7 @@
             recompile(self, module_name, source,
                       c_file=filename, call_c_compiler=False, **kwds)
     
    -    def compile(self, tmpdir='.', verbose=0, target=None):
    +    def compile(self, tmpdir='.', verbose=0, target=None, debug=None):
             """The 'target' argument gives the final file name of the
             compiled DLL.  Use '*' to force distutils' choice, suitable for
             regular CPython C API modules.  Use a file name ending in '.*'
    @@ -669,7 +669,7 @@
             module_name, source, source_extension, kwds = self._assigned_source
             return recompile(self, module_name, source, tmpdir=tmpdir,
                              target=target, source_extension=source_extension,
    -                         compiler_verbose=verbose, **kwds)
    +                         compiler_verbose=verbose, debug=debug, **kwds)
     
         def init_once(self, func, tag):
             # Read _init_once_cache[tag], which is either (False, lock) if
    diff --git a/lib_pypy/cffi/backend_ctypes.py b/lib_pypy/cffi/backend_ctypes.py
    --- a/lib_pypy/cffi/backend_ctypes.py
    +++ b/lib_pypy/cffi/backend_ctypes.py
    @@ -997,29 +997,43 @@
             assert onerror is None   # XXX not implemented
             return BType(source, error)
     
    +    _weakref_cache_ref = None
    +
         def gcp(self, cdata, destructor):
    -        BType = self.typeof(cdata)
    +        if self._weakref_cache_ref is None:
    +            import weakref
    +            class MyRef(weakref.ref):
    +                def __eq__(self, other):
    +                    myref = self()
    +                    return self is other or (
    +                        myref is not None and myref is other())
    +                def __ne__(self, other):
    +                    return not (self == other)
    +                def __hash__(self):
    +                    try:
    +                        return self._hash
    +                    except AttributeError:
    +                        self._hash = hash(self())
    +                        return self._hash
    +            self._weakref_cache_ref = {}, MyRef
    +        weak_cache, MyRef = self._weakref_cache_ref
     
             if destructor is None:
    -            if not (hasattr(BType, '_gcp_type') and
    -                    BType._gcp_type is BType):
    +            try:
    +                del weak_cache[MyRef(cdata)]
    +            except KeyError:
                     raise TypeError("Can remove destructor only on a object "
                                     "previously returned by ffi.gc()")
    -            cdata._destructor = None
                 return None
     
    -        try:
    -            gcp_type = BType._gcp_type
    -        except AttributeError:
    -            class CTypesDataGcp(BType):
    -                __slots__ = ['_orig', '_destructor']
    -                def __del__(self):
    -                    if self._destructor is not None:
    -                        self._destructor(self._orig)
    -            gcp_type = BType._gcp_type = CTypesDataGcp
    -        new_cdata = self.cast(gcp_type, cdata)
    -        new_cdata._orig = cdata
    -        new_cdata._destructor = destructor
    +        def remove(k):
    +            cdata, destructor = weak_cache.pop(k, (None, None))
    +            if destructor is not None:
    +                destructor(cdata)
    +
    +        new_cdata = self.cast(self.typeof(cdata), cdata)
    +        assert new_cdata is not cdata
    +        weak_cache[MyRef(new_cdata, remove)] = (cdata, destructor)
             return new_cdata
     
         typeof = type
    diff --git a/lib_pypy/cffi/ffiplatform.py b/lib_pypy/cffi/ffiplatform.py
    --- a/lib_pypy/cffi/ffiplatform.py
    +++ b/lib_pypy/cffi/ffiplatform.py
    @@ -21,12 +21,12 @@
             allsources.append(os.path.normpath(src))
         return Extension(name=modname, sources=allsources, **kwds)
     
    -def compile(tmpdir, ext, compiler_verbose=0):
    +def compile(tmpdir, ext, compiler_verbose=0, debug=None):
         """Compile a C extension module using distutils."""
     
         saved_environ = os.environ.copy()
         try:
    -        outputfilename = _build(tmpdir, ext, compiler_verbose)
    +        outputfilename = _build(tmpdir, ext, compiler_verbose, debug)
             outputfilename = os.path.abspath(outputfilename)
         finally:
             # workaround for a distutils bugs where some env vars can
    @@ -36,7 +36,7 @@
                     os.environ[key] = value
         return outputfilename
     
    -def _build(tmpdir, ext, compiler_verbose=0):
    +def _build(tmpdir, ext, compiler_verbose=0, debug=None):
         # XXX compact but horrible :-(
         from distutils.core import Distribution
         import distutils.errors, distutils.log
    @@ -44,6 +44,9 @@
         dist = Distribution({'ext_modules': [ext]})
         dist.parse_config_files()
         options = dist.get_option_dict('build_ext')
    +    if debug is None:
    +        debug = sys.flags.debug
    +    options['debug'] = ('ffiplatform', debug)
         options['force'] = ('ffiplatform', True)
         options['build_lib'] = ('ffiplatform', tmpdir)
         options['build_temp'] = ('ffiplatform', tmpdir)
    diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py
    --- a/lib_pypy/cffi/recompiler.py
    +++ b/lib_pypy/cffi/recompiler.py
    @@ -275,8 +275,8 @@
         def write_c_source_to_f(self, f, preamble):
             self._f = f
             prnt = self._prnt
    -        if self.ffi._embedding is None:
    -            prnt('#define Py_LIMITED_API')
    +        if self.ffi._embedding is not None:
    +            prnt('#define _CFFI_USE_EMBEDDING')
             #
             # first the '#include' (actually done by inlining the file's content)
             lines = self._rel_readlines('_cffi_include.h')
    @@ -1431,7 +1431,7 @@
     
     def recompile(ffi, module_name, preamble, tmpdir='.', call_c_compiler=True,
                   c_file=None, source_extension='.c', extradir=None,
    -              compiler_verbose=1, target=None, **kwds):
    +              compiler_verbose=1, target=None, debug=None, **kwds):
         if not isinstance(module_name, str):
             module_name = module_name.encode('ascii')
         if ffi._windows_unicode:
    @@ -1467,7 +1467,8 @@
                     if target != '*':
                         _patch_for_target(patchlist, target)
                     os.chdir(tmpdir)
    -                outputfilename = ffiplatform.compile('.', ext, compiler_verbose)
    +                outputfilename = ffiplatform.compile('.', ext,
    +                                                     compiler_verbose, debug)
                 finally:
                     os.chdir(cwd)
                     _unpatch_meths(patchlist)
    diff --git a/lib_pypy/cffi/setuptools_ext.py b/lib_pypy/cffi/setuptools_ext.py
    --- a/lib_pypy/cffi/setuptools_ext.py
    +++ b/lib_pypy/cffi/setuptools_ext.py
    @@ -69,16 +69,36 @@
         else:
             _add_c_module(dist, ffi, module_name, source, source_extension, kwds)
     
    +def _set_py_limited_api(Extension, kwds):
    +    """
    +    Add py_limited_api to kwds if setuptools >= 26 is in use.
    +    Do not alter the setting if it already exists.
    +    Setuptools takes care of ignoring the flag on Python 2 and PyPy.
    +    """
    +    if 'py_limited_api' not in kwds:
    +        import setuptools
    +        try:
    +            setuptools_major_version = int(setuptools.__version__.partition('.')[0])
    +            if setuptools_major_version >= 26:
    +                kwds['py_limited_api'] = True
    +        except ValueError:  # certain development versions of setuptools
    +            # If we don't know the version number of setuptools, we
    +            # try to set 'py_limited_api' anyway.  At worst, we get a
    +            # warning.
    +            kwds['py_limited_api'] = True
    +    return kwds
     
     def _add_c_module(dist, ffi, module_name, source, source_extension, kwds):
         from distutils.core import Extension
    -    from distutils.command.build_ext import build_ext
    +    # We are a setuptools extension. Need this build_ext for py_limited_api.
    +    from setuptools.command.build_ext import build_ext
         from distutils.dir_util import mkpath
         from distutils import log
         from cffi import recompiler
     
         allsources = ['$PLACEHOLDER']
         allsources.extend(kwds.pop('sources', []))
    +    kwds = _set_py_limited_api(Extension, kwds)
         ext = Extension(name=module_name, sources=allsources, **kwds)
     
         def make_mod(tmpdir, pre_run=None):
    diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst
    --- a/pypy/doc/contributor.rst
    +++ b/pypy/doc/contributor.rst
    @@ -44,6 +44,7 @@
       Seo Sanghyeon
       Ronny Pfannschmidt
       Justin Peel
    +  Raffael Tfirst
       David Edelsohn
       Anders Hammarquist
       Jakub Gustak
    @@ -87,7 +88,6 @@
       Wenzhu Man
       John Witulski
       Laurence Tratt
    -  Raffael Tfirst
       Ivan Sichmann Freitas
       Greg Price
       Dario Bertini
    @@ -111,6 +111,7 @@
       tav
       Taavi Burns
       Georg Brandl
    +  Nicolas Truessel
       Bert Freudenberg
       Stian Andreassen
       Wanja Saatkamp
    @@ -181,6 +182,7 @@
       Vaibhav Sood
       Alan McIntyre
       Alexander Sedov
    +  p_zieschang at yahoo.de
       Attila Gobi
       Jasper.Schulz
       Christopher Pope
    @@ -191,6 +193,7 @@
       Arjun Naik
       Valentina Mukhamedzhanova
       Stefano Parmesan
    +  touilleMan
       Alexis Daboville
       Jens-Uwe Mager
       Carl Meyer
    @@ -199,12 +202,14 @@
       Gabriel
       Lukas Vacek
       Kunal Grover
    +  Aaron Gallagher
       Andrew Dalke
       Sylvain Thenault
       Jakub Stasiak
       Nathan Taylor
       Vladimir Kryachko
       Omer Katz
    +  Mark Williams
       Jacek Generowicz
       Alejandro J. Cura
       Jacob Oscarson
    @@ -325,9 +330,12 @@
       yasirs
       Michael Chermside
       Anna Ravencroft
    +  pizi
       Andrey Churin
       Dan Crosta
    +  Eli Stevens
       Tobias Diaz
       Julien Phalip
       Roman Podoliaka
       Dan Loewenherz
    +  werat
    diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst
    --- a/pypy/doc/index-of-release-notes.rst
    +++ b/pypy/doc/index-of-release-notes.rst
    @@ -6,6 +6,7 @@
     
     .. toctree::
     
    +   release-pypy2.7-v5.4.1.rst
        release-pypy2.7-v5.4.0.rst
        release-pypy2.7-v5.3.1.rst
        release-pypy2.7-v5.3.0.rst
    diff --git a/pypy/doc/release-pypy2.7-v5.4.0.rst b/pypy/doc/release-pypy2.7-v5.4.0.rst
    --- a/pypy/doc/release-pypy2.7-v5.4.0.rst
    +++ b/pypy/doc/release-pypy2.7-v5.4.0.rst
    @@ -3,7 +3,8 @@
     ============
     
     We have released PyPy2.7 v5.4, a little under two months after PyPy2.7 v5.3.
    -This new PyPy2.7 release includes further improvements to our C-API compatability layer (cpyext), enabling us to pass over 99% of the upstream
    +This new PyPy2.7 release includes incremental improvements to our C-API
    +compatability layer (cpyext), enabling us to pass over 99% of the upstream
     numpy `test suite`_. We updated built-in cffi_ support to version 1.8,
     which now supports the "limited API" mode for c-extensions on 
     CPython >=3.2.
    @@ -12,9 +13,7 @@
     support to OpenBSD and Dragon Fly BSD
     
     As always, this release fixed many issues and bugs raised by the
    -growing community of PyPy users. 
    -
    -XXXXX MORE ???
    +growing community of PyPy users. We strongly recommend updating.
     
     You can download the PyPy2.7 v5.4 release here:
     
    @@ -110,8 +109,8 @@
     
       * (RPython) add `rposix_scandir` portably, needed for Python 3.5
     
    -  * Support for memoryview attributes (format, itemsize, ...) which also
    -    adds support for `PyMemoryView_FromObject`
    +  * Increased but incomplete support for memoryview attributes (format, 
    +    itemsize, ...) which also adds support for `PyMemoryView_FromObject`
     
     * Bug Fixes
     
    @@ -153,10 +152,6 @@
       * Make `hash(-1)` return -2, as CPython does, and fix all the
         ancilary places this matters
     
    -  * Issues reported with our previous release were resolved_ after
    -    reports from users on our issue tracker at
    -    https://bitbucket.org/pypy/pypy/issues or on IRC at #pypy
    -
       * Fix `PyNumber_Check()` to behave more like CPython
     
       * (VMProf) Try hard to not miss any Python-level frame in the
    @@ -169,6 +164,10 @@
       * Fix the mapdict cache for subclasses of builtin types that
         provide a dict
     
    +  * Issues reported with our previous release were resolved_ after
    +    reports from users on our issue tracker at
    +    https://bitbucket.org/pypy/pypy/issues or on IRC at #pypy
    +
     * Performance improvements:
     
       * Add a before_call()-like equivalent before a few operations like
    diff --git a/pypy/doc/release-pypy2.7-v5.4.1.rst b/pypy/doc/release-pypy2.7-v5.4.1.rst
    new file mode 100644
    --- /dev/null
    +++ b/pypy/doc/release-pypy2.7-v5.4.1.rst
    @@ -0,0 +1,64 @@
    +==========
    +PyPy 5.4.1
    +==========
    +
    +We have released a bugfix for PyPy2.7-v5.4.0, released last week,
    +due to the following issues:
    +
    +  * Update list of contributors in documentation and LICENSE file,
    +    this was unfortunately left out of 5.4.0. My apologies to the new
    +    contributors
    +
    +  * Allow tests run with `-A` to find `libm.so` even if it is a script not a
    +    dynamically loadable file
    +
    +  * Bump `sys.setrecursionlimit()` when translating PyPy, for translating with CPython
    +
    +  * Tweak a float comparison with 0 in `backendopt.inline` to avoid rounding errors
    +
    +  * Fix for an issue where os.access() accepted a float for mode
    +
    +  * Fix for and issue where `unicode.decode('utf8', 'custom_replace')` messed up
    +    the last byte of a unicode string sometimes
    +
    +  * Update built-in cffi_ to the soon-to-be-released 1.8.2 version
    +
    +  * Explicitly detect that we found as-yet-unsupported OpenSSL 1.1, and crash
    +    translation with a message asking for help porting it
    +
    +  * Fix a regression where a PyBytesObject was forced (converted to a RPython
    +    object) when not required, reported as issue #2395
    +
    +Thanks to those who reported the issues.
    +
    +What is PyPy?
    +=============
    +
    +PyPy is a very compliant Python interpreter, almost a drop-in replacement for
    +CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison)
    +due to its integrated tracing JIT compiler.
    +
    +We also welcome developers of other
    +`dynamic languages`_ to see what RPython can do for them.
    +
    +This release supports:
    +
    +  * **x86** machines on most common operating systems
    +    (Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD),
    +
    +  * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux,
    +
    +  * big- and little-endian variants of **PPC64** running Linux,
    +
    +  * **s390x** running Linux
    +
    +.. _cffi: https://cffi.readthedocs.io
    +.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org
    +.. _`dynamic languages`: http://pypyjs.org
    +
    +Please update, and continue to help us make PyPy better.
    +
    +Cheers
    +
    +The PyPy Team
    +
    diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
    --- a/pypy/doc/whatsnew-head.rst
    +++ b/pypy/doc/whatsnew-head.rst
    @@ -5,4 +5,11 @@
     .. this is a revision shortly after release-pypy2.7-v5.4
     .. startrev: 522736f816dc
     
    +.. branch: rpython-resync
    +Backport rpython changes made directly on the py3k and py3.5 branches.
     
    +.. branch: buffer-interface
    +Implement PyObject_GetBuffer, PyMemoryView_GET_BUFFER, and handles memoryviews
    +in numpypy
    +
    +
    diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
    --- a/pypy/interpreter/baseobjspace.py
    +++ b/pypy/interpreter/baseobjspace.py
    @@ -1454,6 +1454,9 @@
         BUF_FORMAT   = 0x0004
         BUF_ND       = 0x0008
         BUF_STRIDES  = 0x0010 | BUF_ND
    +    BUF_C_CONTIGUOUS = 0x0020 | BUF_STRIDES
    +    BUF_F_CONTIGUOUS = 0x0040 | BUF_STRIDES
    +    BUF_ANY_CONTIGUOUS = 0x0080 | BUF_STRIDES
         BUF_INDIRECT = 0x0100 | BUF_STRIDES
     
         BUF_CONTIG_RO = BUF_ND
    diff --git a/pypy/module/__builtin__/descriptor.py b/pypy/module/__builtin__/descriptor.py
    --- a/pypy/module/__builtin__/descriptor.py
    +++ b/pypy/module/__builtin__/descriptor.py
    @@ -23,6 +23,14 @@
             self.w_objtype = w_type
             self.w_self = w_obj_or_type
     
    +    def descr_repr(self, space):
    +        if self.w_objtype is not None:
    +            objtype_name = "<%s object>" % self.w_objtype.getname(space)
    +        else:
    +            objtype_name = 'NULL'
    +        return space.wrap(", %s>" % (
    +            self.w_starttype.getname(space), objtype_name))
    +
         def get(self, space, w_obj, w_type=None):
             if self.w_self is None or space.is_w(w_obj, space.w_None):
                 return self
    @@ -84,7 +92,10 @@
         'super',
         __new__          = generic_new_descr(W_Super),
         __init__         = interp2app(W_Super.descr_init),
    +    __repr__         = interp2app(W_Super.descr_repr),
         __thisclass__    = interp_attrproperty_w("w_starttype", W_Super),
    +    __self__         = interp_attrproperty_w("w_self", W_Super),
    +    __self_class__   = interp_attrproperty_w("w_objtype", W_Super),
         __getattribute__ = interp2app(W_Super.getattribute),
         __get__          = interp2app(W_Super.get),
         __doc__          =     """\
    diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py
    --- a/pypy/module/__builtin__/interp_classobj.py
    +++ b/pypy/module/__builtin__/interp_classobj.py
    @@ -38,6 +38,8 @@
     
     
     class W_ClassObject(W_Root):
    +    _immutable_fields_ = ['bases_w?[*]', 'w_dict?']
    +
         def __init__(self, space, w_name, bases, w_dict):
             self.name = space.str_w(w_name)
             make_sure_not_resized(bases)
    @@ -75,6 +77,7 @@
                                 "__bases__ items must be classes")
             self.bases_w = bases_w
     
    +    @jit.unroll_safe
         def is_subclass_of(self, other):
             assert isinstance(other, W_ClassObject)
             if self is other:
    @@ -313,7 +316,7 @@
             # This method ignores the instance dict and the __getattr__.
             # Returns None if not found.
             assert isinstance(name, str)
    -        w_value = self.w_class.lookup(space, name)
    +        w_value = jit.promote(self.w_class).lookup(space, name)
             if w_value is None:
                 return None
             w_descr_get = space.lookup(w_value, '__get__')
    diff --git a/pypy/module/__builtin__/test/test_descriptor.py b/pypy/module/__builtin__/test/test_descriptor.py
    --- a/pypy/module/__builtin__/test/test_descriptor.py
    +++ b/pypy/module/__builtin__/test/test_descriptor.py
    @@ -250,6 +250,24 @@
             assert super(B, B()).__thisclass__ is B
             assert super(A, B()).__thisclass__ is A
     
    +    def test_super_self_selfclass(self):
    +        class A(object):
    +            pass
    +        class B(A):
    +            pass
    +        b = B()
    +        assert super(A, b).__self__ is b
    +        assert super(A).__self__ is None
    +        assert super(A, b).__self_class__ is B
    +        assert super(A).__self_class__ is None
    +
    +    def test_super_repr(self):
    +        class A(object):
    +            def __repr__(self):
    +                return super(A, self).__repr__() + '!'
    +        assert repr(A()).endswith('>!')
    +        assert repr(super(A, A())) == ", >"
    +
         def test_property_docstring(self):
             assert property.__doc__.startswith('property')
     
    diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py
    --- a/pypy/module/_cffi_backend/__init__.py
    +++ b/pypy/module/_cffi_backend/__init__.py
    @@ -3,7 +3,7 @@
     from rpython.rlib import rdynload, clibffi, entrypoint
     from rpython.rtyper.lltypesystem import rffi
     
    -VERSION = "1.8.0"
    +VERSION = "1.8.2"
     
     FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI
     try:
    diff --git a/pypy/module/_cffi_backend/ctypestruct.py b/pypy/module/_cffi_backend/ctypestruct.py
    --- a/pypy/module/_cffi_backend/ctypestruct.py
    +++ b/pypy/module/_cffi_backend/ctypestruct.py
    @@ -105,9 +105,6 @@
                     return True
             return False
     
    -    def _check_only_one_argument_for_union(self, w_ob):
    -        pass
    -
         def convert_from_object(self, cdata, w_ob):
             if not self._copy_from_same(cdata, w_ob):
                 self.convert_struct_from_object(cdata, w_ob, optvarsize=-1)
    @@ -117,19 +114,24 @@
         )
         def convert_struct_from_object(self, cdata, w_ob, optvarsize):
             self.force_lazy_struct()
    -        self._check_only_one_argument_for_union(w_ob)
     
             space = self.space
             if (space.isinstance_w(w_ob, space.w_list) or
                 space.isinstance_w(w_ob, space.w_tuple)):
                 lst_w = space.listview(w_ob)
    -            if len(lst_w) > len(self._fields_list):
    -                raise oefmt(space.w_ValueError,
    -                            "too many initializers for '%s' (got %d)",
    -                            self.name, len(lst_w))
    -            for i in range(len(lst_w)):
    -                optvarsize = self._fields_list[i].write_v(cdata, lst_w[i],
    +            j = 0
    +            for w_obj in lst_w:
    +                try:
    +                    while (self._fields_list[j].flags &
    +                               W_CField.BF_IGNORE_IN_CTOR):
    +                        j += 1
    +                except IndexError:
    +                    raise oefmt(space.w_ValueError,
    +                                "too many initializers for '%s' (got %d)",
    +                                self.name, len(lst_w))
    +                optvarsize = self._fields_list[j].write_v(cdata, w_obj,
                                                               optvarsize)
    +                j += 1
                 return optvarsize
     
             elif space.isinstance_w(w_ob, space.w_dict):
    @@ -185,14 +187,6 @@
     class W_CTypeUnion(W_CTypeStructOrUnion):
         kind = "union"
     
    -    def _check_only_one_argument_for_union(self, w_ob):
    -        space = self.space
    -        n = space.int_w(space.len(w_ob))
    -        if n > 1:
    -            raise oefmt(space.w_ValueError,
    -                        "initializer for '%s': %d items given, but only one "
    -                        "supported (use a dict if needed)", self.name, n)
    -
     
     class W_CField(W_Root):
         _immutable_ = True
    @@ -200,18 +194,21 @@
         BS_REGULAR     = -1
         BS_EMPTY_ARRAY = -2
     
    -    def __init__(self, ctype, offset, bitshift, bitsize):
    +    BF_IGNORE_IN_CTOR = 0x01
    +
    +    def __init__(self, ctype, offset, bitshift, bitsize, flags):
             self.ctype = ctype
             self.offset = offset
             self.bitshift = bitshift # >= 0: bitshift; or BS_REGULAR/BS_EMPTY_ARRAY
             self.bitsize = bitsize
    +        self.flags = flags       # BF_xxx
     
         def is_bitfield(self):
             return self.bitshift >= 0
     
    -    def make_shifted(self, offset):
    +    def make_shifted(self, offset, fflags):
             return W_CField(self.ctype, offset + self.offset,
    -                        self.bitshift, self.bitsize)
    +                        self.bitshift, self.bitsize, self.flags | fflags)
     
         def read(self, cdata):
             cdata = rffi.ptradd(cdata, self.offset)
    @@ -341,5 +338,6 @@
         offset = interp_attrproperty('offset', W_CField),
         bitshift = interp_attrproperty('bitshift', W_CField),
         bitsize = interp_attrproperty('bitsize', W_CField),
    +    flags = interp_attrproperty('flags', W_CField),
         )
     W_CField.typedef.acceptable_as_base_class = False
    diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py
    --- a/pypy/module/_cffi_backend/newtype.py
    +++ b/pypy/module/_cffi_backend/newtype.py
    @@ -345,6 +345,11 @@
             if alignment < falign and do_align:
                 alignment = falign
             #
    +        if is_union and i > 0:
    +            fflags = ctypestruct.W_CField.BF_IGNORE_IN_CTOR
    +        else:
    +            fflags = 0
    +        #
             if fbitsize < 0:
                 # not a bitfield: common case
     
    @@ -372,7 +377,7 @@
                     for name, srcfld in ftype._fields_dict.items():
                         srcfield2names[srcfld] = name
                     for srcfld in ftype._fields_list:
    -                    fld = srcfld.make_shifted(boffset // 8)
    +                    fld = srcfld.make_shifted(boffset // 8, fflags)
                         fields_list.append(fld)
                         try:
                             fields_dict[srcfield2names[srcfld]] = fld
    @@ -382,7 +387,8 @@
                     w_ctype._custom_field_pos = True
                 else:
                     # a regular field
    -                fld = ctypestruct.W_CField(ftype, boffset // 8, bs_flag, -1)
    +                fld = ctypestruct.W_CField(ftype, boffset // 8, bs_flag, -1,
    +                                           fflags)
                     fields_list.append(fld)
                     fields_dict[fname] = fld
     
    @@ -489,7 +495,7 @@
                         bitshift = 8 * ftype.size - fbitsize- bitshift
     
                     fld = ctypestruct.W_CField(ftype, field_offset_bytes,
    -                                           bitshift, fbitsize)
    +                                           bitshift, fbitsize, fflags)
                     fields_list.append(fld)
                     fields_dict[fname] = fld
     
    diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py
    --- a/pypy/module/_cffi_backend/test/_backend_test_c.py
    +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py
    @@ -1,7 +1,7 @@
     # ____________________________________________________________
     
     import sys
    -assert __version__ == "1.8.0", ("This test_c.py file is for testing a version"
    +assert __version__ == "1.8.2", ("This test_c.py file is for testing a version"
                                     " of cffi that differs from the one that we"
                                     " get from 'import _cffi_backend'")
     if sys.version_info < (3,):
    @@ -2525,6 +2525,25 @@
         assert d[2][1].bitshift == -1
         assert d[2][1].bitsize == -1
     
    +def test_nested_anonymous_struct_2():
    +    BInt = new_primitive_type("int")
    +    BStruct = new_struct_type("struct foo")
    +    BInnerUnion = new_union_type("union bar")
    +    complete_struct_or_union(BInnerUnion, [('a1', BInt, -1),
    +                                           ('a2', BInt, -1)])
    +    complete_struct_or_union(BStruct, [('b1', BInt, -1),
    +                                       ('', BInnerUnion, -1),
    +                                       ('b2', BInt, -1)])
    +    assert sizeof(BInnerUnion) == sizeof(BInt)
    +    assert sizeof(BStruct) == sizeof(BInt) * 3
    +    fields = [(name, fld.offset, fld.flags) for (name, fld) in BStruct.fields]
    +    assert fields == [
    +        ('b1', 0 * sizeof(BInt), 0),
    +        ('a1', 1 * sizeof(BInt), 0),
    +        ('a2', 1 * sizeof(BInt), 1),
    +        ('b2', 2 * sizeof(BInt), 0),
    +    ]
    +
     def test_sizeof_union():
         # a union has the largest alignment of its members, and a total size
         # that is the largest of its items *possibly further aligned* if
    diff --git a/pypy/module/_sre/__init__.py b/pypy/module/_sre/__init__.py
    --- a/pypy/module/_sre/__init__.py
    +++ b/pypy/module/_sre/__init__.py
    @@ -1,4 +1,4 @@
    -from pypy.interpreter.mixedmodule import MixedModule 
    +from pypy.interpreter.mixedmodule import MixedModule
     
     class Module(MixedModule):
     
    @@ -7,7 +7,7 @@
     
         interpleveldefs = {
             'CODESIZE':       'space.wrap(interp_sre.CODESIZE)',
    -        'MAGIC':          'space.wrap(interp_sre.MAGIC)',
    +        'MAGIC':          'space.newint(20031017)',
             'MAXREPEAT':      'space.wrap(interp_sre.MAXREPEAT)',
             'compile':        'interp_sre.W_SRE_Pattern',
             'getlower':       'interp_sre.w_getlower',
    diff --git a/pypy/module/_sre/interp_sre.py b/pypy/module/_sre/interp_sre.py
    --- a/pypy/module/_sre/interp_sre.py
    +++ b/pypy/module/_sre/interp_sre.py
    @@ -14,7 +14,7 @@
     # Constants and exposed functions
     
     from rpython.rlib.rsre import rsre_core
    -from rpython.rlib.rsre.rsre_char import MAGIC, CODESIZE, MAXREPEAT, getlower, set_unicode_db
    +from rpython.rlib.rsre.rsre_char import CODESIZE, MAXREPEAT, getlower, set_unicode_db
     
     
     @unwrap_spec(char_ord=int, flags=int)
    diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py
    --- a/pypy/module/array/interp_array.py
    +++ b/pypy/module/array/interp_array.py
    @@ -597,6 +597,18 @@
         def getlength(self):
             return self.array.len * self.array.itemsize
     
    +    def getformat(self):
    +        return self.array.typecode
    +
    +    def getitemsize(self):
    +        return self.array.itemsize
    +
    +    def getndim(self):
    +        return 1
    +
    +    def getstrides(self):
    +        return [self.getitemsize()]
    +
         def getitem(self, index):
             array = self.array
             data = array._charbuf_start()
    diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
    --- a/pypy/module/cpyext/api.py
    +++ b/pypy/module/cpyext/api.py
    @@ -122,7 +122,7 @@
     METH_COEXIST METH_STATIC METH_CLASS Py_TPFLAGS_BASETYPE
     METH_NOARGS METH_VARARGS METH_KEYWORDS METH_O Py_TPFLAGS_HAVE_INPLACEOPS
     Py_TPFLAGS_HEAPTYPE Py_TPFLAGS_HAVE_CLASS Py_TPFLAGS_HAVE_NEWBUFFER
    -Py_LT Py_LE Py_EQ Py_NE Py_GT Py_GE Py_TPFLAGS_CHECKTYPES
    +Py_LT Py_LE Py_EQ Py_NE Py_GT Py_GE Py_TPFLAGS_CHECKTYPES Py_MAX_NDIMS
     """.split()
     for name in constant_names:
         setattr(CConfig_constants, name, rffi_platform.ConstantInteger(name))
    @@ -645,6 +645,9 @@
             ('format', rffi.CCHARP),
             ('shape', Py_ssize_tP),
             ('strides', Py_ssize_tP),
    +        ('_format', rffi.UCHAR),
    +        ('_shape', rffi.CFixedArray(Py_ssize_t, Py_MAX_NDIMS)),
    +        ('_strides', rffi.CFixedArray(Py_ssize_t, Py_MAX_NDIMS)),
             ('suboffsets', Py_ssize_tP),
             #('smalltable', rffi.CFixedArray(Py_ssize_t, 2)),
             ('internal', rffi.VOIDP)
    @@ -977,8 +980,10 @@
             py_type_ready(space, get_capsule_type())
         INIT_FUNCTIONS.append(init_types)
         from pypy.module.posix.interp_posix import add_fork_hook
    -    reinit_tls = rffi.llexternal('%sThread_ReInitTLS' % prefix, [], lltype.Void,
    -                                 compilation_info=eci)
    +    _reinit_tls = rffi.llexternal('%sThread_ReInitTLS' % prefix, [], 
    +                                  lltype.Void, compilation_info=eci)
    +    def reinit_tls(space):
    +        _reinit_tls()
         add_fork_hook('child', reinit_tls)
     
     def init_function(func):
    diff --git a/pypy/module/cpyext/buffer.py b/pypy/module/cpyext/buffer.py
    --- a/pypy/module/cpyext/buffer.py
    +++ b/pypy/module/cpyext/buffer.py
    @@ -1,8 +1,9 @@
     from pypy.interpreter.error import oefmt
     from rpython.rtyper.lltypesystem import rffi, lltype
    +from rpython.rlib.rarithmetic import widen
     from pypy.module.cpyext.api import (
    -    cpython_api, CANNOT_FAIL, Py_buffer, Py_TPFLAGS_HAVE_NEWBUFFER)
    -from pypy.module.cpyext.pyobject import PyObject
    +    cpython_api, CANNOT_FAIL, Py_buffer, Py_TPFLAGS_HAVE_NEWBUFFER, Py_ssize_tP)
    +from pypy.module.cpyext.pyobject import PyObject, make_ref, incref
     
     @cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL)
     def PyObject_CheckBuffer(space, pyobj):
    @@ -33,13 +34,82 @@
         raise an error if the object can't support a simpler view of its memory.
     
         0 is returned on success and -1 on error."""
    -    raise oefmt(space.w_TypeError,
    -                "PyPy does not yet implement the new buffer interface")
    +    flags = widen(flags)
    +    buf = space.buffer_w(w_obj, flags)
    +    try:
    +        view.c_buf = rffi.cast(rffi.VOIDP, buf.get_raw_address())
    +    except ValueError:
    +        raise BufferError("could not create buffer from object")
    +    view.c_len = buf.getlength()
    +    view.c_obj = make_ref(space, w_obj)
    +    ndim = buf.getndim()
    +    view.c_itemsize = buf.getitemsize()
    +    rffi.setintfield(view, 'c_readonly', int(buf.readonly))
    +    rffi.setintfield(view, 'c_ndim', ndim)
    +    view.c_format = rffi.str2charp(buf.getformat())
    +    view.c_shape = lltype.malloc(Py_ssize_tP.TO, ndim, flavor='raw')
    +    view.c_strides = lltype.malloc(Py_ssize_tP.TO, ndim, flavor='raw')
    +    shape = buf.getshape()
    +    strides = buf.getstrides()
    +    for i in range(ndim):
    +        view.c_shape[i] = shape[i]
    +        view.c_strides[i] = strides[i]
    +    view.c_suboffsets = lltype.nullptr(Py_ssize_tP.TO)
    +    view.c_internal = lltype.nullptr(rffi.VOIDP.TO)
    +    return 0
    +
    +def _IsFortranContiguous(view):
    +    ndim = widen(view.c_ndim)
    +    if ndim == 0:
    +        return 1
    +    if not view.c_strides:
    +        return ndim == 1
    +    sd = view.c_itemsize
    +    if ndim == 1:
    +        return view.c_shape[0] == 1 or sd == view.c_strides[0]
    +    for i in range(view.c_ndim):
    +        dim = view.c_shape[i]
    +        if dim == 0:
    +            return 1
    +        if view.c_strides[i] != sd:
    +            return 0
    +        sd *= dim
    +    return 1
    +
    +def _IsCContiguous(view):
    +    ndim = widen(view.c_ndim)
    +    if ndim == 0:
    +        return 1
    +    if not view.c_strides:
    +        return ndim == 1
    +    sd = view.c_itemsize
    +    if ndim == 1:
    +        return view.c_shape[0] == 1 or sd == view.c_strides[0]
    +    for i in range(ndim - 1, -1, -1):
    +        dim = view.c_shape[i]
    +        if dim == 0:
    +            return 1
    +        if view.c_strides[i] != sd:
    +            return 0
    +        sd *= dim
    +    return 1
    +        
     
     @cpython_api([lltype.Ptr(Py_buffer), lltype.Char], rffi.INT_real, error=CANNOT_FAIL)
    -def PyBuffer_IsContiguous(space, view, fortran):
    +def PyBuffer_IsContiguous(space, view, fort):
         """Return 1 if the memory defined by the view is C-style (fortran is
         'C') or Fortran-style (fortran is 'F') contiguous or either one
         (fortran is 'A').  Return 0 otherwise."""
    -    # PyPy only supports contiguous Py_buffers for now.
    -    return 1
    +    # traverse the strides, checking for consistent stride increases from
    +    # right-to-left (c) or left-to-right (fortran). Copied from cpython
    +    if not view.c_suboffsets:
    +        return 0
    +    if (fort == 'C'):
    +        return _IsCContiguous(view)
    +    elif (fort == 'F'):
    +        return _IsFortranContiguous(view)
    +    elif (fort == 'A'):
    +        return (_IsCContiguous(view) or _IsFortranContiguous(view))
    +    return 0
    +
    +    
    diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py
    --- a/pypy/module/cpyext/bytesobject.py
    +++ b/pypy/module/cpyext/bytesobject.py
    @@ -29,19 +29,17 @@
     ## Solution
     ## --------
     ##
    -## PyBytesObject contains two additional members: the ob_size and a pointer to a
    -## char ob_sval; it may be NULL.
    +## PyBytesObject contains two additional members: the ob_size and an array
    +## char ob_sval which holds a \x0 terminated string.
     ##
     ## - A string allocated by pypy will be converted into a PyBytesObject with a
    -##   NULL buffer.  The first time PyString_AsString() is called, memory is
    -##   allocated (with flavor='raw') and content is copied.
    +##   buffer holding \x0.  The first time PyString_AsString() is called, the 
    +##   PyStringObject is reallocated, and the string copied into the buffer. The
    +##   ob_size reflects the length of the string.
     ##
     ## - A string allocated with PyString_FromStringAndSize(NULL, size) will
     ##   allocate a PyBytesObject structure, and a buffer with the specified
    -##   size+1, but the reference won't be stored in the global map; there is no
    -##   corresponding object in pypy.  When from_ref() or Py_INCREF() is called,
    -##   the pypy string is created, and added to the global map of tracked
    -##   objects.  The buffer is then supposed to be immutable.
    +##   size+1, as part of the object. The buffer is then supposed to be immutable.
     ##
     ##-  A buffer obtained from PyString_AS_STRING() could be mutable iff
     ##   there is no corresponding pypy object for the string
    @@ -156,9 +154,6 @@
                             "expected string or Unicode object, %T found",
                             from_ref(space, ref))
         ref_str = rffi.cast(PyBytesObject, ref)
    -    if not pyobj_has_w_obj(ref):
    -        # XXX Force the ref?
    -        bytes_realize(space, ref)
         return ref_str.c_ob_sval
     
     @cpython_api([rffi.VOIDP], rffi.CCHARP, error=0)
    diff --git a/pypy/module/cpyext/import_.py b/pypy/module/cpyext/import_.py
    --- a/pypy/module/cpyext/import_.py
    +++ b/pypy/module/cpyext/import_.py
    @@ -123,5 +123,4 @@
             pathname = code.co_filename
         w_mod = importing.add_module(space, w_name)
         space.setattr(w_mod, space.wrap('__file__'), space.wrap(pathname))
    -    importing.exec_code_module(space, w_mod, code)
    -    return w_mod
    +    return importing.exec_code_module(space, w_mod, code, w_name)
    diff --git a/pypy/module/cpyext/include/object.h b/pypy/module/cpyext/include/object.h
    --- a/pypy/module/cpyext/include/object.h
    +++ b/pypy/module/cpyext/include/object.h
    @@ -142,7 +142,8 @@
     typedef Py_ssize_t (*segcountproc)(PyObject *, Py_ssize_t *);
     typedef Py_ssize_t (*charbufferproc)(PyObject *, Py_ssize_t, char **);
     
    -/* Py3k buffer interface */
    +/* Py3k buffer interface, adapted for PyPy */
    +#define Py_MAX_NDIMS 32
     typedef struct bufferinfo {
         void *buf;
         PyObject *obj;        /* owned reference */
    @@ -156,12 +157,14 @@
         char *format;
         Py_ssize_t *shape;
         Py_ssize_t *strides;
    -    Py_ssize_t *suboffsets;
    -
    +    Py_ssize_t *suboffsets; /* alway NULL for app-level objects*/
    +    unsigned char _format;
    +    Py_ssize_t _strides[Py_MAX_NDIMS];
    +    Py_ssize_t _shape[Py_MAX_NDIMS];
         /* static store for shape and strides of
            mono-dimensional buffers. */
         /* Py_ssize_t smalltable[2]; */
    -    void *internal;
    +    void *internal; /* always NULL for app-level objects */
     } Py_buffer;
     
     
    diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h
    --- a/pypy/module/cpyext/include/patchlevel.h
    +++ b/pypy/module/cpyext/include/patchlevel.h
    @@ -29,8 +29,8 @@
     #define PY_VERSION		"2.7.10"
     
     /* PyPy version as a string */
    -#define PYPY_VERSION "5.4.0"
    -#define PYPY_VERSION_NUM  0x05040000
    +#define PYPY_VERSION "5.5.0-alpha0"
    +#define PYPY_VERSION_NUM  0x05050000
     
     /* Defined to mean a PyPy where cpyext holds more regular references
        to PyObjects, e.g. staying alive as long as the internal PyPy object
    diff --git a/pypy/module/cpyext/memoryobject.py b/pypy/module/cpyext/memoryobject.py
    --- a/pypy/module/cpyext/memoryobject.py
    +++ b/pypy/module/cpyext/memoryobject.py
    @@ -1,7 +1,8 @@
     from pypy.module.cpyext.api import (cpython_api, Py_buffer, CANNOT_FAIL,
    -                                    build_type_checkers)
    -from pypy.module.cpyext.pyobject import PyObject
    -from rpython.rtyper.lltypesystem import lltype
    +                               Py_MAX_NDIMS, build_type_checkers, Py_ssize_tP)
    +from pypy.module.cpyext.pyobject import PyObject, make_ref, incref
    +from rpython.rtyper.lltypesystem import lltype, rffi
    +from pypy.objspace.std.memoryobject import W_MemoryView
     
     PyMemoryView_Check, PyMemoryView_CheckExact = build_type_checkers("MemoryView", "w_memoryview")
     
    @@ -12,6 +13,7 @@
     @cpython_api([PyObject], PyObject)
     def PyMemoryView_GET_BASE(space, w_obj):
         # return the obj field of the Py_buffer created by PyMemoryView_GET_BUFFER
    +    # XXX needed for numpy on py3k
         raise NotImplementedError('PyMemoryView_GET_BUFFER')
     
     @cpython_api([PyObject], lltype.Ptr(Py_buffer), error=CANNOT_FAIL)
    @@ -20,21 +22,35 @@
         object.  The object must be a memoryview instance; this macro doesn't
         check its type, you must do it yourself or you will risk crashes."""
         view = lltype.malloc(Py_buffer, flavor='raw', zero=True)
    -    # TODO - fill in fields
    -    '''
    -    view.c_buf = buf
    -    view.c_len = length
    -    view.c_obj = obj
    -    Py_IncRef(space, obj)
    -    view.c_itemsize = 1
    -    rffi.setintfield(view, 'c_readonly', readonly)
    -    rffi.setintfield(view, 'c_ndim', 0)
    -    view.c_format = lltype.nullptr(rffi.CCHARP.TO)
    -    view.c_shape = lltype.nullptr(Py_ssize_tP.TO)
    -    view.c_strides = lltype.nullptr(Py_ssize_tP.TO)
    +    if not isinstance(w_obj, W_MemoryView):
    +        return view
    +    ndim = w_obj.buf.getndim()
    +    if ndim >= Py_MAX_NDIMS:
    +        # XXX warn?
    +        return view
    +    try:
    +        view.c_buf = rffi.cast(rffi.VOIDP, w_obj.buf.get_raw_address())
    +        view.c_obj = make_ref(space, w_obj)
    +        rffi.setintfield(view, 'c_readonly', w_obj.buf.readonly)
    +        isstr = False
    +    except ValueError:
    +        w_s = w_obj.descr_tobytes(space)
    +        view.c_obj = make_ref(space, w_s)
    +        rffi.setintfield(view, 'c_readonly', 1)
    +        isstr = True
    +    view.c_len = w_obj.getlength()
    +    view.c_itemsize = w_obj.buf.getitemsize()
    +    rffi.setintfield(view, 'c_ndim', ndim)
    +    view.c__format = rffi.cast(rffi.UCHAR, w_obj.buf.getformat())
    +    view.c_format = rffi.cast(rffi.CCHARP, view.c__format)
    +    view.c_shape = rffi.cast(Py_ssize_tP, view.c__shape)
    +    view.c_strides = rffi.cast(Py_ssize_tP, view.c__strides)
    +    shape = w_obj.buf.getshape()
    +    strides = w_obj.buf.getstrides()
    +    for i in range(ndim):
    +        view.c_shape[i] = shape[i]
    +        view.c_strides[i] = strides[i]
         view.c_suboffsets = lltype.nullptr(Py_ssize_tP.TO)
         view.c_internal = lltype.nullptr(rffi.VOIDP.TO)
    -    ''' 
         return view
     
    -
    diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py
    --- a/pypy/module/cpyext/object.py
    +++ b/pypy/module/cpyext/object.py
    @@ -508,10 +508,9 @@
     @cpython_api([lltype.Ptr(Py_buffer)], lltype.Void, error=CANNOT_FAIL)
     def PyBuffer_Release(space, view):
         """
    -    Releases a Py_buffer obtained from getbuffer ParseTuple's s*.
    -
    -    This is not a complete re-implementation of the CPython API; it only
    -    provides a subset of CPython's behavior.
    +    Release the buffer view. This should be called when the buffer is 
    +    no longer being used as it may free memory from it
         """
         Py_DecRef(space, view.c_obj)
         view.c_obj = lltype.nullptr(PyObject.TO)
    +    # XXX do other fields leak memory?
    diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py
    --- a/pypy/module/cpyext/slotdefs.py
    +++ b/pypy/module/cpyext/slotdefs.py
    @@ -335,9 +335,15 @@
         def getshape(self):
             return self.shape
     
    +    def getstrides(self):
    +        return self.strides
    +
         def getitemsize(self):
             return self.itemsize
     
    +    def getndim(self):
    +        return self.ndim
    +
     def wrap_getreadbuffer(space, w_self, w_args, func):
         func_target = rffi.cast(readbufferproc, func)
         with lltype.scoped_alloc(rffi.VOIDPP.TO, 1) as ptr:
    diff --git a/pypy/module/cpyext/test/buffer_test.c b/pypy/module/cpyext/test/buffer_test.c
    --- a/pypy/module/cpyext/test/buffer_test.c
    +++ b/pypy/module/cpyext/test/buffer_test.c
    @@ -107,14 +107,11 @@
     PyMyArray_getbuffer(PyObject *obj, Py_buffer *view, int flags)
     {
       PyMyArray* self = (PyMyArray*)obj;
    -  fprintf(stdout, "in PyMyArray_getbuffer\n");
       if (view == NULL) {
    -    fprintf(stdout, "view is NULL\n");
         PyErr_SetString(PyExc_ValueError, "NULL view in getbuffer");
         return -1;
       }
       if (flags == 0) {
    -    fprintf(stdout, "flags is 0\n");
         PyErr_SetString(PyExc_ValueError, "flags == 0 in getbuffer");
         return -1;
       }
    @@ -188,7 +185,131 @@
         (initproc)PyMyArray_init,     /* tp_init */
     };
     
    +static PyObject*
    +test_buffer(PyObject* self, PyObject* args)
    +{
    +    Py_buffer* view = NULL;
    +    PyObject* obj = PyTuple_GetItem(args, 0);
    +    PyObject* memoryview = PyMemoryView_FromObject(obj);
    +    if (memoryview == NULL)
    +        return PyInt_FromLong(-1);
    +    view = PyMemoryView_GET_BUFFER(memoryview);
    +    Py_DECREF(memoryview);
    +    return PyInt_FromLong(view->len);
    +}
    +
    +/* Copied from numpy tests */
    +/*
    + * Create python string from a FLAG and or the corresponding PyBuf flag
    + * for the use in get_buffer_info.
    + */
    +#define GET_PYBUF_FLAG(FLAG)                                        \
    +    buf_flag = PyUnicode_FromString(#FLAG);                         \
    +    flag_matches = PyObject_RichCompareBool(buf_flag, tmp, Py_EQ);  \
    +    Py_DECREF(buf_flag);                                            \
    +    if (flag_matches == 1) {                                        \
    +        Py_DECREF(tmp);                                             \
    +        flags |= PyBUF_##FLAG;                                      \
    +        continue;                                                   \
    +    }                                                               \
    +    else if (flag_matches == -1) {                                  \
    +        Py_DECREF(tmp);                                             \
    +        return NULL;                                                \
    +    }
    +
    +
    +/*
    + * Get information for a buffer through PyBuf_GetBuffer with the
    + * corresponding flags or'ed. Note that the python caller has to
    + * make sure that or'ing those flags actually makes sense.
    + * More information should probably be returned for future tests.
    + */
    +static PyObject *
    +get_buffer_info(PyObject *self, PyObject *args)
    +{
    +    PyObject *buffer_obj, *pyflags;
    +    PyObject *tmp, *buf_flag;
    +    Py_buffer buffer;
    +    PyObject *shape, *strides;
    +    Py_ssize_t i, n;
    +    int flag_matches;
    +    int flags = 0;
    +
    +    if (!PyArg_ParseTuple(args, "OO", &buffer_obj, &pyflags)) {
    +        return NULL;
    +    }
    +
    +    n = PySequence_Length(pyflags);
    +    if (n < 0) {
    +        return NULL;
    +    }
    +
    +    for (i=0; i < n; i++) {
    +        tmp = PySequence_GetItem(pyflags, i);
    +        if (tmp == NULL) {
    +            return NULL;
    +        }
    +
    +        GET_PYBUF_FLAG(SIMPLE);
    +        GET_PYBUF_FLAG(WRITABLE);
    +        GET_PYBUF_FLAG(STRIDES);
    +        GET_PYBUF_FLAG(ND);
    +        GET_PYBUF_FLAG(C_CONTIGUOUS);
    +        GET_PYBUF_FLAG(F_CONTIGUOUS);
    +        GET_PYBUF_FLAG(ANY_CONTIGUOUS);
    +        GET_PYBUF_FLAG(INDIRECT);
    +        GET_PYBUF_FLAG(FORMAT);
    +        GET_PYBUF_FLAG(STRIDED);
    +        GET_PYBUF_FLAG(STRIDED_RO);
    +        GET_PYBUF_FLAG(RECORDS);
    +        GET_PYBUF_FLAG(RECORDS_RO);
    +        GET_PYBUF_FLAG(FULL);
    +        GET_PYBUF_FLAG(FULL_RO);
    +        GET_PYBUF_FLAG(CONTIG);
    +        GET_PYBUF_FLAG(CONTIG_RO);
    +
    +        Py_DECREF(tmp);
    +
    +        /* One of the flags must match */
    +        PyErr_SetString(PyExc_ValueError, "invalid flag used.");
    +        return NULL;
    +    }
    +
    +    if (PyObject_GetBuffer(buffer_obj, &buffer, flags) < 0) {
    +        return NULL;
    +    }
    +
    +    if (buffer.shape == NULL) {
    +        Py_INCREF(Py_None);
    +        shape = Py_None;
    +    }
    +    else {
    +        shape = PyTuple_New(buffer.ndim);
    +        for (i=0; i < buffer.ndim; i++) {
    +            PyTuple_SET_ITEM(shape, i, PyLong_FromSsize_t(buffer.shape[i]));
    +        }
    +    }
    +
    +    if (buffer.strides == NULL) {
    +        Py_INCREF(Py_None);
    +        strides = Py_None;
    +    }
    +    else {
    +        strides = PyTuple_New(buffer.ndim);
    +        for (i=0; i < buffer.ndim; i++) {
    +            PyTuple_SET_ITEM(strides, i, PyLong_FromSsize_t(buffer.strides[i]));
    +        }
    +    }
    +
    +    PyBuffer_Release(&buffer);
    +    return Py_BuildValue("(NN)", shape, strides);
    +}
    +
    +
    +
     static PyMethodDef buffer_functions[] = {
    +    {"test_buffer",   (PyCFunction)test_buffer, METH_VARARGS, NULL},
    +    {"get_buffer_info",   (PyCFunction)get_buffer_info, METH_VARARGS, NULL},
         {NULL,        NULL}    /* Sentinel */
     };
     
    @@ -198,7 +319,7 @@
         "buffer_test",
         "Module Doc",
         -1,
    -    buffer_functions;
    +    buffer_functions,
         NULL,
         NULL,
         NULL,
    diff --git a/pypy/module/cpyext/test/test_bytesobject.py b/pypy/module/cpyext/test/test_bytesobject.py
    --- a/pypy/module/cpyext/test/test_bytesobject.py
    +++ b/pypy/module/cpyext/test/test_bytesobject.py
    @@ -183,8 +183,27 @@
                      Py_INCREF(Py_None);
                      return Py_None;
                  """),
    +            ("c_only", "METH_NOARGS",
    +            """
    +                int ret;
    +                char * buf2;
    +                PyObject * obj = PyBytes_FromStringAndSize(NULL, 1024);
    +                if (!obj)
    +                    return NULL;
    +                buf2 = PyBytes_AsString(obj);
    +                if (!buf2)
    +                    return NULL;
    +                /* buf should not have been forced, issue #2395 */
    +                ret = _PyBytes_Resize(&obj, 512);
    +                if (ret < 0)
    +                    return NULL;
    +                 Py_DECREF(obj);
    +                 Py_INCREF(Py_None);
    +                 return Py_None;
    +            """),
                 ])
             module.getbytes()
    +        module.c_only()
     
         def test_py_string_as_string_Unicode(self):
             module = self.import_extension('foo', [
    diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py
    --- a/pypy/module/cpyext/test/test_cpyext.py
    +++ b/pypy/module/cpyext/test/test_cpyext.py
    @@ -92,10 +92,20 @@
                 link_extra=link_extra,
                 libraries=libraries)
         from pypy.module.imp.importing import get_so_extension
    -    pydname = soname.new(purebasename=modname, ext=get_so_extension(space))
    +    ext = get_so_extension(space)
    +    pydname = soname.new(purebasename=modname, ext=ext)
         soname.rename(pydname)
         return str(pydname)
     
    +def get_so_suffix():
    +    from imp import get_suffixes, C_EXTENSION
    +    for suffix, mode, typ in get_suffixes():
    +        if typ == C_EXTENSION:
    +            return suffix
    +    else:
    +        raise RuntimeError("This interpreter does not define a filename "
    +            "suffix for C extensions!")
    +
     def compile_extension_module_applevel(space, modname, include_dirs=[],
             source_files=None, source_strings=None):
         """
    @@ -126,13 +136,9 @@
                 source_strings=source_strings,
                 compile_extra=compile_extra,
                 link_extra=link_extra)
    -    from imp import get_suffixes, C_EXTENSION
    -    pydname = soname
    -    for suffix, mode, typ in get_suffixes():
    -        if typ == C_EXTENSION:
    -            pydname = soname.new(purebasename=modname, ext=suffix)
    -            soname.rename(pydname)
    -            break
    +    ext = get_so_suffix()
    +    pydname = soname.new(purebasename=modname, ext=ext)
    +    soname.rename(pydname)
         return str(pydname)
     
     def freeze_refcnts(self):
    @@ -145,6 +151,24 @@
         #state.print_refcounts()
         self.frozen_ll2callocations = set(ll2ctypes.ALLOCATED.values())
     
    +class FakeSpace(object):
    +    """Like TinyObjSpace, but different"""
    +    def __init__(self, config):
    +        from distutils.sysconfig import get_python_inc
    +        self.config = config
    +        self.include_dir = get_python_inc()
    +
    +    def passthrough(self, arg):
    +        return arg
    +    listview = passthrough
    +    str_w = passthrough
    +
    +    def unwrap(self, args):
    +        try:
    +            return args.str_w(None)
    +        except:
    +            return args
    +
     class LeakCheckingTest(object):
         """Base class for all cpyext tests."""
         spaceconfig = dict(usemodules=['cpyext', 'thread', '_rawffi', 'array',
    @@ -433,21 +457,8 @@
             self.imported_module_names = []
     
             if self.runappdirect:
    +            fake = FakeSpace(self.space.config)
                 def interp2app(func):
    -                from distutils.sysconfig import get_python_inc
    -                class FakeSpace(object):
    -                    def passthrough(self, arg):
    -                        return arg
    -                    listview = passthrough
    -                    str_w = passthrough
    -                    def unwrap(self, args):
    -                        try:
    -                            return args.str_w(None)
    -                        except:
    -                            return args
    -                fake = FakeSpace()
    -                fake.include_dir = get_python_inc()
    -                fake.config = self.space.config
                     def run(*args, **kwargs):
                         for k in kwargs.keys():
                             if k not in func.unwrap_spec and not k.startswith('w_'):
    diff --git a/pypy/module/cpyext/test/test_memoryobject.py b/pypy/module/cpyext/test/test_memoryobject.py
    --- a/pypy/module/cpyext/test/test_memoryobject.py
    +++ b/pypy/module/cpyext/test/test_memoryobject.py
    @@ -1,14 +1,9 @@
    -import pytest
     from pypy.module.cpyext.test.test_api import BaseApiTest
     from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase
    -
    +from rpython.rlib.buffer import StringBuffer
     
     class TestMemoryViewObject(BaseApiTest):
         def test_fromobject(self, space, api):
    -        if space.is_true(space.lt(space.sys.get('version_info'),
    -                                  space.wrap((2, 7)))):
    -            py.test.skip("unsupported before Python 2.7")
    -
             w_hello = space.newbytes("hello")
             assert api.PyObject_CheckBuffer(w_hello)
             w_view = api.PyMemoryView_FromObject(w_hello)
    @@ -17,6 +12,12 @@
             w_bytes = space.call_method(w_view, "tobytes")
             assert space.unwrap(w_bytes) == "hello"
     
    +    def test_frombuffer(self, space, api):
    +        w_buf = space.newbuffer(StringBuffer("hello"))
    +        w_memoryview = api.PyMemoryView_FromObject(w_buf)
    +        w_view = api.PyMemoryView_GET_BUFFER(w_memoryview)
    +        ndim = w_view.c_ndim
    +        assert ndim == 1
     
     class AppTestBufferProtocol(AppTestCpythonExtensionBase):
         def test_buffer_protocol(self):
    @@ -26,7 +27,25 @@
             y = memoryview(arr)
             assert y.format == 'i'
             assert y.shape == (10,)
    +        assert len(y) == 10
             s = y[3]
             assert len(s) == struct.calcsize('i')
             assert s == struct.pack('i', 3)
    +        viewlen = module.test_buffer(arr)
    +        assert viewlen == y.itemsize * len(y)
     
    +    def test_buffer_info(self):
    +        from _numpypy import multiarray as np
    +        module = self.import_module(name='buffer_test')
    +        get_buffer_info = module.get_buffer_info
    +        # test_export_flags from numpy test_multiarray
    +        raises(ValueError, get_buffer_info, np.arange(5)[::2], ('SIMPLE',))
    +        # test_relaxed_strides from numpy test_multiarray
    +        arr = np.zeros((1, 10))
    +        if arr.flags.f_contiguous:
    +            shape, strides = get_buffer_info(arr, ['F_CONTIGUOUS'])
    +            assert strides[0] == 8
    +            arr = np.ones((10, 1), order='F')
    +            shape, strides = get_buffer_info(arr, ['C_CONTIGUOUS'])
    +            assert strides[-1] == 8
    +
    diff --git a/pypy/module/cpyext/test/test_version.py b/pypy/module/cpyext/test/test_version.py
    --- a/pypy/module/cpyext/test/test_version.py
    +++ b/pypy/module/cpyext/test/test_version.py
    @@ -32,9 +32,11 @@
             assert module.py_minor_version == sys.version_info.minor
             assert module.py_micro_version == sys.version_info.micro
     
    -    @pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='pypy only test')
    +    #@pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='pypy only test')
         def test_pypy_versions(self):
             import sys
    +        if '__pypy__' not in sys.builtin_module_names:
    +            py.test.skip("pypy only test")
             init = """
             if (Py_IsInitialized()) {
                 PyObject *m = Py_InitModule("foo", NULL);
    diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py
    --- a/pypy/module/cpyext/typeobject.py
    +++ b/pypy/module/cpyext/typeobject.py
    @@ -293,6 +293,8 @@
                         STRUCT_TYPE = PyNumberMethods
                     elif slot_names[0] == 'c_tp_as_sequence':
                         STRUCT_TYPE = PySequenceMethods
    +                elif slot_names[0] == 'c_tp_as_buffer':
    +                    STRUCT_TYPE = PyBufferProcs
                     else:
                         raise AssertionError(
                             "Structure not allocated: %s" % (slot_names[0],))
    diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py
    --- a/pypy/module/imp/importing.py
    +++ b/pypy/module/imp/importing.py
    @@ -597,6 +597,11 @@
     
     @jit.dont_look_inside
     def load_module(space, w_modulename, find_info, reuse=False):
    +    """Like load_module() in CPython's import.c, this will normally
    +    make a module object, store it in sys.modules, execute code in it,
    +    and then fetch it again from sys.modules.  But this logic is not
    +    used if we're calling a PEP302 loader.
    +    """
         if find_info is None:
             return
     
    @@ -625,17 +630,15 @@
     
             try:
                 if find_info.modtype == PY_SOURCE:
    -                load_source_module(
    +                return load_source_module(
                         space, w_modulename, w_mod,
                         find_info.filename, find_info.stream.readall(),
                         find_info.stream.try_to_find_file_descriptor())
    -                return w_mod
                 elif find_info.modtype == PY_COMPILED:
                     magic = _r_long(find_info.stream)
                     timestamp = _r_long(find_info.stream)
    -                load_compiled_module(space, w_modulename, w_mod, find_info.filename,
    +                return load_compiled_module(space, w_modulename, w_mod, find_info.filename,
                                          magic, timestamp, find_info.stream.readall())
    -                return w_mod
                 elif find_info.modtype == PKG_DIRECTORY:
                     w_path = space.newlist([space.wrap(find_info.filename)])
                     space.setattr(w_mod, space.wrap('__path__'), w_path)
    @@ -644,14 +647,13 @@
                     if find_info is None:
                         return w_mod
                     try:
    -                    load_module(space, w_modulename, find_info, reuse=True)
    +                    w_mod = load_module(space, w_modulename, find_info,
    +                                        reuse=True)
                     finally:
                         try:
                             find_info.stream.close()
                         except StreamErrors:
                             pass
    -                # fetch the module again, in case of "substitution"
    -                w_mod = check_sys_modules(space, w_modulename)
                     return w_mod
                 elif find_info.modtype == C_EXTENSION and has_so_extension(space):
                     load_c_extension(space, find_info.filename, space.str_w(w_modulename))
    @@ -677,13 +679,6 @@
             try:
                 if find_info:
                     w_mod = load_module(space, w_modulename, find_info)
    -                try:
    -                    w_mod = space.getitem(space.sys.get("modules"),
    -                                          w_modulename)
    -                except OperationError as oe:
    -                    if not oe.match(space, space.w_KeyError):
    -                        raise
    -                    raise OperationError(space.w_ImportError, w_modulename)
                     if w_parent is not None:
                         space.setattr(w_parent, space.wrap(partname), w_mod)
                     return w_mod
    @@ -875,20 +870,32 @@
         pycode = ec.compiler.compile(source, pathname, 'exec', 0)
         return pycode
     
    -def exec_code_module(space, w_mod, code_w):
    +def exec_code_module(space, w_mod, code_w, w_modulename, check_afterwards=True):
    +    """
    +    Execute a code object in the module's dict.  Returns
    +    'sys.modules[modulename]', which must exist.
    +    """
         w_dict = space.getattr(w_mod, space.wrap('__dict__'))
         space.call_method(w_dict, 'setdefault',
                           space.wrap('__builtins__'),
                           space.wrap(space.builtin))
         code_w.exec_code(space, w_dict, w_dict)
     
    +    if check_afterwards:
    +        w_mod = check_sys_modules(space, w_modulename)
    +        if w_mod is None:
    +            raise oefmt(space.w_ImportError,
    +                        "Loaded module %R not found in sys.modules",
    +                        w_modulename)
    +    return w_mod
    +
     
     @jit.dont_look_inside
     def load_source_module(space, w_modulename, w_mod, pathname, source, fd,
    -                       write_pyc=True):
    +                       write_pyc=True, check_afterwards=True):
         """
    -    Load a source module from a given file and return its module
    -    object.
    +    Load a source module from a given file.  Returns the result
    +    of sys.modules[modulename], which must exist.
         """
         w = space.wrap
     
    @@ -927,9 +934,8 @@
             code_w.remove_docstrings(space)
     
         update_code_filenames(space, code_w, pathname)
    -    exec_code_module(space, w_mod, code_w)
    -
    -    return w_mod
    +    return exec_code_module(space, w_mod, code_w, w_modulename,
    +                            check_afterwards=check_afterwards)
     
     def update_code_filenames(space, code_w, pathname, oldname=None):
         assert isinstance(code_w, PyCode)
    @@ -1012,10 +1018,10 @@
     
     @jit.dont_look_inside
     def load_compiled_module(space, w_modulename, w_mod, cpathname, magic,
    -                         timestamp, source):
    +                         timestamp, source, check_afterwards=True):
         """
    -    Load a module from a compiled file, execute it, and return its
    -    module object.
    +    Load a module from a compiled file and execute it.  Returns
    +    'sys.modules[modulename]', which must exist.
         """
         log_pyverbose(space, 1, "import %s # compiled from %s\n" %
                       (space.str_w(w_modulename), cpathname))
    @@ -1032,9 +1038,8 @@
         if optimize >= 2:
             code_w.remove_docstrings(space)
     
    -    exec_code_module(space, w_mod, code_w)
    -
    -    return w_mod
    +    return exec_code_module(space, w_mod, code_w, w_modulename,
    +                            check_afterwards=check_afterwards)
     
     def open_exclusive(space, cpathname, mode):
         try:
    diff --git a/pypy/module/imp/interp_imp.py b/pypy/module/imp/interp_imp.py
    --- a/pypy/module/imp/interp_imp.py
    +++ b/pypy/module/imp/interp_imp.py
    @@ -98,33 +98,35 @@
         w_mod = space.wrap(Module(space, w_modulename))
         importing._prepare_module(space, w_mod, filename, None)
     
    -    importing.load_source_module(
    +    w_mod = importing.load_source_module(
             space, w_modulename, w_mod,
             filename, stream.readall(), stream.try_to_find_file_descriptor())
         if space.is_none(w_file):
             stream.close()
         return w_mod
     
    - at unwrap_spec(filename='str0')
    -def _run_compiled_module(space, w_modulename, filename, w_file, w_module):
    + at unwrap_spec(filename='str0', check_afterwards=int)
    +def _run_compiled_module(space, w_modulename, filename, w_file, w_module,
    +                         check_afterwards=False):
         # the function 'imp._run_compiled_module' is a pypy-only extension
         stream = get_file(space, w_file, filename, 'rb')
     
         magic = importing._r_long(stream)
         timestamp = importing._r_long(stream)
     
    -    importing.load_compiled_module(
    +    w_mod = importing.load_compiled_module(
             space, w_modulename, w_module, filename, magic, timestamp,
    -        stream.readall())
    +        stream.readall(), check_afterwards=check_afterwards)
         if space.is_none(w_file):
             stream.close()
    +    return w_mod
     
     @unwrap_spec(filename='str0')
     def load_compiled(space, w_modulename, filename, w_file=None):
         w_mod = space.wrap(Module(space, w_modulename))
         importing._prepare_module(space, w_mod, filename, None)
    -    _run_compiled_module(space, w_modulename, filename, w_file, w_mod)
    -    return w_mod
    +    return _run_compiled_module(space, w_modulename, filename, w_file, w_mod,
    +                                check_afterwards=True)
     
     @unwrap_spec(filename=str)
     def load_dynamic(space, w_modulename, filename, w_file=None):
    diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py
    --- a/pypy/module/imp/test/test_import.py
    +++ b/pypy/module/imp/test/test_import.py
    @@ -118,7 +118,7 @@
             filename = str(p.join("x.py"))
             stream = streamio.open_file_as_stream(filename, "r")
             try:
    -            importing.load_source_module(
    +            _load_source_module(
                     space, w_modname, w(importing.Module(space, w_modname)),
                     filename, stream.readall(),
                     stream.try_to_find_file_descriptor())
    @@ -139,6 +139,15 @@
     
         return str(root)
     
    +def _load_source_module(space, w_modname, w_mod, *args, **kwds):
    +    kwds.setdefault('check_afterwards', False)
    +    return importing.load_source_module(space, w_modname, w_mod, *args, **kwds)
    +
    +def _load_compiled_module(space, w_modname, w_mod, *args, **kwds):
    +    kwds.setdefault('check_afterwards', False)
    +    return importing.load_compiled_module(space, w_modname, w_mod,
    +                                          *args, **kwds)
    +
     
     def _setup(space):
         dn = setup_directory_structure(space)
    @@ -887,8 +896,7 @@
                 w_mod = space.wrap(Module(space, w_modulename))
                 magic = importing._r_long(stream)
                 timestamp = importing._r_long(stream)
    -            w_ret = importing.load_compiled_module(space,
    -                                                   w_modulename,
    +            w_ret = _load_compiled_module(space,   w_modulename,
                                                        w_mod,
                                                        cpathname,
                                                        magic,
    @@ -946,7 +954,7 @@
             pathname = _testfilesource()
             stream = streamio.open_file_as_stream(pathname, "r")
             try:
    -            w_ret = importing.load_source_module(
    +            w_ret = _load_source_module(
                     space, w_modulename, w_mod,
                     pathname, stream.readall(),
                     stream.try_to_find_file_descriptor())
    @@ -968,7 +976,7 @@
             pathname = _testfilesource()
             stream = streamio.open_file_as_stream(pathname, "r")
             try:
    -            w_ret = importing.load_source_module(
    +            w_ret = _load_source_module(
                     space, w_modulename, w_mod,
                     pathname, stream.readall(),
                     stream.try_to_find_file_descriptor(),
    @@ -987,7 +995,7 @@
             try:
                 space.setattr(space.sys, space.wrap('dont_write_bytecode'),
                               space.w_True)
    -            w_ret = importing.load_source_module(
    +            w_ret = _load_source_module(
                     space, w_modulename, w_mod,
                     pathname, stream.readall(),
                     stream.try_to_find_file_descriptor())
    @@ -1006,7 +1014,7 @@
             pathname = _testfilesource(source="")
             stream = streamio.open_file_as_stream(pathname, "r")
             try:
    -            w_ret = importing.load_source_module(
    +            w_ret = _load_source_module(
                     space, w_modulename, w_mod,
                     pathname, stream.readall(),
                     stream.try_to_find_file_descriptor())
    @@ -1026,7 +1034,7 @@
             pathname = _testfilesource(source="a = unknown_name")
             stream = streamio.open_file_as_stream(pathname, "r")
             try:
    -            w_ret = importing.load_source_module(
    +            w_ret = _load_source_module(
                     space, w_modulename, w_mod,
                     pathname, stream.readall(),
                     stream.try_to_find_file_descriptor())
    @@ -1114,7 +1122,7 @@
                         magic = importing._r_long(stream)
                         timestamp = importing._r_long(stream)
                         space2.raises_w(space2.w_ImportError,
    -                                    importing.load_compiled_module,
    +                                    _load_compiled_module,
                                         space2,
                                         w_modulename,
                                         w_mod,
    @@ -1326,10 +1334,7 @@
             # use an import hook that doesn't update sys.modules, then the
             # import succeeds; but at the same time, you can have the same
             # result without an import hook (see test_del_from_sys_modules)
    -        # and then the import fails.  This looks like even more mess
    -        # to replicate, so we ignore it until someone really hits this
    -        # case...
    -        skip("looks like an inconsistency in CPython")
    +        # and then the import fails.  Mess mess mess.
     
             class ImportHook(object):
                 def find_module(self, fullname, path=None):
    diff --git a/pypy/module/marshal/interp_marshal.py b/pypy/module/marshal/interp_marshal.py
    --- a/pypy/module/marshal/interp_marshal.py
    +++ b/pypy/module/marshal/interp_marshal.py
    @@ -225,15 +225,6 @@
     
         def dump_w_obj(self, w_obj):
             space = self.space
    -        if space.type(w_obj).is_heaptype():
    -            try:
    -                buf = space.readbuf_w(w_obj)
    -            except OperationError as e:
    -                if not e.match(space, space.w_TypeError):
    -                    raise
    -                self.raise_exc("unmarshallable object")
    -            else:
    -                w_obj = space.newbuffer(buf)
             try:
                 self.put_w_obj(w_obj)
             except rstackovf.StackOverflow:
    diff --git a/pypy/module/marshal/test/test_marshal.py b/pypy/module/marshal/test/test_marshal.py
    --- a/pypy/module/marshal/test/test_marshal.py
    +++ b/pypy/module/marshal/test/test_marshal.py
    @@ -186,6 +186,8 @@
                 assert str(exc.value) == 'unmarshallable object'
                 exc = raises(ValueError, marshal.dumps, subtype())
                 assert str(exc.value) == 'unmarshallable object'
    +            exc = raises(ValueError, marshal.dumps, (subtype(),))
    +            assert str(exc.value) == 'unmarshallable object'
     
         def test_valid_subtypes(self):
             import marshal
    diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py
    --- a/pypy/module/micronumpy/compile.py
    +++ b/pypy/module/micronumpy/compile.py
    @@ -460,6 +460,9 @@
         def getdictvalue(self, space, key):
             return self.items[key]
     
    
    From pypy.commits at gmail.com  Wed Sep  7 08:51:33 2016
    From: pypy.commits at gmail.com (arigo)
    Date: Wed, 07 Sep 2016 05:51:33 -0700 (PDT)
    Subject: [pypy-commit] pypy boehm-rawrefcount: test and fix
    Message-ID: <57d00d55.c70a1c0a.5ad4f.5536@mx.google.com>
    
    Author: Armin Rigo 
    Branch: boehm-rawrefcount
    Changeset: r86929:226d5e4b2e41
    Date: 2016-09-07 14:50 +0200
    http://bitbucket.org/pypy/pypy/changeset/226d5e4b2e41/
    
    Log:	test and fix
    
    diff --git a/rpython/rlib/src/boehm-rawrefcount.c b/rpython/rlib/src/boehm-rawrefcount.c
    --- a/rpython/rlib/src/boehm-rawrefcount.c
    +++ b/rpython/rlib/src/boehm-rawrefcount.c
    @@ -190,6 +190,7 @@
                     printf("next_dead: %p\n", result);
     #endif
                     assert(result->ob_refcnt == REFCNT_FROM_PYPY);
    +                result->ob_refcnt = 1;
                     p->pyobj = NULL;
                     *pp = p->next_in_bucket;
                     p->next_in_bucket = hash_free_list;
    diff --git a/rpython/rlib/test/test_rawrefcount.py b/rpython/rlib/test/test_rawrefcount.py
    --- a/rpython/rlib/test/test_rawrefcount.py
    +++ b/rpython/rlib/test/test_rawrefcount.py
    @@ -254,6 +254,9 @@
                 if rawrefcount.next_dead(PyObject) != ob:
                     print "NEXT_DEAD != OB"
                     return 1
    +            if ob.c_ob_refcnt != 1:
    +                print "next_dead().ob_refcnt != 1"
    +                return 1
                 if rawrefcount.next_dead(PyObject) != lltype.nullptr(PyObjectS):
                     print "NEXT_DEAD second time != NULL"
                     return 1
    @@ -294,6 +297,9 @@
                 while True:
                     ob = rawrefcount.next_dead(PyObject)
                     if not ob: break
    +                if ob.c_ob_refcnt != 1:
    +                    print "next_dead().ob_refcnt != 1"
    +                    return 1
                     deadlist.append(ob)
                 if len(deadlist) == 0:
                     print "no dead object"
    
    From pypy.commits at gmail.com  Wed Sep  7 09:00:06 2016
    From: pypy.commits at gmail.com (arigo)
    Date: Wed, 07 Sep 2016 06:00:06 -0700 (PDT)
    Subject: [pypy-commit] pypy reverse-debugger: hg merge boehm-rawrefcount
    Message-ID: <57d00f56.0575c20a.329f0.f1de@mx.google.com>
    
    Author: Armin Rigo 
    Branch: reverse-debugger
    Changeset: r86930:22608a742f02
    Date: 2016-09-07 14:59 +0200
    http://bitbucket.org/pypy/pypy/changeset/22608a742f02/
    
    Log:	hg merge boehm-rawrefcount
    
    diff --git a/rpython/rlib/src/boehm-rawrefcount.c b/rpython/rlib/src/boehm-rawrefcount.c
    --- a/rpython/rlib/src/boehm-rawrefcount.c
    +++ b/rpython/rlib/src/boehm-rawrefcount.c
    @@ -190,6 +190,7 @@
                     printf("next_dead: %p\n", result);
     #endif
                     assert(result->ob_refcnt == REFCNT_FROM_PYPY);
    +                result->ob_refcnt = 1;
                     p->pyobj = NULL;
                     *pp = p->next_in_bucket;
                     p->next_in_bucket = hash_free_list;
    diff --git a/rpython/rlib/test/test_rawrefcount.py b/rpython/rlib/test/test_rawrefcount.py
    --- a/rpython/rlib/test/test_rawrefcount.py
    +++ b/rpython/rlib/test/test_rawrefcount.py
    @@ -254,6 +254,9 @@
                 if rawrefcount.next_dead(PyObject) != ob:
                     print "NEXT_DEAD != OB"
                     return 1
    +            if ob.c_ob_refcnt != 1:
    +                print "next_dead().ob_refcnt != 1"
    +                return 1
                 if rawrefcount.next_dead(PyObject) != lltype.nullptr(PyObjectS):
                     print "NEXT_DEAD second time != NULL"
                     return 1
    @@ -294,6 +297,9 @@
                 while True:
                     ob = rawrefcount.next_dead(PyObject)
                     if not ob: break
    +                if ob.c_ob_refcnt != 1:
    +                    print "next_dead().ob_refcnt != 1"
    +                    return 1
                     deadlist.append(ob)
                 if len(deadlist) == 0:
                     print "no dead object"
    
    From pypy.commits at gmail.com  Wed Sep  7 09:51:04 2016
    From: pypy.commits at gmail.com (arigo)
    Date: Wed, 07 Sep 2016 06:51:04 -0700 (PDT)
    Subject: [pypy-commit] pypy reverse-debugger: Add test file (not working at
     all so far)
    Message-ID: <57d01b48.2146c20a.7f80b.3291@mx.google.com>
    
    Author: Armin Rigo 
    Branch: reverse-debugger
    Changeset: r86931:6b424d821c6c
    Date: 2016-09-07 15:50 +0200
    http://bitbucket.org/pypy/pypy/changeset/6b424d821c6c/
    
    Log:	Add test file (not working at all so far)
    
    diff --git a/rpython/translator/revdb/test/test_rawrefcount.py b/rpython/translator/revdb/test/test_rawrefcount.py
    new file mode 100644
    --- /dev/null
    +++ b/rpython/translator/revdb/test/test_rawrefcount.py
    @@ -0,0 +1,61 @@
    +from rpython.rlib import objectmodel, rgc, revdb
    +from rpython.rtyper.lltypesystem import lltype
    +from rpython.translator.revdb.test.test_basic import InteractiveTests
    +from rpython.translator.revdb.test.test_basic import compile, fetch_rdb, run
    +from rpython.translator.revdb.message import *
    +
    +from rpython.rlib import rawrefcount
    +
    +
    +class TestRawRefcount(InteractiveTests):
    +    expected_stop_points = 27
    +
    +    def setup_class(cls):
    +        class W_Root(object):
    +            def __init__(self, n):
    +                self.n = n
    +        PyObjectS = lltype.Struct('PyObjectS',
    +                                  ('c_ob_refcnt', lltype.Signed),
    +                                  ('c_ob_pypy_link', lltype.Signed))
    +        PyObject = lltype.Ptr(PyObjectS)
    +        w1 = W_Root(-42)
    +        ob1 = lltype.malloc(PyObjectS, flavor='raw', zero=True,
    +                            immortal=True)
    +        ob1.c_ob_refcnt = rawrefcount.REFCNT_FROM_PYPY
    +
    +        def main(argv):
    +            rawrefcount.create_link_pypy(w1, ob1)
    +            w = None
    +            ob = lltype.nullptr(PyObjectS)
    +            oblist = []
    +            for op in argv[1:]:
    +                revdb.stop_point()
    +                w = W_Root(42)
    +                ob = lltype.malloc(PyObjectS, flavor='raw', zero=True)
    +                ob.c_ob_refcnt = rawrefcount.REFCNT_FROM_PYPY
    +                rawrefcount.create_link_pypy(w, ob)
    +                oblist.append(ob)
    +            del oblist[-1]
    +            #
    +            rgc.collect()
    +            assert rawrefcount.from_obj(PyObject, w) == ob
    +            assert rawrefcount.to_obj(W_Root, ob) == w
    +            while True:
    +                ob = rawrefcount.next_dead(PyObject)
    +                if not ob:
    +                    break
    +                assert ob in oblist
    +                oblist.remove(ob)
    +            objectmodel.keepalive_until_here(w)
    +            revdb.stop_point()
    +            return 9
    +        compile(cls, main, backendopt=False)
    +        ARGS26 = 'a b c d e f g h i j k l m n o p q r s t u v w x y z'
    +        run(cls, ARGS26)
    +        rdb = fetch_rdb(cls, [cls.exename] + ARGS26.split())
    +        assert rdb.number_of_stop_points() == cls.expected_stop_points
    +
    +    def test_go(self):
    +        child = self.replay()
    +        child.send(Message(CMD_FORWARD, 50))
    +        child.expect(ANSWER_AT_END)
    
    From pypy.commits at gmail.com  Wed Sep  7 10:07:18 2016
    From: pypy.commits at gmail.com (plan_rich)
    Date: Wed, 07 Sep 2016 07:07:18 -0700 (PDT)
    Subject: [pypy-commit] pypy ppc-vsx-support: merge default
    Message-ID: <57d01f16.e856c20a.a904.b99d@mx.google.com>
    
    Author: Richard Plangger 
    Branch: ppc-vsx-support
    Changeset: r86933:0c3342bbecb5
    Date: 2016-09-07 13:58 +0200
    http://bitbucket.org/pypy/pypy/changeset/0c3342bbecb5/
    
    Log:	merge default
    
    diff --git a/.hgtags b/.hgtags
    --- a/.hgtags
    +++ b/.hgtags
    @@ -30,3 +30,6 @@
     68bb3510d8212ae9efb687e12e58c09d29e74f87 release-pypy2.7-v5.4.0
     68bb3510d8212ae9efb687e12e58c09d29e74f87 release-pypy2.7-v5.4.0
     77392ad263504df011ccfcabf6a62e21d04086d0 release-pypy2.7-v5.4.0
    +050d84dd78997f021acf0e133934275d63547cc0 release-pypy2.7-v5.4.1
    +050d84dd78997f021acf0e133934275d63547cc0 release-pypy2.7-v5.4.1
    +0e2d9a73f5a1818d0245d75daccdbe21b2d5c3ef release-pypy2.7-v5.4.1
    diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO
    --- a/lib_pypy/cffi.egg-info/PKG-INFO
    +++ b/lib_pypy/cffi.egg-info/PKG-INFO
    @@ -1,6 +1,6 @@
     Metadata-Version: 1.1
     Name: cffi
    -Version: 1.8.1
    +Version: 1.8.2
     Summary: Foreign Function Interface for Python calling C code.
     Home-page: http://cffi.readthedocs.org
     Author: Armin Rigo, Maciej Fijalkowski
    diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py
    --- a/lib_pypy/cffi/__init__.py
    +++ b/lib_pypy/cffi/__init__.py
    @@ -4,8 +4,8 @@
     from .api import FFI, CDefError, FFIError
     from .ffiplatform import VerificationError, VerificationMissing
     
    -__version__ = "1.8.1"
    -__version_info__ = (1, 8, 1)
    +__version__ = "1.8.2"
    +__version_info__ = (1, 8, 2)
     
     # The verifier module file names are based on the CRC32 of a string that
     # contains the following version number.  It may be older than __version__
    diff --git a/lib_pypy/cffi/_embedding.h b/lib_pypy/cffi/_embedding.h
    --- a/lib_pypy/cffi/_embedding.h
    +++ b/lib_pypy/cffi/_embedding.h
    @@ -233,7 +233,7 @@
             f = PySys_GetObject((char *)"stderr");
             if (f != NULL && f != Py_None) {
                 PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME
    -                               "\ncompiled with cffi version: 1.8.1"
    +                               "\ncompiled with cffi version: 1.8.2"
                                    "\n_cffi_backend module: ", f);
                 modules = PyImport_GetModuleDict();
                 mod = PyDict_GetItemString(modules, "_cffi_backend");
    diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst
    --- a/pypy/doc/index-of-release-notes.rst
    +++ b/pypy/doc/index-of-release-notes.rst
    @@ -6,6 +6,7 @@
     
     .. toctree::
     
    +   release-pypy2.7-v5.4.1.rst
        release-pypy2.7-v5.4.0.rst
        release-pypy2.7-v5.3.1.rst
        release-pypy2.7-v5.3.0.rst
    diff --git a/pypy/doc/release-pypy2.7-v5.4.1.rst b/pypy/doc/release-pypy2.7-v5.4.1.rst
    new file mode 100644
    --- /dev/null
    +++ b/pypy/doc/release-pypy2.7-v5.4.1.rst
    @@ -0,0 +1,64 @@
    +==========
    +PyPy 5.4.1
    +==========
    +
    +We have released a bugfix for PyPy2.7-v5.4.0, released last week,
    +due to the following issues:
    +
    +  * Update list of contributors in documentation and LICENSE file,
    +    this was unfortunately left out of 5.4.0. My apologies to the new
    +    contributors
    +
    +  * Allow tests run with ``-A`` to find ``libm.so`` even if it is a script not a
    +    dynamically loadable file
    +
    +  * Bump ``sys.setrecursionlimit()`` when translating PyPy, for translating with CPython
    +
    +  * Tweak a float comparison with 0 in ``backendopt.inline`` to avoid rounding errors
    +
    +  * Fix for an issue for translating the sandbox
    +
    +  * Fix for an issue where ``unicode.decode('utf8', 'custom_replace')`` messed up
    +    the last byte of a unicode string sometimes
    +
    +  * Update built-in cffi_ to version 1.8.1
    +
    +  * Explicitly detect that we found as-yet-unsupported OpenSSL 1.1, and crash
    +    translation with a message asking for help porting it
    +
    +  * Fix a regression where a PyBytesObject was forced (converted to a RPython
    +    object) when not required, reported as issue #2395
    +
    +Thanks to those who reported the issues.
    +
    +What is PyPy?
    +=============
    +
    +PyPy is a very compliant Python interpreter, almost a drop-in replacement for
    +CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison)
    +due to its integrated tracing JIT compiler.
    +
    +We also welcome developers of other
    +`dynamic languages`_ to see what RPython can do for them.
    +
    +This release supports:
    +
    +  * **x86** machines on most common operating systems
    +    (Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD),
    +
    +  * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux,
    +
    +  * big- and little-endian variants of **PPC64** running Linux,
    +
    +  * **s390x** running Linux
    +
    +.. _cffi: https://cffi.readthedocs.io
    +.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org
    +.. _`dynamic languages`: http://pypyjs.org
    +
    +Please update, and continue to help us make PyPy better.
    +
    +Cheers
    +
    +The PyPy Team
    +
    diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
    --- a/pypy/doc/whatsnew-head.rst
    +++ b/pypy/doc/whatsnew-head.rst
    @@ -7,3 +7,9 @@
     
     .. branch: rpython-resync
     Backport rpython changes made directly on the py3k and py3.5 branches.
    +
    +.. branch: buffer-interface
    +Implement PyObject_GetBuffer, PyMemoryView_GET_BUFFER, and handles memoryviews
    +in numpypy
    +
    +
    diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
    --- a/pypy/interpreter/baseobjspace.py
    +++ b/pypy/interpreter/baseobjspace.py
    @@ -1428,6 +1428,9 @@
         BUF_FORMAT   = 0x0004
         BUF_ND       = 0x0008
         BUF_STRIDES  = 0x0010 | BUF_ND
    +    BUF_C_CONTIGUOUS = 0x0020 | BUF_STRIDES
    +    BUF_F_CONTIGUOUS = 0x0040 | BUF_STRIDES
    +    BUF_ANY_CONTIGUOUS = 0x0080 | BUF_STRIDES
         BUF_INDIRECT = 0x0100 | BUF_STRIDES
     
         BUF_CONTIG_RO = BUF_ND
    diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py
    --- a/pypy/module/_cffi_backend/__init__.py
    +++ b/pypy/module/_cffi_backend/__init__.py
    @@ -3,7 +3,7 @@
     from rpython.rlib import rdynload, clibffi, entrypoint
     from rpython.rtyper.lltypesystem import rffi
     
    -VERSION = "1.8.1"
    +VERSION = "1.8.2"
     
     FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI
     try:
    diff --git a/pypy/module/_cffi_backend/ctypestruct.py b/pypy/module/_cffi_backend/ctypestruct.py
    --- a/pypy/module/_cffi_backend/ctypestruct.py
    +++ b/pypy/module/_cffi_backend/ctypestruct.py
    @@ -105,9 +105,6 @@
                     return True
             return False
     
    -    def _check_only_one_argument_for_union(self, w_ob):
    -        pass
    -
         def convert_from_object(self, cdata, w_ob):
             if not self._copy_from_same(cdata, w_ob):
                 self.convert_struct_from_object(cdata, w_ob, optvarsize=-1)
    @@ -117,19 +114,24 @@
         )
         def convert_struct_from_object(self, cdata, w_ob, optvarsize):
             self.force_lazy_struct()
    -        self._check_only_one_argument_for_union(w_ob)
     
             space = self.space
             if (space.isinstance_w(w_ob, space.w_list) or
                 space.isinstance_w(w_ob, space.w_tuple)):
                 lst_w = space.listview(w_ob)
    -            if len(lst_w) > len(self._fields_list):
    -                raise oefmt(space.w_ValueError,
    -                            "too many initializers for '%s' (got %d)",
    -                            self.name, len(lst_w))
    -            for i in range(len(lst_w)):
    -                optvarsize = self._fields_list[i].write_v(cdata, lst_w[i],
    +            j = 0
    +            for w_obj in lst_w:
    +                try:
    +                    while (self._fields_list[j].flags &
    +                               W_CField.BF_IGNORE_IN_CTOR):
    +                        j += 1
    +                except IndexError:
    +                    raise oefmt(space.w_ValueError,
    +                                "too many initializers for '%s' (got %d)",
    +                                self.name, len(lst_w))
    +                optvarsize = self._fields_list[j].write_v(cdata, w_obj,
                                                               optvarsize)
    +                j += 1
                 return optvarsize
     
             elif space.isinstance_w(w_ob, space.w_dict):
    @@ -185,14 +187,6 @@
     class W_CTypeUnion(W_CTypeStructOrUnion):
         kind = "union"
     
    -    def _check_only_one_argument_for_union(self, w_ob):
    -        space = self.space
    -        n = space.int_w(space.len(w_ob))
    -        if n > 1:
    -            raise oefmt(space.w_ValueError,
    -                        "initializer for '%s': %d items given, but only one "
    -                        "supported (use a dict if needed)", self.name, n)
    -
     
     class W_CField(W_Root):
         _immutable_ = True
    @@ -200,18 +194,21 @@
         BS_REGULAR     = -1
         BS_EMPTY_ARRAY = -2
     
    -    def __init__(self, ctype, offset, bitshift, bitsize):
    +    BF_IGNORE_IN_CTOR = 0x01
    +
    +    def __init__(self, ctype, offset, bitshift, bitsize, flags):
             self.ctype = ctype
             self.offset = offset
             self.bitshift = bitshift # >= 0: bitshift; or BS_REGULAR/BS_EMPTY_ARRAY
             self.bitsize = bitsize
    +        self.flags = flags       # BF_xxx
     
         def is_bitfield(self):
             return self.bitshift >= 0
     
    -    def make_shifted(self, offset):
    +    def make_shifted(self, offset, fflags):
             return W_CField(self.ctype, offset + self.offset,
    -                        self.bitshift, self.bitsize)
    +                        self.bitshift, self.bitsize, self.flags | fflags)
     
         def read(self, cdata):
             cdata = rffi.ptradd(cdata, self.offset)
    @@ -341,5 +338,6 @@
         offset = interp_attrproperty('offset', W_CField),
         bitshift = interp_attrproperty('bitshift', W_CField),
         bitsize = interp_attrproperty('bitsize', W_CField),
    +    flags = interp_attrproperty('flags', W_CField),
         )
     W_CField.typedef.acceptable_as_base_class = False
    diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py
    --- a/pypy/module/_cffi_backend/newtype.py
    +++ b/pypy/module/_cffi_backend/newtype.py
    @@ -345,6 +345,11 @@
             if alignment < falign and do_align:
                 alignment = falign
             #
    +        if is_union and i > 0:
    +            fflags = ctypestruct.W_CField.BF_IGNORE_IN_CTOR
    +        else:
    +            fflags = 0
    +        #
             if fbitsize < 0:
                 # not a bitfield: common case
     
    @@ -372,7 +377,7 @@
                     for name, srcfld in ftype._fields_dict.items():
                         srcfield2names[srcfld] = name
                     for srcfld in ftype._fields_list:
    -                    fld = srcfld.make_shifted(boffset // 8)
    +                    fld = srcfld.make_shifted(boffset // 8, fflags)
                         fields_list.append(fld)
                         try:
                             fields_dict[srcfield2names[srcfld]] = fld
    @@ -382,7 +387,8 @@
                     w_ctype._custom_field_pos = True
                 else:
                     # a regular field
    -                fld = ctypestruct.W_CField(ftype, boffset // 8, bs_flag, -1)
    +                fld = ctypestruct.W_CField(ftype, boffset // 8, bs_flag, -1,
    +                                           fflags)
                     fields_list.append(fld)
                     fields_dict[fname] = fld
     
    @@ -489,7 +495,7 @@
                         bitshift = 8 * ftype.size - fbitsize- bitshift
     
                     fld = ctypestruct.W_CField(ftype, field_offset_bytes,
    -                                           bitshift, fbitsize)
    +                                           bitshift, fbitsize, fflags)
                     fields_list.append(fld)
                     fields_dict[fname] = fld
     
    diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py
    --- a/pypy/module/_cffi_backend/test/_backend_test_c.py
    +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py
    @@ -1,7 +1,7 @@
     # ____________________________________________________________
     
     import sys
    -assert __version__ == "1.8.1", ("This test_c.py file is for testing a version"
    +assert __version__ == "1.8.2", ("This test_c.py file is for testing a version"
                                     " of cffi that differs from the one that we"
                                     " get from 'import _cffi_backend'")
     if sys.version_info < (3,):
    @@ -2525,6 +2525,25 @@
         assert d[2][1].bitshift == -1
         assert d[2][1].bitsize == -1
     
    +def test_nested_anonymous_struct_2():
    +    BInt = new_primitive_type("int")
    +    BStruct = new_struct_type("struct foo")
    +    BInnerUnion = new_union_type("union bar")
    +    complete_struct_or_union(BInnerUnion, [('a1', BInt, -1),
    +                                           ('a2', BInt, -1)])
    +    complete_struct_or_union(BStruct, [('b1', BInt, -1),
    +                                       ('', BInnerUnion, -1),
    +                                       ('b2', BInt, -1)])
    +    assert sizeof(BInnerUnion) == sizeof(BInt)
    +    assert sizeof(BStruct) == sizeof(BInt) * 3
    +    fields = [(name, fld.offset, fld.flags) for (name, fld) in BStruct.fields]
    +    assert fields == [
    +        ('b1', 0 * sizeof(BInt), 0),
    +        ('a1', 1 * sizeof(BInt), 0),
    +        ('a2', 1 * sizeof(BInt), 1),
    +        ('b2', 2 * sizeof(BInt), 0),
    +    ]
    +
     def test_sizeof_union():
         # a union has the largest alignment of its members, and a total size
         # that is the largest of its items *possibly further aligned* if
    diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py
    --- a/pypy/module/_ssl/test/test_ssl.py
    +++ b/pypy/module/_ssl/test/test_ssl.py
    @@ -450,7 +450,12 @@
                     # For compatibility
                     assert exc.value.errno == _ssl.SSL_ERROR_WANT_READ
                 finally:
    -                c.shutdown()
    +                try:
    +                    c.shutdown()
    +                except _ssl.SSLError:
    +                    # If the expected exception was raised, the SSLContext
    +                    # can't be shut down yet
    +                    pass
             finally:
                 s.close()
     
    diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py
    --- a/pypy/module/array/interp_array.py
    +++ b/pypy/module/array/interp_array.py
    @@ -597,6 +597,18 @@
         def getlength(self):
             return self.array.len * self.array.itemsize
     
    +    def getformat(self):
    +        return self.array.typecode
    +
    +    def getitemsize(self):
    +        return self.array.itemsize
    +
    +    def getndim(self):
    +        return 1
    +
    +    def getstrides(self):
    +        return [self.getitemsize()]
    +
         def getitem(self, index):
             array = self.array
             data = array._charbuf_start()
    diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
    --- a/pypy/module/cpyext/api.py
    +++ b/pypy/module/cpyext/api.py
    @@ -122,7 +122,7 @@
     METH_COEXIST METH_STATIC METH_CLASS Py_TPFLAGS_BASETYPE
     METH_NOARGS METH_VARARGS METH_KEYWORDS METH_O Py_TPFLAGS_HAVE_INPLACEOPS
     Py_TPFLAGS_HEAPTYPE Py_TPFLAGS_HAVE_CLASS Py_TPFLAGS_HAVE_NEWBUFFER
    -Py_LT Py_LE Py_EQ Py_NE Py_GT Py_GE Py_TPFLAGS_CHECKTYPES
    +Py_LT Py_LE Py_EQ Py_NE Py_GT Py_GE Py_TPFLAGS_CHECKTYPES Py_MAX_NDIMS
     """.split()
     for name in constant_names:
         setattr(CConfig_constants, name, rffi_platform.ConstantInteger(name))
    @@ -645,6 +645,9 @@
             ('format', rffi.CCHARP),
             ('shape', Py_ssize_tP),
             ('strides', Py_ssize_tP),
    +        ('_format', rffi.UCHAR),
    +        ('_shape', rffi.CFixedArray(Py_ssize_t, Py_MAX_NDIMS)),
    +        ('_strides', rffi.CFixedArray(Py_ssize_t, Py_MAX_NDIMS)),
             ('suboffsets', Py_ssize_tP),
             #('smalltable', rffi.CFixedArray(Py_ssize_t, 2)),
             ('internal', rffi.VOIDP)
    diff --git a/pypy/module/cpyext/buffer.py b/pypy/module/cpyext/buffer.py
    --- a/pypy/module/cpyext/buffer.py
    +++ b/pypy/module/cpyext/buffer.py
    @@ -1,8 +1,9 @@
     from pypy.interpreter.error import oefmt
     from rpython.rtyper.lltypesystem import rffi, lltype
    +from rpython.rlib.rarithmetic import widen
     from pypy.module.cpyext.api import (
    -    cpython_api, CANNOT_FAIL, Py_buffer, Py_TPFLAGS_HAVE_NEWBUFFER)
    -from pypy.module.cpyext.pyobject import PyObject
    +    cpython_api, CANNOT_FAIL, Py_buffer, Py_TPFLAGS_HAVE_NEWBUFFER, Py_ssize_tP)
    +from pypy.module.cpyext.pyobject import PyObject, make_ref, incref
     
     @cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL)
     def PyObject_CheckBuffer(space, pyobj):
    @@ -33,13 +34,82 @@
         raise an error if the object can't support a simpler view of its memory.
     
         0 is returned on success and -1 on error."""
    -    raise oefmt(space.w_TypeError,
    -                "PyPy does not yet implement the new buffer interface")
    +    flags = widen(flags)
    +    buf = space.buffer_w(w_obj, flags)
    +    try:
    +        view.c_buf = rffi.cast(rffi.VOIDP, buf.get_raw_address())
    +    except ValueError:
    +        raise BufferError("could not create buffer from object")
    +    view.c_len = buf.getlength()
    +    view.c_obj = make_ref(space, w_obj)
    +    ndim = buf.getndim()
    +    view.c_itemsize = buf.getitemsize()
    +    rffi.setintfield(view, 'c_readonly', int(buf.readonly))
    +    rffi.setintfield(view, 'c_ndim', ndim)
    +    view.c_format = rffi.str2charp(buf.getformat())
    +    view.c_shape = lltype.malloc(Py_ssize_tP.TO, ndim, flavor='raw')
    +    view.c_strides = lltype.malloc(Py_ssize_tP.TO, ndim, flavor='raw')
    +    shape = buf.getshape()
    +    strides = buf.getstrides()
    +    for i in range(ndim):
    +        view.c_shape[i] = shape[i]
    +        view.c_strides[i] = strides[i]
    +    view.c_suboffsets = lltype.nullptr(Py_ssize_tP.TO)
    +    view.c_internal = lltype.nullptr(rffi.VOIDP.TO)
    +    return 0
    +
    +def _IsFortranContiguous(view):
    +    ndim = widen(view.c_ndim)
    +    if ndim == 0:
    +        return 1
    +    if not view.c_strides:
    +        return ndim == 1
    +    sd = view.c_itemsize
    +    if ndim == 1:
    +        return view.c_shape[0] == 1 or sd == view.c_strides[0]
    +    for i in range(view.c_ndim):
    +        dim = view.c_shape[i]
    +        if dim == 0:
    +            return 1
    +        if view.c_strides[i] != sd:
    +            return 0
    +        sd *= dim
    +    return 1
    +
    +def _IsCContiguous(view):
    +    ndim = widen(view.c_ndim)
    +    if ndim == 0:
    +        return 1
    +    if not view.c_strides:
    +        return ndim == 1
    +    sd = view.c_itemsize
    +    if ndim == 1:
    +        return view.c_shape[0] == 1 or sd == view.c_strides[0]
    +    for i in range(ndim - 1, -1, -1):
    +        dim = view.c_shape[i]
    +        if dim == 0:
    +            return 1
    +        if view.c_strides[i] != sd:
    +            return 0
    +        sd *= dim
    +    return 1
    +        
     
     @cpython_api([lltype.Ptr(Py_buffer), lltype.Char], rffi.INT_real, error=CANNOT_FAIL)
    -def PyBuffer_IsContiguous(space, view, fortran):
    +def PyBuffer_IsContiguous(space, view, fort):
         """Return 1 if the memory defined by the view is C-style (fortran is
         'C') or Fortran-style (fortran is 'F') contiguous or either one
         (fortran is 'A').  Return 0 otherwise."""
    -    # PyPy only supports contiguous Py_buffers for now.
    -    return 1
    +    # traverse the strides, checking for consistent stride increases from
    +    # right-to-left (c) or left-to-right (fortran). Copied from cpython
    +    if not view.c_suboffsets:
    +        return 0
    +    if (fort == 'C'):
    +        return _IsCContiguous(view)
    +    elif (fort == 'F'):
    +        return _IsFortranContiguous(view)
    +    elif (fort == 'A'):
    +        return (_IsCContiguous(view) or _IsFortranContiguous(view))
    +    return 0
    +
    +    
    diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py
    --- a/pypy/module/cpyext/bytesobject.py
    +++ b/pypy/module/cpyext/bytesobject.py
    @@ -14,45 +14,33 @@
     ## Implementation of PyBytesObject
     ## ================================
     ##
    -## The problem
    -## -----------
    +## PyBytesObject has its own ob_sval buffer, so we have two copies of a string;
    +## one in the PyBytesObject returned from various C-API functions and another
    +## in the corresponding RPython object.
     ##
    -## PyString_AsString() must return a (non-movable) pointer to the underlying
    -## ob_sval, whereas pypy strings are movable.  C code may temporarily store
    -## this address and use it, as long as it owns a reference to the PyObject.
    -## There is no "release" function to specify that the pointer is not needed
    -## any more.
    +## The following calls can create a PyBytesObject without a corresponding
    +## RPython object:
     ##
    -## Also, the pointer may be used to fill the initial value of string. This is
    -## valid only when the string was just allocated, and is not used elsewhere.
    +## PyBytes_FromStringAndSize(NULL, n) / PyString_FromStringAndSize(NULL, n)
     ##
    -## Solution
    -## --------
    +## In the PyBytesObject returned, the ob_sval buffer may be modified as
    +## long as the freshly allocated PyBytesObject is not "forced" via a call
    +## to any of the more sophisticated C-API functions. 
     ##
    -## PyBytesObject contains two additional members: the ob_size and a pointer to a
    -## char ob_sval; it may be NULL.
    -##
    -## - A string allocated by pypy will be converted into a PyBytesObject with a
    -##   NULL buffer.  The first time PyString_AsString() is called, memory is
    -##   allocated (with flavor='raw') and content is copied.
    -##
    -## - A string allocated with PyString_FromStringAndSize(NULL, size) will
    -##   allocate a PyBytesObject structure, and a buffer with the specified
    -##   size+1, but the reference won't be stored in the global map; there is no
    -##   corresponding object in pypy.  When from_ref() or Py_INCREF() is called,
    -##   the pypy string is created, and added to the global map of tracked
    -##   objects.  The buffer is then supposed to be immutable.
    -##
    -##-  A buffer obtained from PyString_AS_STRING() could be mutable iff
    -##   there is no corresponding pypy object for the string
    -##
    -## - _PyString_Resize() works only on not-yet-pypy'd strings, and returns a
    -##   similar object.
    -##
    -## - PyString_Size() doesn't need to force the object.
    +## Care has been taken in implementing the functions below, so that
    +## if they are called with a non-forced PyBytesObject, they will not 
    +## unintentionally force the creation of a RPython object. As long as only these
    +## are used, the ob_sval buffer is still modifiable:
    +## 
    +## PyBytes_AsString / PyString_AsString 
    +## PyBytes_AS_STRING / PyString_AS_STRING
    +## PyBytes_AsStringAndSize / PyString_AsStringAndSize
    +## PyBytes_Size / PyString_Size
    +## PyBytes_Resize / PyString_Resize
    +## _PyBytes_Resize / _PyString_Resize (raises if called with a forced object)
     ##
     ## - There could be an (expensive!) check in from_ref() that the buffer still
    -##   corresponds to the pypy gc-managed string.
    +##   corresponds to the pypy gc-managed string, 
     ##
     
     PyBytesObjectStruct = lltype.ForwardReference()
    @@ -156,9 +144,6 @@
                             "expected string or Unicode object, %T found",
                             from_ref(space, ref))
         ref_str = rffi.cast(PyBytesObject, ref)
    -    if not pyobj_has_w_obj(ref):
    -        # XXX Force the ref?
    -        bytes_realize(space, ref)
         return ref_str.c_ob_sval
     
     @cpython_api([rffi.VOIDP], rffi.CCHARP, error=0)
    @@ -182,9 +167,6 @@
                 raise oefmt(space.w_TypeError,
                             "expected string or Unicode object, %T found",
                             from_ref(space, ref))
    -    if not pyobj_has_w_obj(ref):
    -        # force the ref
    -        bytes_realize(space, ref)
         ref_str = rffi.cast(PyBytesObject, ref)
         data[0] = ref_str.c_ob_sval
         if length:
    diff --git a/pypy/module/cpyext/include/object.h b/pypy/module/cpyext/include/object.h
    --- a/pypy/module/cpyext/include/object.h
    +++ b/pypy/module/cpyext/include/object.h
    @@ -142,7 +142,8 @@
     typedef Py_ssize_t (*segcountproc)(PyObject *, Py_ssize_t *);
     typedef Py_ssize_t (*charbufferproc)(PyObject *, Py_ssize_t, char **);
     
    -/* Py3k buffer interface */
    +/* Py3k buffer interface, adapted for PyPy */
    +#define Py_MAX_NDIMS 32
     typedef struct bufferinfo {
         void *buf;
         PyObject *obj;        /* owned reference */
    @@ -156,12 +157,14 @@
         char *format;
         Py_ssize_t *shape;
         Py_ssize_t *strides;
    -    Py_ssize_t *suboffsets;
    -
    +    Py_ssize_t *suboffsets; /* always NULL for app-level objects*/
    +    unsigned char _format;
    +    Py_ssize_t _strides[Py_MAX_NDIMS];
    +    Py_ssize_t _shape[Py_MAX_NDIMS];
         /* static store for shape and strides of
            mono-dimensional buffers. */
         /* Py_ssize_t smalltable[2]; */
    -    void *internal;
    +    void *internal; /* always NULL for app-level objects */
     } Py_buffer;
     
     
    diff --git a/pypy/module/cpyext/memoryobject.py b/pypy/module/cpyext/memoryobject.py
    --- a/pypy/module/cpyext/memoryobject.py
    +++ b/pypy/module/cpyext/memoryobject.py
    @@ -1,7 +1,8 @@
     from pypy.module.cpyext.api import (cpython_api, Py_buffer, CANNOT_FAIL,
    -                                    build_type_checkers)
    -from pypy.module.cpyext.pyobject import PyObject
    -from rpython.rtyper.lltypesystem import lltype
    +                               Py_MAX_NDIMS, build_type_checkers, Py_ssize_tP)
    +from pypy.module.cpyext.pyobject import PyObject, make_ref, incref
    +from rpython.rtyper.lltypesystem import lltype, rffi
    +from pypy.objspace.std.memoryobject import W_MemoryView
     
     PyMemoryView_Check, PyMemoryView_CheckExact = build_type_checkers("MemoryView", "w_memoryview")
     
    @@ -12,6 +13,7 @@
     @cpython_api([PyObject], PyObject)
     def PyMemoryView_GET_BASE(space, w_obj):
         # return the obj field of the Py_buffer created by PyMemoryView_GET_BUFFER
    +    # XXX needed for numpy on py3k
         raise NotImplementedError('PyMemoryView_GET_BUFFER')
     
     @cpython_api([PyObject], lltype.Ptr(Py_buffer), error=CANNOT_FAIL)
    @@ -20,21 +22,35 @@
         object.  The object must be a memoryview instance; this macro doesn't
         check its type, you must do it yourself or you will risk crashes."""
         view = lltype.malloc(Py_buffer, flavor='raw', zero=True)
    -    # TODO - fill in fields
    -    '''
    -    view.c_buf = buf
    -    view.c_len = length
    -    view.c_obj = obj
    -    Py_IncRef(space, obj)
    -    view.c_itemsize = 1
    -    rffi.setintfield(view, 'c_readonly', readonly)
    -    rffi.setintfield(view, 'c_ndim', 0)
    -    view.c_format = lltype.nullptr(rffi.CCHARP.TO)
    -    view.c_shape = lltype.nullptr(Py_ssize_tP.TO)
    -    view.c_strides = lltype.nullptr(Py_ssize_tP.TO)
    +    if not isinstance(w_obj, W_MemoryView):
    +        return view
    +    ndim = w_obj.buf.getndim()
    +    if ndim >= Py_MAX_NDIMS:
    +        # XXX warn?
    +        return view
    +    try:
    +        view.c_buf = rffi.cast(rffi.VOIDP, w_obj.buf.get_raw_address())
    +        view.c_obj = make_ref(space, w_obj)
    +        rffi.setintfield(view, 'c_readonly', w_obj.buf.readonly)
    +        isstr = False
    +    except ValueError:
    +        w_s = w_obj.descr_tobytes(space)
    +        view.c_obj = make_ref(space, w_s)
    +        rffi.setintfield(view, 'c_readonly', 1)
    +        isstr = True
    +    view.c_len = w_obj.getlength()
    +    view.c_itemsize = w_obj.buf.getitemsize()
    +    rffi.setintfield(view, 'c_ndim', ndim)
    +    view.c__format = rffi.cast(rffi.UCHAR, w_obj.buf.getformat())
    +    view.c_format = rffi.cast(rffi.CCHARP, view.c__format)
    +    view.c_shape = rffi.cast(Py_ssize_tP, view.c__shape)
    +    view.c_strides = rffi.cast(Py_ssize_tP, view.c__strides)
    +    shape = w_obj.buf.getshape()
    +    strides = w_obj.buf.getstrides()
    +    for i in range(ndim):
    +        view.c_shape[i] = shape[i]
    +        view.c_strides[i] = strides[i]
         view.c_suboffsets = lltype.nullptr(Py_ssize_tP.TO)
         view.c_internal = lltype.nullptr(rffi.VOIDP.TO)
    -    ''' 
         return view
     
    -
    diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py
    --- a/pypy/module/cpyext/object.py
    +++ b/pypy/module/cpyext/object.py
    @@ -508,10 +508,9 @@
     @cpython_api([lltype.Ptr(Py_buffer)], lltype.Void, error=CANNOT_FAIL)
     def PyBuffer_Release(space, view):
         """
    -    Releases a Py_buffer obtained from getbuffer ParseTuple's s*.
    -
    -    This is not a complete re-implementation of the CPython API; it only
    -    provides a subset of CPython's behavior.
    +    Release the buffer view. This should be called when the buffer is 
    +    no longer being used as it may free memory from it
         """
         Py_DecRef(space, view.c_obj)
         view.c_obj = lltype.nullptr(PyObject.TO)
    +    # XXX do other fields leak memory?
    diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py
    --- a/pypy/module/cpyext/slotdefs.py
    +++ b/pypy/module/cpyext/slotdefs.py
    @@ -335,9 +335,15 @@
         def getshape(self):
             return self.shape
     
    +    def getstrides(self):
    +        return self.strides
    +
         def getitemsize(self):
             return self.itemsize
     
    +    def getndim(self):
    +        return self.ndim
    +
     def wrap_getreadbuffer(space, w_self, w_args, func):
         func_target = rffi.cast(readbufferproc, func)
         with lltype.scoped_alloc(rffi.VOIDPP.TO, 1) as ptr:
    diff --git a/pypy/module/cpyext/test/buffer_test.c b/pypy/module/cpyext/test/buffer_test.c
    --- a/pypy/module/cpyext/test/buffer_test.c
    +++ b/pypy/module/cpyext/test/buffer_test.c
    @@ -107,14 +107,11 @@
     PyMyArray_getbuffer(PyObject *obj, Py_buffer *view, int flags)
     {
       PyMyArray* self = (PyMyArray*)obj;
    -  fprintf(stdout, "in PyMyArray_getbuffer\n");
       if (view == NULL) {
    -    fprintf(stdout, "view is NULL\n");
         PyErr_SetString(PyExc_ValueError, "NULL view in getbuffer");
         return -1;
       }
       if (flags == 0) {
    -    fprintf(stdout, "flags is 0\n");
         PyErr_SetString(PyExc_ValueError, "flags == 0 in getbuffer");
         return -1;
       }
    @@ -188,7 +185,131 @@
         (initproc)PyMyArray_init,     /* tp_init */
     };
     
    +static PyObject*
    +test_buffer(PyObject* self, PyObject* args)
    +{
    +    Py_buffer* view = NULL;
    +    PyObject* obj = PyTuple_GetItem(args, 0);
    +    PyObject* memoryview = PyMemoryView_FromObject(obj);
    +    if (memoryview == NULL)
    +        return PyInt_FromLong(-1);
    +    view = PyMemoryView_GET_BUFFER(memoryview);
    +    Py_DECREF(memoryview);
    +    return PyInt_FromLong(view->len);
    +}
    +
    +/* Copied from numpy tests */
    +/*
    + * Create python string from a FLAG and or the corresponding PyBuf flag
    + * for the use in get_buffer_info.
    + */
    +#define GET_PYBUF_FLAG(FLAG)                                        \
    +    buf_flag = PyUnicode_FromString(#FLAG);                         \
    +    flag_matches = PyObject_RichCompareBool(buf_flag, tmp, Py_EQ);  \
    +    Py_DECREF(buf_flag);                                            \
    +    if (flag_matches == 1) {                                        \
    +        Py_DECREF(tmp);                                             \
    +        flags |= PyBUF_##FLAG;                                      \
    +        continue;                                                   \
    +    }                                                               \
    +    else if (flag_matches == -1) {                                  \
    +        Py_DECREF(tmp);                                             \
    +        return NULL;                                                \
    +    }
    +
    +
    +/*
    + * Get information for a buffer through PyBuf_GetBuffer with the
    + * corresponding flags or'ed. Note that the python caller has to
    + * make sure that or'ing those flags actually makes sense.
    + * More information should probably be returned for future tests.
    + */
    +static PyObject *
    +get_buffer_info(PyObject *self, PyObject *args)
    +{
    +    PyObject *buffer_obj, *pyflags;
    +    PyObject *tmp, *buf_flag;
    +    Py_buffer buffer;
    +    PyObject *shape, *strides;
    +    Py_ssize_t i, n;
    +    int flag_matches;
    +    int flags = 0;
    +
    +    if (!PyArg_ParseTuple(args, "OO", &buffer_obj, &pyflags)) {
    +        return NULL;
    +    }
    +
    +    n = PySequence_Length(pyflags);
    +    if (n < 0) {
    +        return NULL;
    +    }
    +
    +    for (i=0; i < n; i++) {
    +        tmp = PySequence_GetItem(pyflags, i);
    +        if (tmp == NULL) {
    +            return NULL;
    +        }
    +
    +        GET_PYBUF_FLAG(SIMPLE);
    +        GET_PYBUF_FLAG(WRITABLE);
    +        GET_PYBUF_FLAG(STRIDES);
    +        GET_PYBUF_FLAG(ND);
    +        GET_PYBUF_FLAG(C_CONTIGUOUS);
    +        GET_PYBUF_FLAG(F_CONTIGUOUS);
    +        GET_PYBUF_FLAG(ANY_CONTIGUOUS);
    +        GET_PYBUF_FLAG(INDIRECT);
    +        GET_PYBUF_FLAG(FORMAT);
    +        GET_PYBUF_FLAG(STRIDED);
    +        GET_PYBUF_FLAG(STRIDED_RO);
    +        GET_PYBUF_FLAG(RECORDS);
    +        GET_PYBUF_FLAG(RECORDS_RO);
    +        GET_PYBUF_FLAG(FULL);
    +        GET_PYBUF_FLAG(FULL_RO);
    +        GET_PYBUF_FLAG(CONTIG);
    +        GET_PYBUF_FLAG(CONTIG_RO);
    +
    +        Py_DECREF(tmp);
    +
    +        /* One of the flags must match */
    +        PyErr_SetString(PyExc_ValueError, "invalid flag used.");
    +        return NULL;
    +    }
    +
    +    if (PyObject_GetBuffer(buffer_obj, &buffer, flags) < 0) {
    +        return NULL;
    +    }
    +
    +    if (buffer.shape == NULL) {
    +        Py_INCREF(Py_None);
    +        shape = Py_None;
    +    }
    +    else {
    +        shape = PyTuple_New(buffer.ndim);
    +        for (i=0; i < buffer.ndim; i++) {
    +            PyTuple_SET_ITEM(shape, i, PyLong_FromSsize_t(buffer.shape[i]));
    +        }
    +    }
    +
    +    if (buffer.strides == NULL) {
    +        Py_INCREF(Py_None);
    +        strides = Py_None;
    +    }
    +    else {
    +        strides = PyTuple_New(buffer.ndim);
    +        for (i=0; i < buffer.ndim; i++) {
    +            PyTuple_SET_ITEM(strides, i, PyLong_FromSsize_t(buffer.strides[i]));
    +        }
    +    }
    +
    +    PyBuffer_Release(&buffer);
    +    return Py_BuildValue("(NN)", shape, strides);
    +}
    +
    +
    +
     static PyMethodDef buffer_functions[] = {
    +    {"test_buffer",   (PyCFunction)test_buffer, METH_VARARGS, NULL},
    +    {"get_buffer_info",   (PyCFunction)get_buffer_info, METH_VARARGS, NULL},
         {NULL,        NULL}    /* Sentinel */
     };
     
    diff --git a/pypy/module/cpyext/test/test_bytesobject.py b/pypy/module/cpyext/test/test_bytesobject.py
    --- a/pypy/module/cpyext/test/test_bytesobject.py
    +++ b/pypy/module/cpyext/test/test_bytesobject.py
    @@ -183,8 +183,27 @@
                      Py_INCREF(Py_None);
                      return Py_None;
                  """),
    +            ("c_only", "METH_NOARGS",
    +            """
    +                int ret;
    +                char * buf2;
    +                PyObject * obj = PyBytes_FromStringAndSize(NULL, 1024);
    +                if (!obj)
    +                    return NULL;
    +                buf2 = PyBytes_AsString(obj);
    +                if (!buf2)
    +                    return NULL;
    +                /* buf should not have been forced, issue #2395 */
    +                ret = _PyBytes_Resize(&obj, 512);
    +                if (ret < 0)
    +                    return NULL;
    +                 Py_DECREF(obj);
    +                 Py_INCREF(Py_None);
    +                 return Py_None;
    +            """),
                 ])
             module.getbytes()
    +        module.c_only()
     
         def test_py_string_as_string_Unicode(self):
             module = self.import_extension('foo', [
    diff --git a/pypy/module/cpyext/test/test_memoryobject.py b/pypy/module/cpyext/test/test_memoryobject.py
    --- a/pypy/module/cpyext/test/test_memoryobject.py
    +++ b/pypy/module/cpyext/test/test_memoryobject.py
    @@ -1,6 +1,6 @@
     from pypy.module.cpyext.test.test_api import BaseApiTest
     from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase
    -
    +from rpython.rlib.buffer import StringBuffer
     
     class TestMemoryViewObject(BaseApiTest):
         def test_fromobject(self, space, api):
    @@ -12,6 +12,12 @@
             w_bytes = space.call_method(w_view, "tobytes")
             assert space.unwrap(w_bytes) == "hello"
     
    +    def test_frombuffer(self, space, api):
    +        w_buf = space.newbuffer(StringBuffer("hello"))
    +        w_memoryview = api.PyMemoryView_FromObject(w_buf)
    +        w_view = api.PyMemoryView_GET_BUFFER(w_memoryview)
    +        ndim = w_view.c_ndim
    +        assert ndim == 1
     
     class AppTestBufferProtocol(AppTestCpythonExtensionBase):
         def test_buffer_protocol(self):
    @@ -21,6 +27,25 @@
             y = memoryview(arr)
             assert y.format == 'i'
             assert y.shape == (10,)
    +        assert len(y) == 10
             s = y[3]
             assert len(s) == struct.calcsize('i')
             assert s == struct.pack('i', 3)
    +        viewlen = module.test_buffer(arr)
    +        assert viewlen == y.itemsize * len(y)
    +
    +    def test_buffer_info(self):
    +        from _numpypy import multiarray as np
    +        module = self.import_module(name='buffer_test')
    +        get_buffer_info = module.get_buffer_info
    +        # test_export_flags from numpy test_multiarray
    +        raises(ValueError, get_buffer_info, np.arange(5)[::2], ('SIMPLE',))
    +        # test_relaxed_strides from numpy test_multiarray
    +        arr = np.zeros((1, 10))
    +        if arr.flags.f_contiguous:
    +            shape, strides = get_buffer_info(arr, ['F_CONTIGUOUS'])
    +            assert strides[0] == 8
    +            arr = np.ones((10, 1), order='F')
    +            shape, strides = get_buffer_info(arr, ['C_CONTIGUOUS'])
    +            assert strides[-1] == 8
    +
    diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py
    --- a/pypy/module/cpyext/typeobject.py
    +++ b/pypy/module/cpyext/typeobject.py
    @@ -293,6 +293,8 @@
                         STRUCT_TYPE = PyNumberMethods
                     elif slot_names[0] == 'c_tp_as_sequence':
                         STRUCT_TYPE = PySequenceMethods
    +                elif slot_names[0] == 'c_tp_as_buffer':
    +                    STRUCT_TYPE = PyBufferProcs
                     else:
                         raise AssertionError(
                             "Structure not allocated: %s" % (slot_names[0],))
    diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py
    --- a/pypy/module/imp/importing.py
    +++ b/pypy/module/imp/importing.py
    @@ -259,10 +259,8 @@
             raise oefmt(space.w_ValueError, "Empty module name")
         w = space.wrap
     
    -    if w_fromlist is not None and space.is_true(w_fromlist):
    -        fromlist_w = space.fixedview(w_fromlist)
    -    else:
    -        fromlist_w = None
    +    if w_fromlist is not None and not space.is_true(w_fromlist):
    +        w_fromlist = None
     
         rel_modulename = None
         if (level != 0 and w_globals is not None and
    @@ -284,19 +282,19 @@
                         w_mod = None
                     else:
                         w_mod = absolute_import(space, rel_modulename, rel_level,
    -                                            fromlist_w, tentative=True)
    +                                            w_fromlist, tentative=True)
                 else:
                     w_mod = absolute_import(space, rel_modulename, rel_level,
    -                                        fromlist_w, tentative=False)
    +                                        w_fromlist, tentative=False)
                 if w_mod is not None:
                     return w_mod
     
    -    w_mod = absolute_import(space, modulename, 0, fromlist_w, tentative=0)
    +    w_mod = absolute_import(space, modulename, 0, w_fromlist, tentative=0)
         if rel_modulename is not None:
             space.setitem(space.sys.get('modules'), w(rel_modulename), space.w_None)
         return w_mod
     
    -def absolute_import(space, modulename, baselevel, fromlist_w, tentative):
    +def absolute_import(space, modulename, baselevel, w_fromlist, tentative):
         # Short path: check in sys.modules, but only if there is no conflict
         # on the import lock.  In the situation of 'import' statements
         # inside tight loops, this should be true, and absolute_import_try()
    @@ -304,25 +302,25 @@
         # if the import lock is currently held by another thread, then we
         # have to wait, and so shouldn't use the fast path.
         if not getimportlock(space).lock_held_by_someone_else():
    -        w_mod = absolute_import_try(space, modulename, baselevel, fromlist_w)
    +        w_mod = absolute_import_try(space, modulename, baselevel, w_fromlist)
             if w_mod is not None and not space.is_w(w_mod, space.w_None):
                 return w_mod
         return absolute_import_with_lock(space, modulename, baselevel,
    -                                     fromlist_w, tentative)
    +                                     w_fromlist, tentative)
     
     @jit.dont_look_inside
     def absolute_import_with_lock(space, modulename, baselevel,
    -                              fromlist_w, tentative):
    +                              w_fromlist, tentative):
         lock = getimportlock(space)
         lock.acquire_lock()
         try:
             return _absolute_import(space, modulename, baselevel,
    -                                fromlist_w, tentative)
    +                                w_fromlist, tentative)
         finally:
             lock.release_lock(silent_after_fork=True)
     
     @jit.unroll_safe
    -def absolute_import_try(space, modulename, baselevel, fromlist_w):
    +def absolute_import_try(space, modulename, baselevel, w_fromlist):
         """ Only look up sys.modules, not actually try to load anything
         """
         w_path = None
    @@ -330,7 +328,7 @@
         if '.' not in modulename:
             w_mod = check_sys_modules_w(space, modulename)
             first = w_mod
    -        if fromlist_w is not None and w_mod is not None:
    +        if w_fromlist is not None and w_mod is not None:
                 w_path = try_getattr(space, w_mod, space.wrap('__path__'))
         else:
             level = 0
    @@ -345,28 +343,36 @@
                     return None
                 if level == baselevel:
                     first = w_mod
    -            if fromlist_w is not None:
    +            if w_fromlist is not None:
                     w_path = try_getattr(space, w_mod, space.wrap('__path__'))
                 level += 1
    -    if fromlist_w is not None:
    +    if w_fromlist is not None:
    +        # Somewhat artificial code, but it is important not to unwrap
    +        # w_fromlist here: keeping it wrapped yields a better trace, since
    +        # unwrapping it would lose the immutability of the tuple.
             if w_path is not None:
    -            if len(fromlist_w) == 1 and space.eq_w(fromlist_w[0],
    -                                                   space.wrap('*')):
    +            length = space.len_w(w_fromlist)
    +            if length == 1 and space.eq_w(
    +                    space.getitem(w_fromlist, space.wrap(0)),
    +                    space.wrap('*')):
                     w_all = try_getattr(space, w_mod, space.wrap('__all__'))
                     if w_all is not None:
    -                    fromlist_w = space.fixedview(w_all)
    +                    w_fromlist = w_all
                     else:
    -                    fromlist_w = []
    +                    w_fromlist = None
                         # "from x import *" with x already imported and no x.__all__
                         # always succeeds without doing more imports.  It will
                         # just copy everything from x.__dict__ as it is now.
    -            for w_name in fromlist_w:
    -                if try_getattr(space, w_mod, w_name) is None:
    -                    return None
    +
    +            if w_fromlist is not None:
    +                for i in range(length):
    +                    w_name = space.getitem(w_fromlist, space.wrap(i))
    +                    if try_getattr(space, w_mod, w_name) is None:
    +                        return None
             return w_mod
         return first
     
    -def _absolute_import(space, modulename, baselevel, fromlist_w, tentative):
    +def _absolute_import(space, modulename, baselevel, w_fromlist, tentative):
         w = space.wrap
     
         if '/' in modulename or '\\' in modulename:
    @@ -394,18 +400,23 @@
             w_path = try_getattr(space, w_mod, w('__path__'))
             level += 1
     
    -    if fromlist_w is not None:
    +    if w_fromlist is not None:
             if w_path is not None:
    -            if len(fromlist_w) == 1 and space.eq_w(fromlist_w[0],w('*')):
    +            length = space.len_w(w_fromlist)
    +            if length == 1 and space.eq_w(
    +                    space.getitem(w_fromlist, space.wrap(0)),
    +                    space.wrap('*')):
                     w_all = try_getattr(space, w_mod, w('__all__'))
                     if w_all is not None:
    -                    fromlist_w = space.fixedview(w_all)
    +                    w_fromlist = w_all
                     else:
    -                    fromlist_w = []
    -            for w_name in fromlist_w:
    -                if try_getattr(space, w_mod, w_name) is None:
    -                    load_part(space, w_path, prefix, space.str0_w(w_name),
    -                              w_mod, tentative=1)
    +                    w_fromlist = None
    +            if w_fromlist is not None:
    +                for i in range(length):
    +                    w_name = space.getitem(w_fromlist, space.wrap(i))
    +                    if try_getattr(space, w_mod, w_name) is None:
    +                        load_part(space, w_path, prefix, space.str0_w(w_name),
    +                                  w_mod, tentative=1)
             return w_mod
         else:
             return first
    diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py
    --- a/pypy/module/micronumpy/compile.py
    +++ b/pypy/module/micronumpy/compile.py
    @@ -460,6 +460,9 @@
         def getdictvalue(self, space, key):
             return self.items[key]
     
    +    def descr_memoryview(self, space, buf):
    +        raise oefmt(space.w_TypeError, "error")
    +
     class IterDictObject(W_Root):
         def __init__(self, space, w_dict):
             self.space = space
    diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py
    --- a/pypy/module/micronumpy/concrete.py
    +++ b/pypy/module/micronumpy/concrete.py
    @@ -377,7 +377,25 @@
         def __exit__(self, typ, value, traceback):
             keepalive_until_here(self)
     
    -    def get_buffer(self, space, readonly):
    +    def get_buffer(self, space, flags):
    +        errtype = space.w_ValueError # ideally BufferError, but numpy raises ValueError here
    +        if ((flags & space.BUF_C_CONTIGUOUS) == space.BUF_C_CONTIGUOUS and 
    +                not self.flags & NPY.ARRAY_C_CONTIGUOUS):
    +           raise oefmt(errtype, "ndarray is not C-contiguous")
    +        if ((flags & space.BUF_F_CONTIGUOUS) == space.BUF_F_CONTIGUOUS and 
    +                not self.flags & NPY.ARRAY_F_CONTIGUOUS):
    +           raise oefmt(errtype, "ndarray is not Fortran contiguous")
    +        if ((flags & space.BUF_ANY_CONTIGUOUS) == space.BUF_ANY_CONTIGUOUS and
    +                not (self.flags & NPY.ARRAY_F_CONTIGUOUS and 
    +                     self.flags & NPY.ARRAY_C_CONTIGUOUS)):
    +           raise oefmt(errtype, "ndarray is not contiguous")
    +        if ((flags & space.BUF_STRIDES) != space.BUF_STRIDES and
    +                not self.flags & NPY.ARRAY_C_CONTIGUOUS):
    +           raise oefmt(errtype, "ndarray is not C-contiguous")
    +        if ((flags & space.BUF_WRITABLE) == space.BUF_WRITABLE and
    +            not self.flags & NPY.ARRAY_WRITEABLE):
    +           raise oefmt(errtype, "buffer source array is read-only")
    +        readonly = not (flags & space.BUF_WRITABLE) == space.BUF_WRITABLE
             return ArrayBuffer(self, readonly)
     
         def astype(self, space, dtype, order, copy=True):
    @@ -695,6 +713,7 @@
                      index + self.impl.start)
     
         def setitem(self, index, v):
    +        # XXX what if self.readonly?
             raw_storage_setitem(self.impl.storage, index + self.impl.start,
                                 rffi.cast(lltype.Char, v))
     
    diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py
    --- a/pypy/module/micronumpy/ctors.py
    +++ b/pypy/module/micronumpy/ctors.py
    @@ -1,4 +1,5 @@
     from pypy.interpreter.error import OperationError, oefmt
    +from pypy.interpreter.baseobjspace import BufferInterfaceNotFound
     from pypy.interpreter.gateway import unwrap_spec, WrappedDefault
     from rpython.rlib.buffer import SubBuffer
     from rpython.rlib.rstring import strip_spaces
    @@ -42,7 +43,7 @@
             raise oefmt(space.w_ValueError,
                         "object __array__ method not producing an array")
     
    -def try_interface_method(space, w_object):
    +def try_interface_method(space, w_object, copy):
         try:
             w_interface = space.getattr(w_object, space.wrap("__array_interface__"))
             if w_interface is None:
    @@ -81,17 +82,20 @@
                 raise oefmt(space.w_ValueError,
                         "__array_interface__ could not decode dtype %R", w_dtype
                         )
    -        if w_data is not None and (space.isinstance_w(w_data, space.w_tuple) or space.isinstance_w(w_data, space.w_list)):
    +        if w_data is not None and (space.isinstance_w(w_data, space.w_tuple) or
    +                                   space.isinstance_w(w_data, space.w_list)):
                 data_w = space.listview(w_data)
    -            data = rffi.cast(RAW_STORAGE_PTR, space.int_w(data_w[0]))
    -            read_only = True # XXX why not space.is_true(data_w[1])
    +            w_data = rffi.cast(RAW_STORAGE_PTR, space.int_w(data_w[0]))
    +            read_only = space.is_true(data_w[1]) or copy
                 offset = 0
    -            return W_NDimArray.from_shape_and_storage(space, shape, data, 
    -                                    dtype, strides=strides, start=offset), read_only
    +            w_base = w_object
    +            if read_only:
    +                w_base = None
    +            return W_NDimArray.from_shape_and_storage(space, shape, w_data, 
    +                                dtype, w_base=w_base, strides=strides,
    +                                start=offset), read_only
             if w_data is None:
    -            data = w_object
    -        else:
    -            data = w_data
    +            w_data = w_object
             w_offset = space.finditem(w_interface, space.wrap('offset'))
             if w_offset is None:
                 offset = 0
    @@ -101,7 +105,7 @@
             if strides is not None:
                 raise oefmt(space.w_NotImplementedError,
                        "__array_interface__ strides not fully supported yet") 
    -        arr = frombuffer(space, data, dtype, support.product(shape), offset)
    +        arr = frombuffer(space, w_data, dtype, support.product(shape), offset)
             new_impl = arr.implementation.reshape(arr, shape)
             return W_NDimArray(new_impl), False
             
    @@ -110,6 +114,78 @@
                 return None, False
             raise
     
    +def _descriptor_from_pep3118_format(space, c_format):
    +    descr = descriptor.decode_w_dtype(space, space.wrap(c_format))
    +    if descr:
    +        return descr
    +    msg = "invalid PEP 3118 format string: '%s'" % c_format
    +    space.warn(space.wrap(msg), space.w_RuntimeWarning)
    +    return None 
    +
    +def _array_from_buffer_3118(space, w_object, dtype):
    +    try:
    +        w_buf = space.call_method(space.builtin, "memoryview", w_object)
    +    except OperationError as e:
    +        if e.match(space, space.w_TypeError):
    +            # object does not have buffer interface
    +            return w_object
    +        raise
    +    format = space.getattr(w_buf,space.newbytes('format'))
    +    if format:
    +        descr = _descriptor_from_pep3118_format(space, space.str_w(format))
    +        if not descr:
    +            return w_object
    +        if dtype and descr:
    +            raise oefmt(space.w_NotImplementedError,
    +                "creating an array from a memoryview while specifying dtype "
    +                "not supported")
    +        if descr.elsize != space.int_w(space.getattr(w_buf, space.newbytes('itemsize'))): 
    +            msg = ("Item size computed from the PEP 3118 buffer format "
    +                  "string does not match the actual item size.")
    +            space.warn(space.wrap(msg), space.w_RuntimeWarning)
    +            return w_object
    +        dtype = descr 
    +    elif not dtype:
    +        dtype = descriptor.get_dtype_cache(space).w_stringdtype
    +        dtype.elsize = space.int_w(space.getattr(w_buf, space.newbytes('itemsize')))
    +    nd = space.int_w(space.getattr(w_buf, space.newbytes('ndim')))
    +    shape = [space.int_w(d) for d in space.listview(
    +                            space.getattr(w_buf, space.newbytes('shape')))]
    +    strides = []
    +    buflen = space.len_w(w_buf) * dtype.elsize
    +    if shape:
    +        strides = [space.int_w(d) for d in space.listview(
    +                            space.getattr(w_buf, space.newbytes('strides')))]
    +        if not strides:
    +            d = buflen
    +            strides = [0] * nd
    +            for k in range(nd):
    +                if shape[k] > 0:
    +                    d /= shape[k]
    +                    strides[k] = d
    +    else:
    +        if nd == 1:
    +            shape = [buflen / dtype.elsize, ]
    +            strides = [dtype.elsize, ]
    +        elif nd > 1:
    +            msg = ("ndim computed from the PEP 3118 buffer format "
    +                   "is greater than 1, but shape is NULL.")
    +            space.warn(space.wrap(msg), space.w_RuntimeWarning)
    +            return w_object
    +    try:
    +        w_data = rffi.cast(RAW_STORAGE_PTR, space.int_w(space.call_method(w_buf, '_pypy_raw_address')))
    +    except OperationError as e:
    +        if e.match(space, space.w_ValueError):
    +            return w_object
    +        else:
    +            raise e
    +    writable = not space.bool_w(space.getattr(w_buf, space.newbytes('readonly')))
    +    w_ret = W_NDimArray.from_shape_and_storage(space, shape, w_data,
    +               storage_bytes=buflen, dtype=dtype, w_base=w_object, 
    +               writable=writable, strides=strides)
    +    if w_ret:
    +        return w_ret
    +    return w_object
     
     @unwrap_spec(ndmin=int, copy=bool, subok=bool)
     def array(space, w_object, w_dtype=None, copy=True, w_order=None, subok=False,
    @@ -127,6 +203,7 @@
     
     def _array(space, w_object, w_dtype=None, copy=True, w_order=None, subok=False):
     
    +    from pypy.module.micronumpy.boxes import W_GenericBox
         # numpy testing calls array(type(array([]))) and expects a ValueError
         if space.isinstance_w(w_object, space.w_type):
             raise oefmt(space.w_ValueError, "cannot create ndarray from type instance")
    @@ -134,13 +211,19 @@
         dtype = descriptor.decode_w_dtype(space, w_dtype)
         if not isinstance(w_object, W_NDimArray):
             w_array = try_array_method(space, w_object, w_dtype)
    -        if w_array is not None:
    +        if w_array is None:
    +            if (    not space.isinstance_w(w_object, space.w_str) and 
    +                    not space.isinstance_w(w_object, space.w_unicode) and
    +                    not isinstance(w_object, W_GenericBox)):
    +                # use buffer interface
    +                w_object = _array_from_buffer_3118(space, w_object, dtype)
    +        else:
                 # continue with w_array, but do further operations in place
                 w_object = w_array
                 copy = False
                 dtype = w_object.get_dtype()
         if not isinstance(w_object, W_NDimArray):
    -        w_array, _copy = try_interface_method(space, w_object)
    +        w_array, _copy = try_interface_method(space, w_object, copy)
             if w_array is not None:
                 w_object = w_array
                 copy = _copy
    diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py
    --- a/pypy/module/micronumpy/ndarray.py
    +++ b/pypy/module/micronumpy/ndarray.py
    @@ -805,19 +805,19 @@
             return w_result
     
         def buffer_w(self, space, flags):
    -        return self.implementation.get_buffer(space, True)
    +        return self.implementation.get_buffer(space, flags)
     
         def readbuf_w(self, space):
    -        return self.implementation.get_buffer(space, True)
    +        return self.implementation.get_buffer(space, space.BUF_FULL_RO)
     
         def writebuf_w(self, space):
    -        return self.implementation.get_buffer(space, False)
    +        return self.implementation.get_buffer(space, space.BUF_FULL)
     
         def charbuf_w(self, space):
    -        return self.implementation.get_buffer(space, True).as_str()
    +        return self.implementation.get_buffer(space, space.BUF_FULL_RO).as_str()
     
         def descr_get_data(self, space):
    -        return space.newbuffer(self.implementation.get_buffer(space, False))
    +        return space.newbuffer(self.implementation.get_buffer(space, space.BUF_FULL))
     
         @unwrap_spec(offset=int, axis1=int, axis2=int)
         def descr_diagonal(self, space, offset=0, axis1=0, axis2=1):
    diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py
    --- a/pypy/module/micronumpy/test/test_ndarray.py
    +++ b/pypy/module/micronumpy/test/test_ndarray.py
    @@ -3215,7 +3215,9 @@
             raises(TypeError, array, Dummy({'version': 3, 'typestr': 'f8', 'shape': ('a', 3)}))
     
             a = array([1, 2, 3])
    -        b = array(Dummy(a.__array_interface__))
    +        d = Dummy(a.__array_interface__)
    +        b = array(d)
    +        assert b.base is None
             b[1] = 200
             assert a[1] == 2 # upstream compatibility, is this a bug?
             interface_a = a.__array_interface__
    @@ -3226,6 +3228,8 @@
             interface_b.pop('data')
             interface_a.pop('data')
             assert interface_a == interface_b
    +        b = array(d, copy=False)
    +        assert b.base is d
     
             b = array(Dummy({'version':3, 'shape': (50,), 'typestr': 'u1',
                              'data': 'a'*100}))
    @@ -3594,6 +3598,7 @@
             cls.w_float32val = cls.space.wrap(struct.pack('f', 5.2))
             cls.w_float64val = cls.space.wrap(struct.pack('d', 300.4))
             cls.w_ulongval = cls.space.wrap(struct.pack('L', 12))
    +        cls.w_one = cls.space.wrap(struct.pack('i', 1))
     
         def test_frombuffer(self):
             import numpy as np
    @@ -3645,8 +3650,6 @@
             else:
                 EMPTY = None
             x = np.array([1, 2, 3, 4, 5], dtype='i')
    -        y = memoryview('abc')
    -        assert y.format == 'B'
             y = memoryview(x)
             assert y.format == 'i'
             assert y.shape == (5,)
    @@ -3654,6 +3657,16 @@
             assert y.strides == (4,)
             assert y.suboffsets == EMPTY
             assert y.itemsize == 4
    +        assert isinstance(y, memoryview)
    +        assert y[0] == self.one
    +        assert (np.array(y) == x).all()
    +
    +        x = np.array([0, 0, 0, 0], dtype='O')
    +        y = memoryview(x)
    +        # handles conversion of address to pinned object?
    +        z = np.array(y)
    +        assert z.dtype == 'O'
    +        assert (z == x).all()
     
         def test_fromstring(self):
             import sys
    diff --git a/pypy/module/micronumpy/test/test_subtype.py b/pypy/module/micronumpy/test/test_subtype.py
    --- a/pypy/module/micronumpy/test/test_subtype.py
    +++ b/pypy/module/micronumpy/test/test_subtype.py
    @@ -702,3 +702,32 @@
             ret = obj.sum()
             print type(ret)
             assert ret.info == 'spam'
    +
    +    def test_ndarray_subclass_assigns_base(self):
    +        import numpy as np
    +        init_called = []
    +        class _DummyArray(object):
    +            """ Dummy object that just exists to hang __array_interface__ dictionaries
    +            and possibly keep alive a reference to a base array.
    +            """
    +            def __init__(self, interface, base=None):
    +                self.__array_interface__ = interface
    +                init_called.append(1)
    +                self.base = base
    +
    +        x = np.zeros(10)
    +        d = _DummyArray(x.__array_interface__, base=x)
    +        y = np.array(d, copy=False)
    +        assert sum(init_called) == 1
    +        assert y.base is d
    +
    +        x = np.zeros((0,), dtype='float32')
    +        intf = x.__array_interface__.copy()
    +        intf["strides"] = x.strides
    +        x.__array_interface__["strides"] = x.strides
    +        d = _DummyArray(x.__array_interface__, base=x)
    +        y = np.array(d, copy=False)
    +        assert sum(init_called) == 2
    +        assert y.base is d
    +
    +
    diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py
    --- a/pypy/module/micronumpy/types.py
    +++ b/pypy/module/micronumpy/types.py
    @@ -1851,7 +1851,7 @@
                         arr.gcstruct)
     
         def read(self, arr, i, offset, dtype):
    -        if arr.gcstruct is V_OBJECTSTORE:
    +        if arr.gcstruct is V_OBJECTSTORE and not arr.base():
                 raise oefmt(self.space.w_NotImplementedError,
                     "cannot read object from array with no gc hook")
             return self.box(self._read(arr.storage, i, offset))
    diff --git a/pypy/module/pypyjit/test_pypy_c/test_import.py b/pypy/module/pypyjit/test_pypy_c/test_import.py
    --- a/pypy/module/pypyjit/test_pypy_c/test_import.py
    +++ b/pypy/module/pypyjit/test_pypy_c/test_import.py
    @@ -38,3 +38,27 @@
             # call_may_force(absolute_import_with_lock).
             for opname in log.opnames(loop.allops(opcode="IMPORT_NAME")):
                 assert 'call' not in opname    # no call-like opcode
    +
    +    def test_import_fast_path(self, tmpdir):
    +        print tmpdir
    +        pkg = tmpdir.join('mypkg').ensure(dir=True)
    +        subdir = pkg.join("sub").ensure(dir=True)
    +        pkg.join('__init__.py').write("")
    +        subdir.join('__init__.py').write("")
    +        subdir.join('mod.py').write(str(py.code.Source("""
    +            def do_the_import():
    +                import sys
    +        """)))
    +        def main(path, n):
    +            def do_the_import():
    +                from mypkg.sub import mod
    +            import sys
    +            sys.path.append(path)
    +            for i in range(n):
    +                do_the_import()
    +        #
    +        log = self.run(main, [str(tmpdir), 300])
    +        loop, = log.loops_by_filename(self.filepath)
    +        # check that no string compares and other calls are there
    +        for opname in log.opnames(loop.allops(opcode="IMPORT_NAME")):
    +            assert 'call' not in opname    # no call-like opcode
    diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py
    --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py
    +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py
    @@ -1415,6 +1415,7 @@
             assert p.b == 12
             assert p.c == 14
             assert p.d == 14
    +        py.test.raises(ValueError, ffi.new, "struct foo_s *", [0, 0, 0, 0])
     
         def test_nested_field_offset_align(self):
             ffi = FFI(backend=self.Backend())
    @@ -1454,14 +1455,42 @@
             assert p.b == 0
             assert p.c == 14
             assert p.d == 14
    -        p = ffi.new("union foo_u *", {'b': 12})
    -        assert p.a == 0
    +        p = ffi.new("union foo_u *", {'a': -63, 'b': 12})
    +        assert p.a == -63
             assert p.b == 12
    -        assert p.c == 0
    -        assert p.d == 0
    -        # we cannot specify several items in the dict, even though
    -        # in theory in this particular case it would make sense
    -        # to give both 'a' and 'b'
    +        assert p.c == -63
    +        assert p.d == -63
    +        p = ffi.new("union foo_u *", [123, 456])
    +        assert p.a == 123
    +        assert p.b == 456
    +        assert p.c == 123
    +        assert p.d == 123
    +        py.test.raises(ValueError, ffi.new, "union foo_u *", [0, 0, 0])
    +
    +    def test_nested_anonymous_struct_2(self):
    +        ffi = FFI(backend=self.Backend())
    +        ffi.cdef("""
    +            struct foo_s {
    +                int a;
    +                union { int b; union { int c, d; }; };
    +                int e;
    +            };
    +        """)
    +        assert ffi.sizeof("struct foo_s") == 3 * SIZE_OF_INT
    +        p = ffi.new("struct foo_s *", [11, 22, 33])
    +        assert p.a == 11
    +        assert p.b == p.c == p.d == 22
    +        assert p.e == 33
    +        py.test.raises(ValueError, ffi.new, "struct foo_s *", [11, 22, 33, 44])
    +        FOO = ffi.typeof("struct foo_s")
    +        fields = [(name, fld.offset, fld.flags) for (name, fld) in FOO.fields]
    +        assert fields == [
    +            ('a', 0 * SIZE_OF_INT, 0),
    +            ('b', 1 * SIZE_OF_INT, 0),
    +            ('c', 1 * SIZE_OF_INT, 1),
    +            ('d', 1 * SIZE_OF_INT, 1),
    +            ('e', 2 * SIZE_OF_INT, 0),
    +        ]
     
         def test_cast_to_array_type(self):
             ffi = FFI(backend=self.Backend())
    diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ctypes.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ctypes.py
    --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ctypes.py
    +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ctypes.py
    @@ -35,6 +35,9 @@
         def test_nested_anonymous_union(self):
             py.test.skip("ctypes backend: not supported: nested anonymous union")
     
    +    def test_nested_anonymous_struct_2(self):
    +        py.test.skip("ctypes backend: not supported: nested anonymous union")
    +
         def test_CData_CType_2(self):
             if sys.version_info >= (3,):
                 py.test.skip("ctypes backend: not supported in Python 3: CType")
    diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py
    --- a/pypy/objspace/std/celldict.py
    +++ b/pypy/objspace/std/celldict.py
    @@ -64,6 +64,8 @@
     
         def setitem_str(self, w_dict, key, w_value):
             cell = self.getdictvalue_no_unwrapping(w_dict, key)
    +        #if (key == '__package__' or key == "__path__") and cell is not None and w_value is not cell:
    +        #    print "WARNING", key, w_value, cell, self
             return self._setitem_str_cell_known(cell, w_dict, key, w_value)
     
         def _setitem_str_cell_known(self, cell, w_dict, key, w_value):
    diff --git a/pypy/objspace/std/memoryobject.py b/pypy/objspace/std/memoryobject.py
    --- a/pypy/objspace/std/memoryobject.py
    +++ b/pypy/objspace/std/memoryobject.py
    @@ -14,6 +14,7 @@
         """Implement the built-in 'memoryview' type as a wrapper around
         an interp-level buffer.
         """
    +    _attrs_ = ['buf']
     
         def __init__(self, buf):
             assert isinstance(buf, Buffer)
    @@ -115,7 +116,7 @@
                 self.buf.setslice(start, value.as_str())
     
         def descr_len(self, space):
    -        return space.wrap(self.buf.getlength())
    +        return space.wrap(self.buf.getlength() / self.buf.getitemsize())
     
         def w_get_format(self, space):
             return space.wrap(self.buf.getformat())
    diff --git a/pypy/tool/release/repackage.sh b/pypy/tool/release/repackage.sh
    --- a/pypy/tool/release/repackage.sh
    +++ b/pypy/tool/release/repackage.sh
    @@ -1,7 +1,7 @@
     # Edit these appropriately before running this script
     maj=5
     min=4
    -rev=0
    +rev=1
     branchname=release-$maj.x  # ==OR== release-$maj.$min.x
     tagname=release-pypy2.7-v$maj.$min.$rev  # ==OR== release-$maj.$min
     
    
    From pypy.commits at gmail.com  Wed Sep  7 10:07:20 2016
    From: pypy.commits at gmail.com (plan_rich)
    Date: Wed, 07 Sep 2016 07:07:20 -0700 (PDT)
     Subject: [pypy-commit] pypy ppc-vsx-support: new test to ensure that
      only the necessary index calculation operations are emitted
    Message-ID: <57d01f18.010c1c0a.172b6.c7b6@mx.google.com>
    
    Author: Richard Plangger 
    Branch: ppc-vsx-support
    Changeset: r86934:a867b0a573a4
    Date: 2016-09-07 16:03 +0200
    http://bitbucket.org/pypy/pypy/changeset/a867b0a573a4/
    
     Log:	new test to ensure that only the necessary index calculation
     	operations are emitted
    
    diff --git a/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py b/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py
    --- a/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py
    +++ b/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py
    @@ -1384,5 +1384,23 @@
                     for arg in op.getfailargs():
                         assert not arg.is_constant()
     
    +    def test_delay_pure_ops(self):
    +        """ Pure operations can be delayed. Often (e.g. for index calc.) this means they can be omitted.
    +        """
    +        trace = self.parse_loop("""
    +        [p0,i0]
    +        f0 = raw_load_f(p0, i0, descr=floatarraydescr)
    +        i1 = int_add(i0,8)
    +        f1 = raw_load_f(p0, i1, descr=floatarraydescr)
    +        i2 = int_add(i1,8)
    +        jump(p0,i2)
    +        """)
    +        self.schedule(trace)
    +        self.ensure_operations([
    +            'v0[2xf64] = vec_load_f(p0, i0, 8, 0, descr=floatarraydescr)',
    +            'i2 = int_add(i0, 16)',
    +            'jump(p0,i2)',
    +        ], trace)
    +
     class TestLLtype(BaseTestVectorize, LLtypeMixin):
         pass
    
    From pypy.commits at gmail.com  Wed Sep  7 10:07:16 2016
    From: pypy.commits at gmail.com (plan_rich)
    Date: Wed, 07 Sep 2016 07:07:16 -0700 (PDT)
    Subject: [pypy-commit] pypy ppc-vsx-support: syntax error
    Message-ID: <57d01f14.04a81c0a.afd36.741b@mx.google.com>
    
    Author: Richard Plangger 
    Branch: ppc-vsx-support
    Changeset: r86932:ffce6cd646d6
    Date: 2016-09-05 16:45 +0200
    http://bitbucket.org/pypy/pypy/changeset/ffce6cd646d6/
    
    Log:	syntax error
    
    diff --git a/rpython/jit/backend/x86/vector_ext.py b/rpython/jit/backend/x86/vector_ext.py
    --- a/rpython/jit/backend/x86/vector_ext.py
    +++ b/rpython/jit/backend/x86/vector_ext.py
    @@ -308,7 +308,7 @@
                 self.mc.CMPPD_xxi(lhsloc.value, rhsloc.value, 0)
             self.flush_vec_cc(rx86.Conditions["E"], lhsloc, resloc, sizeloc.value)
     
    -    def flush_vec_cc(self, rev_cond, lhsloc, resloc, size)
    +    def flush_vec_cc(self, rev_cond, lhsloc, resloc, size):
             # After emitting an instruction that leaves a boolean result in
             # a condition code (cc), call this.  In the common case, result_loc
             # will be set to SPP by the regalloc, which in this case means
    
    From pypy.commits at gmail.com  Wed Sep  7 10:12:25 2016
    From: pypy.commits at gmail.com (rlamy)
    Date: Wed, 07 Sep 2016 07:12:25 -0700 (PDT)
    Subject: [pypy-commit] pypy py3k: fix translation
    Message-ID: <57d02049.45c8c20a.4d79b.5d6e@mx.google.com>
    
    Author: Ronan Lamy 
    Branch: py3k
    Changeset: r86935:812903497ffc
    Date: 2016-09-07 15:11 +0100
    http://bitbucket.org/pypy/pypy/changeset/812903497ffc/
    
    Log:	fix translation
    
    diff --git a/pypy/objspace/std/memoryobject.py b/pypy/objspace/std/memoryobject.py
    --- a/pypy/objspace/std/memoryobject.py
    +++ b/pypy/objspace/std/memoryobject.py
    @@ -26,7 +26,6 @@
         """Implement the built-in 'memoryview' type as a wrapper around
         an interp-level buffer.
         """
    -    _attrs_ = ['buf']
     
         def __init__(self, buf, format=None, itemsize=1, ndim=-1,
                      shape=None, strides=None, suboffsets=None):
    
    From pypy.commits at gmail.com  Wed Sep  7 10:14:37 2016
    From: pypy.commits at gmail.com (cfbolz)
    Date: Wed, 07 Sep 2016 07:14:37 -0700 (PDT)
    Subject: [pypy-commit] pypy default: fix __all__ code
    Message-ID: <57d020cd.53b81c0a.262cd.7d35@mx.google.com>
    
    Author: Carl Friedrich Bolz 
    Branch: 
    Changeset: r86936:02dc0dd6309c
    Date: 2016-09-07 14:57 +0200
    http://bitbucket.org/pypy/pypy/changeset/02dc0dd6309c/
    
    Log:	fix __all__ code
    
    diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py
    --- a/pypy/module/imp/importing.py
    +++ b/pypy/module/imp/importing.py
    @@ -358,6 +358,7 @@
                     w_all = try_getattr(space, w_mod, space.wrap('__all__'))
                     if w_all is not None:
                         w_fromlist = w_all
    +                    length = space.len_w(w_fromlist)
                     else:
                         w_fromlist = None
                         # "from x import *" with x already imported and no x.__all__
    @@ -409,6 +410,7 @@
                     w_all = try_getattr(space, w_mod, w('__all__'))
                     if w_all is not None:
                         w_fromlist = w_all
    +                    length = space.len_w(w_fromlist)
                     else:
                         w_fromlist = None
                 if w_fromlist is not None:
    diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py
    --- a/pypy/module/imp/test/test_import.py
    +++ b/pypy/module/imp/test/test_import.py
    @@ -65,8 +65,9 @@
                  )
         setuppkg("pkg.pkg2", a='', b='')
         setuppkg("pkg.withall",
    -             __init__  = "__all__ = ['foobar']",
    -             foobar    = "found = 123")
    +             __init__  = "__all__ = ['foobar', 'barbaz']",
    +             foobar    = "found = 123",
    +             barbaz    = "other = 543")
         setuppkg("pkg.withoutall",
                  __init__  = "",
                  foobar    = "found = 123")
    @@ -707,6 +708,7 @@
                 d = {}
                 exec "from pkg.withall import *" in d
                 assert d["foobar"].found == 123
    +            assert d["barbaz"].other == 543
     
         def test_import_star_does_not_find_submodules_without___all__(self):
             for case in ["not-imported-yet", "already-imported"]:
    
    From pypy.commits at gmail.com  Wed Sep  7 10:14:39 2016
    From: pypy.commits at gmail.com (cfbolz)
    Date: Wed, 07 Sep 2016 07:14:39 -0700 (PDT)
    Subject: [pypy-commit] pypy default: merge
    Message-ID: <57d020cf.c186c20a.99002.507f@mx.google.com>
    
    Author: Carl Friedrich Bolz 
    Branch: 
    Changeset: r86937:cd6eb8be0310
    Date: 2016-09-07 16:13 +0200
    http://bitbucket.org/pypy/pypy/changeset/cd6eb8be0310/
    
    Log:	merge
    
    diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py
    --- a/pypy/module/imp/importing.py
    +++ b/pypy/module/imp/importing.py
    @@ -358,6 +358,7 @@
                     w_all = try_getattr(space, w_mod, space.wrap('__all__'))
                     if w_all is not None:
                         w_fromlist = w_all
    +                    length = space.len_w(w_fromlist)
                     else:
                         w_fromlist = None
                         # "from x import *" with x already imported and no x.__all__
    @@ -409,6 +410,7 @@
                     w_all = try_getattr(space, w_mod, w('__all__'))
                     if w_all is not None:
                         w_fromlist = w_all
    +                    length = space.len_w(w_fromlist)
                     else:
                         w_fromlist = None
                 if w_fromlist is not None:
    diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py
    --- a/pypy/module/imp/test/test_import.py
    +++ b/pypy/module/imp/test/test_import.py
    @@ -65,8 +65,9 @@
                  )
         setuppkg("pkg.pkg2", a='', b='')
         setuppkg("pkg.withall",
    -             __init__  = "__all__ = ['foobar']",
    -             foobar    = "found = 123")
    +             __init__  = "__all__ = ['foobar', 'barbaz']",
    +             foobar    = "found = 123",
    +             barbaz    = "other = 543")
         setuppkg("pkg.withoutall",
                  __init__  = "",
                  foobar    = "found = 123")
    @@ -707,6 +708,7 @@
                 d = {}
                 exec "from pkg.withall import *" in d
                 assert d["foobar"].found == 123
    +            assert d["barbaz"].other == 543
     
         def test_import_star_does_not_find_submodules_without___all__(self):
             for case in ["not-imported-yet", "already-imported"]:
    
    From pypy.commits at gmail.com  Wed Sep  7 10:25:45 2016
    From: pypy.commits at gmail.com (rlamy)
    Date: Wed, 07 Sep 2016 07:25:45 -0700 (PDT)
    Subject: [pypy-commit] pypy py3.5: hg merge py3k
    Message-ID: <57d02369.88711c0a.4e105.8389@mx.google.com>
    
    Author: Ronan Lamy 
    Branch: py3.5
    Changeset: r86938:5db8cf7e71cd
    Date: 2016-09-07 15:24 +0100
    http://bitbucket.org/pypy/pypy/changeset/5db8cf7e71cd/
    
    Log:	hg merge py3k
    
    diff too long, truncating to 2000 out of 2403 lines
    
    diff --git a/.hgtags b/.hgtags
    --- a/.hgtags
    +++ b/.hgtags
    @@ -30,3 +30,6 @@
     68bb3510d8212ae9efb687e12e58c09d29e74f87 release-pypy2.7-v5.4.0
     68bb3510d8212ae9efb687e12e58c09d29e74f87 release-pypy2.7-v5.4.0
     77392ad263504df011ccfcabf6a62e21d04086d0 release-pypy2.7-v5.4.0
    +050d84dd78997f021acf0e133934275d63547cc0 release-pypy2.7-v5.4.1
    +050d84dd78997f021acf0e133934275d63547cc0 release-pypy2.7-v5.4.1
    +0e2d9a73f5a1818d0245d75daccdbe21b2d5c3ef release-pypy2.7-v5.4.1
    diff --git a/_pytest/python.py b/_pytest/python.py
    --- a/_pytest/python.py
    +++ b/_pytest/python.py
    @@ -498,7 +498,10 @@
         """ Collector for test methods. """
         def collect(self):
             if hasinit(self.obj):
    -            pytest.skip("class %s.%s with __init__ won't get collected" % (
    +            # XXX used to be skip(), but silently skipping classes
    +            # XXX just because they have been written long ago is
    +            # XXX imho a very, very, very bad idea
    +            pytest.fail("class %s.%s with __init__ won't get collected" % (
                     self.obj.__module__,
                     self.obj.__name__,
                 ))
    diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO
    --- a/lib_pypy/cffi.egg-info/PKG-INFO
    +++ b/lib_pypy/cffi.egg-info/PKG-INFO
    @@ -1,6 +1,6 @@
     Metadata-Version: 1.1
     Name: cffi
    -Version: 1.8.0
    +Version: 1.8.2
     Summary: Foreign Function Interface for Python calling C code.
     Home-page: http://cffi.readthedocs.org
     Author: Armin Rigo, Maciej Fijalkowski
    diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py
    --- a/lib_pypy/cffi/__init__.py
    +++ b/lib_pypy/cffi/__init__.py
    @@ -4,8 +4,8 @@
     from .api import FFI, CDefError, FFIError
     from .ffiplatform import VerificationError, VerificationMissing
     
    -__version__ = "1.8.0"
    -__version_info__ = (1, 8, 0)
    +__version__ = "1.8.2"
    +__version_info__ = (1, 8, 2)
     
     # The verifier module file names are based on the CRC32 of a string that
     # contains the following version number.  It may be older than __version__
    diff --git a/lib_pypy/cffi/_cffi_include.h b/lib_pypy/cffi/_cffi_include.h
    --- a/lib_pypy/cffi/_cffi_include.h
    +++ b/lib_pypy/cffi/_cffi_include.h
    @@ -1,4 +1,20 @@
     #define _CFFI_
    +
    +/* We try to define Py_LIMITED_API before including Python.h.
    +
    +   Mess: we can only define it if Py_DEBUG, Py_TRACE_REFS and
    +   Py_REF_DEBUG are not defined.  This is a best-effort approximation:
    +   we can learn about Py_DEBUG from pyconfig.h, but it is unclear if
    +   the same works for the other two macros.  Py_DEBUG implies them,
    +   but not the other way around.
    +*/
    +#ifndef _CFFI_USE_EMBEDDING
    +#  include 
    +#  if !defined(Py_DEBUG) && !defined(Py_TRACE_REFS) && !defined(Py_REF_DEBUG)
    +#    define Py_LIMITED_API
    +#  endif
    +#endif
    +
     #include 
     #ifdef __cplusplus
     extern "C" {
    diff --git a/lib_pypy/cffi/_embedding.h b/lib_pypy/cffi/_embedding.h
    --- a/lib_pypy/cffi/_embedding.h
    +++ b/lib_pypy/cffi/_embedding.h
    @@ -233,7 +233,7 @@
             f = PySys_GetObject((char *)"stderr");
             if (f != NULL && f != Py_None) {
                 PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME
    -                               "\ncompiled with cffi version: 1.8.0"
    +                               "\ncompiled with cffi version: 1.8.2"
                                    "\n_cffi_backend module: ", f);
                 modules = PyImport_GetModuleDict();
                 mod = PyDict_GetItemString(modules, "_cffi_backend");
    diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py
    --- a/lib_pypy/cffi/api.py
    +++ b/lib_pypy/cffi/api.py
    @@ -652,7 +652,7 @@
             recompile(self, module_name, source,
                       c_file=filename, call_c_compiler=False, **kwds)
     
    -    def compile(self, tmpdir='.', verbose=0, target=None):
    +    def compile(self, tmpdir='.', verbose=0, target=None, debug=None):
             """The 'target' argument gives the final file name of the
             compiled DLL.  Use '*' to force distutils' choice, suitable for
             regular CPython C API modules.  Use a file name ending in '.*'
    @@ -669,7 +669,7 @@
             module_name, source, source_extension, kwds = self._assigned_source
             return recompile(self, module_name, source, tmpdir=tmpdir,
                              target=target, source_extension=source_extension,
    -                         compiler_verbose=verbose, **kwds)
    +                         compiler_verbose=verbose, debug=debug, **kwds)
     
         def init_once(self, func, tag):
             # Read _init_once_cache[tag], which is either (False, lock) if
    diff --git a/lib_pypy/cffi/backend_ctypes.py b/lib_pypy/cffi/backend_ctypes.py
    --- a/lib_pypy/cffi/backend_ctypes.py
    +++ b/lib_pypy/cffi/backend_ctypes.py
    @@ -997,29 +997,43 @@
             assert onerror is None   # XXX not implemented
             return BType(source, error)
     
    +    _weakref_cache_ref = None
    +
         def gcp(self, cdata, destructor):
    -        BType = self.typeof(cdata)
    +        if self._weakref_cache_ref is None:
    +            import weakref
    +            class MyRef(weakref.ref):
    +                def __eq__(self, other):
    +                    myref = self()
    +                    return self is other or (
    +                        myref is not None and myref is other())
    +                def __ne__(self, other):
    +                    return not (self == other)
    +                def __hash__(self):
    +                    try:
    +                        return self._hash
    +                    except AttributeError:
    +                        self._hash = hash(self())
    +                        return self._hash
    +            self._weakref_cache_ref = {}, MyRef
    +        weak_cache, MyRef = self._weakref_cache_ref
     
             if destructor is None:
    -            if not (hasattr(BType, '_gcp_type') and
    -                    BType._gcp_type is BType):
    +            try:
    +                del weak_cache[MyRef(cdata)]
    +            except KeyError:
                     raise TypeError("Can remove destructor only on a object "
                                     "previously returned by ffi.gc()")
    -            cdata._destructor = None
                 return None
     
    -        try:
    -            gcp_type = BType._gcp_type
    -        except AttributeError:
    -            class CTypesDataGcp(BType):
    -                __slots__ = ['_orig', '_destructor']
    -                def __del__(self):
    -                    if self._destructor is not None:
    -                        self._destructor(self._orig)
    -            gcp_type = BType._gcp_type = CTypesDataGcp
    -        new_cdata = self.cast(gcp_type, cdata)
    -        new_cdata._orig = cdata
    -        new_cdata._destructor = destructor
    +        def remove(k):
    +            cdata, destructor = weak_cache.pop(k, (None, None))
    +            if destructor is not None:
    +                destructor(cdata)
    +
    +        new_cdata = self.cast(self.typeof(cdata), cdata)
    +        assert new_cdata is not cdata
    +        weak_cache[MyRef(new_cdata, remove)] = (cdata, destructor)
             return new_cdata
     
         typeof = type
    diff --git a/lib_pypy/cffi/ffiplatform.py b/lib_pypy/cffi/ffiplatform.py
    --- a/lib_pypy/cffi/ffiplatform.py
    +++ b/lib_pypy/cffi/ffiplatform.py
    @@ -21,12 +21,12 @@
             allsources.append(os.path.normpath(src))
         return Extension(name=modname, sources=allsources, **kwds)
     
    -def compile(tmpdir, ext, compiler_verbose=0):
    +def compile(tmpdir, ext, compiler_verbose=0, debug=None):
         """Compile a C extension module using distutils."""
     
         saved_environ = os.environ.copy()
         try:
    -        outputfilename = _build(tmpdir, ext, compiler_verbose)
    +        outputfilename = _build(tmpdir, ext, compiler_verbose, debug)
             outputfilename = os.path.abspath(outputfilename)
         finally:
             # workaround for a distutils bugs where some env vars can
    @@ -36,7 +36,7 @@
                     os.environ[key] = value
         return outputfilename
     
    -def _build(tmpdir, ext, compiler_verbose=0):
    +def _build(tmpdir, ext, compiler_verbose=0, debug=None):
         # XXX compact but horrible :-(
         from distutils.core import Distribution
         import distutils.errors, distutils.log
    @@ -44,6 +44,9 @@
         dist = Distribution({'ext_modules': [ext]})
         dist.parse_config_files()
         options = dist.get_option_dict('build_ext')
    +    if debug is None:
    +        debug = sys.flags.debug
    +    options['debug'] = ('ffiplatform', debug)
         options['force'] = ('ffiplatform', True)
         options['build_lib'] = ('ffiplatform', tmpdir)
         options['build_temp'] = ('ffiplatform', tmpdir)
    diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py
    --- a/lib_pypy/cffi/recompiler.py
    +++ b/lib_pypy/cffi/recompiler.py
    @@ -275,8 +275,8 @@
         def write_c_source_to_f(self, f, preamble):
             self._f = f
             prnt = self._prnt
    -        if self.ffi._embedding is None:
    -            prnt('#define Py_LIMITED_API')
    +        if self.ffi._embedding is not None:
    +            prnt('#define _CFFI_USE_EMBEDDING')
             #
             # first the '#include' (actually done by inlining the file's content)
             lines = self._rel_readlines('_cffi_include.h')
    @@ -1431,7 +1431,7 @@
     
     def recompile(ffi, module_name, preamble, tmpdir='.', call_c_compiler=True,
                   c_file=None, source_extension='.c', extradir=None,
    -              compiler_verbose=1, target=None, **kwds):
    +              compiler_verbose=1, target=None, debug=None, **kwds):
         if not isinstance(module_name, str):
             module_name = module_name.encode('ascii')
         if ffi._windows_unicode:
    @@ -1467,7 +1467,8 @@
                     if target != '*':
                         _patch_for_target(patchlist, target)
                     os.chdir(tmpdir)
    -                outputfilename = ffiplatform.compile('.', ext, compiler_verbose)
    +                outputfilename = ffiplatform.compile('.', ext,
    +                                                     compiler_verbose, debug)
                 finally:
                     os.chdir(cwd)
                     _unpatch_meths(patchlist)
    diff --git a/lib_pypy/cffi/setuptools_ext.py b/lib_pypy/cffi/setuptools_ext.py
    --- a/lib_pypy/cffi/setuptools_ext.py
    +++ b/lib_pypy/cffi/setuptools_ext.py
    @@ -69,16 +69,36 @@
         else:
             _add_c_module(dist, ffi, module_name, source, source_extension, kwds)
     
    +def _set_py_limited_api(Extension, kwds):
    +    """
    +    Add py_limited_api to kwds if setuptools >= 26 is in use.
    +    Do not alter the setting if it already exists.
    +    Setuptools takes care of ignoring the flag on Python 2 and PyPy.
    +    """
    +    if 'py_limited_api' not in kwds:
    +        import setuptools
    +        try:
    +            setuptools_major_version = int(setuptools.__version__.partition('.')[0])
    +            if setuptools_major_version >= 26:
    +                kwds['py_limited_api'] = True
    +        except ValueError:  # certain development versions of setuptools
    +            # If we don't know the version number of setuptools, we
    +            # try to set 'py_limited_api' anyway.  At worst, we get a
    +            # warning.
    +            kwds['py_limited_api'] = True
    +    return kwds
     
     def _add_c_module(dist, ffi, module_name, source, source_extension, kwds):
         from distutils.core import Extension
    -    from distutils.command.build_ext import build_ext
    +    # We are a setuptools extension. Need this build_ext for py_limited_api.
    +    from setuptools.command.build_ext import build_ext
         from distutils.dir_util import mkpath
         from distutils import log
         from cffi import recompiler
     
         allsources = ['$PLACEHOLDER']
         allsources.extend(kwds.pop('sources', []))
    +    kwds = _set_py_limited_api(Extension, kwds)
         ext = Extension(name=module_name, sources=allsources, **kwds)
     
         def make_mod(tmpdir, pre_run=None):
    diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst
    --- a/pypy/doc/index-of-release-notes.rst
    +++ b/pypy/doc/index-of-release-notes.rst
    @@ -6,6 +6,7 @@
     
     .. toctree::
     
    +   release-pypy2.7-v5.4.1.rst
        release-pypy2.7-v5.4.0.rst
        release-pypy2.7-v5.3.1.rst
        release-pypy2.7-v5.3.0.rst
    diff --git a/pypy/doc/release-pypy2.7-v5.4.1.rst b/pypy/doc/release-pypy2.7-v5.4.1.rst
    new file mode 100644
    --- /dev/null
    +++ b/pypy/doc/release-pypy2.7-v5.4.1.rst
    @@ -0,0 +1,64 @@
    +==========
    +PyPy 5.4.1
    +==========
    +
    +We have released a bugfix for PyPy2.7-v5.4.0, released last week,
    +due to the following issues:
    +
    +  * Update list of contributors in documentation and LICENSE file,
    +    this was unfortunately left out of 5.4.0. My apologies to the new
    +    contributors
    +
    +  * Allow tests run with `-A` to find `libm.so` even if it is a script not a
    +    dynamically loadable file
    +
    +  * Bump `sys.setrecursionlimit()` when translating PyPy, for translating with CPython
    +
    +  * Tweak a float comparison with 0 in `backendopt.inline` to avoid rounding errors
    +
    +  * Fix for an issue where os.access() accepted a float for mode
    +
     +  * Fix for an issue where `unicode.decode('utf8', 'custom_replace')` messed up
    +    the last byte of a unicode string sometimes
    +
    +  * Update built-in cffi_ to version 1.8.1
    +
    +  * Explicitly detect that we found as-yet-unsupported OpenSSL 1.1, and crash
    +    translation with a message asking for help porting it
    +
    +  * Fix a regression where a PyBytesObject was forced (converted to a RPython
    +    object) when not required, reported as issue #2395
    +
    +Thanks to those who reported the issues.
    +
    +What is PyPy?
    +=============
    +
    +PyPy is a very compliant Python interpreter, almost a drop-in replacement for
    +CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison)
    +due to its integrated tracing JIT compiler.
    +
    +We also welcome developers of other
    +`dynamic languages`_ to see what RPython can do for them.
    +
    +This release supports:
    +
    +  * **x86** machines on most common operating systems
    +    (Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD),
    +
    +  * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux,
    +
    +  * big- and little-endian variants of **PPC64** running Linux,
    +
    +  * **s390x** running Linux
    +
    +.. _cffi: https://cffi.readthedocs.io
    +.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org
    +.. _`dynamic languages`: http://pypyjs.org
    +
    +Please update, and continue to help us make PyPy better.
    +
    +Cheers
    +
    +The PyPy Team
    +
    diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
    --- a/pypy/doc/whatsnew-head.rst
    +++ b/pypy/doc/whatsnew-head.rst
    @@ -7,3 +7,9 @@
     
     .. branch: rpython-resync
     Backport rpython changes made directly on the py3k and py3.5 branches.
    +
    +.. branch: buffer-interface
    +Implement PyObject_GetBuffer, PyMemoryView_GET_BUFFER, and handles memoryviews
    +in numpypy
    +
    +
    diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
    --- a/pypy/interpreter/baseobjspace.py
    +++ b/pypy/interpreter/baseobjspace.py
    @@ -1437,6 +1437,9 @@
         BUF_FORMAT   = 0x0004
         BUF_ND       = 0x0008
         BUF_STRIDES  = 0x0010 | BUF_ND
    +    BUF_C_CONTIGUOUS = 0x0020 | BUF_STRIDES
    +    BUF_F_CONTIGUOUS = 0x0040 | BUF_STRIDES
    +    BUF_ANY_CONTIGUOUS = 0x0080 | BUF_STRIDES
         BUF_INDIRECT = 0x0100 | BUF_STRIDES
     
         BUF_CONTIG_RO = BUF_ND
    diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py
    --- a/pypy/module/_cffi_backend/__init__.py
    +++ b/pypy/module/_cffi_backend/__init__.py
    @@ -3,7 +3,7 @@
     from rpython.rlib import rdynload, clibffi, entrypoint
     from rpython.rtyper.lltypesystem import rffi
     
    -VERSION = "1.8.0"
    +VERSION = "1.8.2"
     
     FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI
     try:
    diff --git a/pypy/module/_cffi_backend/ctypestruct.py b/pypy/module/_cffi_backend/ctypestruct.py
    --- a/pypy/module/_cffi_backend/ctypestruct.py
    +++ b/pypy/module/_cffi_backend/ctypestruct.py
    @@ -105,9 +105,6 @@
                     return True
             return False
     
    -    def _check_only_one_argument_for_union(self, w_ob):
    -        pass
    -
         def convert_from_object(self, cdata, w_ob):
             if not self._copy_from_same(cdata, w_ob):
                 self.convert_struct_from_object(cdata, w_ob, optvarsize=-1)
    @@ -117,19 +114,24 @@
         )
         def convert_struct_from_object(self, cdata, w_ob, optvarsize):
             self.force_lazy_struct()
    -        self._check_only_one_argument_for_union(w_ob)
     
             space = self.space
             if (space.isinstance_w(w_ob, space.w_list) or
                 space.isinstance_w(w_ob, space.w_tuple)):
                 lst_w = space.listview(w_ob)
    -            if len(lst_w) > len(self._fields_list):
    -                raise oefmt(space.w_ValueError,
    -                            "too many initializers for '%s' (got %d)",
    -                            self.name, len(lst_w))
    -            for i in range(len(lst_w)):
    -                optvarsize = self._fields_list[i].write_v(cdata, lst_w[i],
    +            j = 0
    +            for w_obj in lst_w:
    +                try:
    +                    while (self._fields_list[j].flags &
    +                               W_CField.BF_IGNORE_IN_CTOR):
    +                        j += 1
    +                except IndexError:
    +                    raise oefmt(space.w_ValueError,
    +                                "too many initializers for '%s' (got %d)",
    +                                self.name, len(lst_w))
    +                optvarsize = self._fields_list[j].write_v(cdata, w_obj,
                                                               optvarsize)
    +                j += 1
                 return optvarsize
     
             elif space.isinstance_w(w_ob, space.w_dict):
    @@ -185,14 +187,6 @@
     class W_CTypeUnion(W_CTypeStructOrUnion):
         kind = "union"
     
    -    def _check_only_one_argument_for_union(self, w_ob):
    -        space = self.space
    -        n = space.int_w(space.len(w_ob))
    -        if n > 1:
    -            raise oefmt(space.w_ValueError,
    -                        "initializer for '%s': %d items given, but only one "
    -                        "supported (use a dict if needed)", self.name, n)
    -
     
     class W_CField(W_Root):
         _immutable_ = True
    @@ -200,18 +194,21 @@
         BS_REGULAR     = -1
         BS_EMPTY_ARRAY = -2
     
    -    def __init__(self, ctype, offset, bitshift, bitsize):
    +    BF_IGNORE_IN_CTOR = 0x01
    +
    +    def __init__(self, ctype, offset, bitshift, bitsize, flags):
             self.ctype = ctype
             self.offset = offset
             self.bitshift = bitshift # >= 0: bitshift; or BS_REGULAR/BS_EMPTY_ARRAY
             self.bitsize = bitsize
    +        self.flags = flags       # BF_xxx
     
         def is_bitfield(self):
             return self.bitshift >= 0
     
    -    def make_shifted(self, offset):
    +    def make_shifted(self, offset, fflags):
             return W_CField(self.ctype, offset + self.offset,
    -                        self.bitshift, self.bitsize)
    +                        self.bitshift, self.bitsize, self.flags | fflags)
     
         def read(self, cdata):
             cdata = rffi.ptradd(cdata, self.offset)
    @@ -341,5 +338,6 @@
         offset = interp_attrproperty('offset', W_CField),
         bitshift = interp_attrproperty('bitshift', W_CField),
         bitsize = interp_attrproperty('bitsize', W_CField),
    +    flags = interp_attrproperty('flags', W_CField),
         )
     W_CField.typedef.acceptable_as_base_class = False
    diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py
    --- a/pypy/module/_cffi_backend/newtype.py
    +++ b/pypy/module/_cffi_backend/newtype.py
    @@ -345,6 +345,11 @@
             if alignment < falign and do_align:
                 alignment = falign
             #
    +        if is_union and i > 0:
    +            fflags = ctypestruct.W_CField.BF_IGNORE_IN_CTOR
    +        else:
    +            fflags = 0
    +        #
             if fbitsize < 0:
                 # not a bitfield: common case
     
    @@ -372,7 +377,7 @@
                     for name, srcfld in ftype._fields_dict.items():
                         srcfield2names[srcfld] = name
                     for srcfld in ftype._fields_list:
    -                    fld = srcfld.make_shifted(boffset // 8)
    +                    fld = srcfld.make_shifted(boffset // 8, fflags)
                         fields_list.append(fld)
                         try:
                             fields_dict[srcfield2names[srcfld]] = fld
    @@ -382,7 +387,8 @@
                     w_ctype._custom_field_pos = True
                 else:
                     # a regular field
    -                fld = ctypestruct.W_CField(ftype, boffset // 8, bs_flag, -1)
    +                fld = ctypestruct.W_CField(ftype, boffset // 8, bs_flag, -1,
    +                                           fflags)
                     fields_list.append(fld)
                     fields_dict[fname] = fld
     
    @@ -489,7 +495,7 @@
                         bitshift = 8 * ftype.size - fbitsize- bitshift
     
                     fld = ctypestruct.W_CField(ftype, field_offset_bytes,
    -                                           bitshift, fbitsize)
    +                                           bitshift, fbitsize, fflags)
                     fields_list.append(fld)
                     fields_dict[fname] = fld
     
    diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py
    --- a/pypy/module/_cffi_backend/test/_backend_test_c.py
    +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py
    @@ -1,7 +1,7 @@
     # ____________________________________________________________
     
     import sys
    -assert __version__ == "1.8.0", ("This test_c.py file is for testing a version"
    +assert __version__ == "1.8.2", ("This test_c.py file is for testing a version"
                                     " of cffi that differs from the one that we"
                                     " get from 'import _cffi_backend'")
     if sys.version_info < (3,):
    @@ -2525,6 +2525,25 @@
         assert d[2][1].bitshift == -1
         assert d[2][1].bitsize == -1
     
    +def test_nested_anonymous_struct_2():
    +    BInt = new_primitive_type("int")
    +    BStruct = new_struct_type("struct foo")
    +    BInnerUnion = new_union_type("union bar")
    +    complete_struct_or_union(BInnerUnion, [('a1', BInt, -1),
    +                                           ('a2', BInt, -1)])
    +    complete_struct_or_union(BStruct, [('b1', BInt, -1),
    +                                       ('', BInnerUnion, -1),
    +                                       ('b2', BInt, -1)])
    +    assert sizeof(BInnerUnion) == sizeof(BInt)
    +    assert sizeof(BStruct) == sizeof(BInt) * 3
    +    fields = [(name, fld.offset, fld.flags) for (name, fld) in BStruct.fields]
    +    assert fields == [
    +        ('b1', 0 * sizeof(BInt), 0),
    +        ('a1', 1 * sizeof(BInt), 0),
    +        ('a2', 1 * sizeof(BInt), 1),
    +        ('b2', 2 * sizeof(BInt), 0),
    +    ]
    +
     def test_sizeof_union():
         # a union has the largest alignment of its members, and a total size
         # that is the largest of its items *possibly further aligned* if
    diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
    --- a/pypy/module/cpyext/api.py
    +++ b/pypy/module/cpyext/api.py
    @@ -121,7 +121,7 @@
     METH_COEXIST METH_STATIC METH_CLASS Py_TPFLAGS_BASETYPE
     METH_NOARGS METH_VARARGS METH_KEYWORDS METH_O Py_TPFLAGS_HAVE_INPLACEOPS
     Py_TPFLAGS_HEAPTYPE Py_TPFLAGS_HAVE_CLASS Py_TPFLAGS_HAVE_NEWBUFFER
    -Py_LT Py_LE Py_EQ Py_NE Py_GT Py_GE Py_TPFLAGS_CHECKTYPES
    +Py_LT Py_LE Py_EQ Py_NE Py_GT Py_GE Py_TPFLAGS_CHECKTYPES Py_MAX_NDIMS
     Py_CLEANUP_SUPPORTED
     """.split()
     for name in constant_names:
    @@ -647,6 +647,9 @@
             ('format', rffi.CCHARP),
             ('shape', Py_ssize_tP),
             ('strides', Py_ssize_tP),
    +        ('_format', rffi.UCHAR),
    +        ('_shape', rffi.CFixedArray(Py_ssize_t, Py_MAX_NDIMS)),
    +        ('_strides', rffi.CFixedArray(Py_ssize_t, Py_MAX_NDIMS)),
             ('suboffsets', Py_ssize_tP),
             #('smalltable', rffi.CFixedArray(Py_ssize_t, 2)),
             ('internal', rffi.VOIDP)
    diff --git a/pypy/module/cpyext/buffer.py b/pypy/module/cpyext/buffer.py
    --- a/pypy/module/cpyext/buffer.py
    +++ b/pypy/module/cpyext/buffer.py
    @@ -1,20 +1,66 @@
     from pypy.interpreter.error import oefmt
     from rpython.rtyper.lltypesystem import rffi, lltype
     from rpython.rlib import buffer
    +from rpython.rlib.rarithmetic import widen
     from pypy.module.cpyext.api import (
         cpython_api, CANNOT_FAIL, Py_buffer)
     from pypy.module.cpyext.pyobject import PyObject, Py_DecRef
     
    -# PyObject_GetBuffer has been removed, it is defined in abstract.c
    -# PyObject_CheckBuffer is also already defined
    +def _IsFortranContiguous(view):
    +    ndim = widen(view.c_ndim)
    +    if ndim == 0:
    +        return 1
    +    if not view.c_strides:
    +        return ndim == 1
    +    sd = view.c_itemsize
    +    if ndim == 1:
    +        return view.c_shape[0] == 1 or sd == view.c_strides[0]
    +    for i in range(view.c_ndim):
    +        dim = view.c_shape[i]
    +        if dim == 0:
    +            return 1
    +        if view.c_strides[i] != sd:
    +            return 0
    +        sd *= dim
    +    return 1
    +
    +def _IsCContiguous(view):
    +    ndim = widen(view.c_ndim)
    +    if ndim == 0:
    +        return 1
    +    if not view.c_strides:
    +        return ndim == 1
    +    sd = view.c_itemsize
    +    if ndim == 1:
    +        return view.c_shape[0] == 1 or sd == view.c_strides[0]
    +    for i in range(ndim - 1, -1, -1):
    +        dim = view.c_shape[i]
    +        if dim == 0:
    +            return 1
    +        if view.c_strides[i] != sd:
    +            return 0
    +        sd *= dim
    +    return 1
    +        
     
     @cpython_api([lltype.Ptr(Py_buffer), lltype.Char], rffi.INT_real, error=CANNOT_FAIL)
    -def PyBuffer_IsContiguous(space, view, fortran):
    +def PyBuffer_IsContiguous(space, view, fort):
         """Return 1 if the memory defined by the view is C-style (fortran is
         'C') or Fortran-style (fortran is 'F') contiguous or either one
         (fortran is 'A').  Return 0 otherwise."""
    -    # PyPy only supports contiguous Py_buffers for now.
    -    return 1
    +    # traverse the strides, checking for consistent stride increases from
    +    # right-to-left (c) or left-to-right (fortran). Copied from cpython
    +    if not view.c_suboffsets:
    +        return 0
    +    if (fort == 'C'):
    +        return _IsCContiguous(view)
    +    elif (fort == 'F'):
    +        return _IsFortranContiguous(view)
    +    elif (fort == 'A'):
    +        return (_IsCContiguous(view) or _IsFortranContiguous(view))
    +    return 0
    +
    +    
     
     class CBuffer(buffer.Buffer):
     
    diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py
    --- a/pypy/module/cpyext/bytesobject.py
    +++ b/pypy/module/cpyext/bytesobject.py
    @@ -14,45 +14,33 @@
     ## Implementation of PyBytesObject
     ## ================================
     ##
    -## The problem
    -## -----------
    +## PyBytesObject has its own ob_sval buffer, so we have two copies of a string;
    +## one in the PyBytesObject returned from various C-API functions and another
    +## in the corresponding RPython object.
     ##
    -## PyBytes_AsString() must return a (non-movable) pointer to the underlying
    -## ob_sval, whereas pypy strings are movable.  C code may temporarily store
    -## this address and use it, as long as it owns a reference to the PyObject.
    -## There is no "release" function to specify that the pointer is not needed
    -## any more.
+## The following calls can create a PyBytesObject without a corresponding
    +## RPython object:
     ##
    -## Also, the pointer may be used to fill the initial value of string. This is
    -## valid only when the string was just allocated, and is not used elsewhere.
    +## PyBytes_FromStringAndSize(NULL, n) / PyString_FromStringAndSize(NULL, n)
     ##
    -## Solution
    -## --------
    +## In the PyBytesObject returned, the ob_sval buffer may be modified as
    +## long as the freshly allocated PyBytesObject is not "forced" via a call
    +## to any of the more sophisticated C-API functions. 
     ##
    -## PyBytesObject contains two additional members: the ob_size and a pointer to a
    -## char ob_sval; it may be NULL.
    -##
    -## - A string allocated by pypy will be converted into a PyBytesObject with a
    -##   NULL buffer.  The first time PyBytes_AsString() is called, memory is
    -##   allocated (with flavor='raw') and content is copied.
    -##
    -## - A string allocated with PyBytes_FromStringAndSize(NULL, size) will
    -##   allocate a PyBytesObject structure, and a buffer with the specified
    -##   size+1, but the reference won't be stored in the global map; there is no
    -##   corresponding object in pypy.  When from_ref() or Py_INCREF() is called,
    -##   the pypy string is created, and added to the global map of tracked
    -##   objects.  The buffer is then supposed to be immutable.
    -##
    -##-  A buffer obtained from PyBytes_AS_STRING() could be mutable iff
    -##   there is no corresponding pypy object for the string
    -##
    -## - _PyBytes_Resize() works only on not-yet-pypy'd strings, and returns a
    -##   similar object.
    -##
    -## - PyBytes_Size() doesn't need to force the object.
    +## Care has been taken in implementing the functions below, so that
    +## if they are called with a non-forced PyBytesObject, they will not 
    +## unintentionally force the creation of a RPython object. As long as only these
    +## are used, the ob_sval buffer is still modifiable:
    +## 
    +## PyBytes_AsString / PyString_AsString 
    +## PyBytes_AS_STRING / PyString_AS_STRING
    +## PyBytes_AsStringAndSize / PyString_AsStringAndSize
    +## PyBytes_Size / PyString_Size
    +## PyBytes_Resize / PyString_Resize
    +## _PyBytes_Resize / _PyString_Resize (raises if called with a forced object)
     ##
     ## - There could be an (expensive!) check in from_ref() that the buffer still
    -##   corresponds to the pypy gc-managed string.
    +##   corresponds to the pypy gc-managed string, 
     ##
     
     PyBytesObjectStruct = lltype.ForwardReference()
    @@ -150,9 +138,6 @@
             raise oefmt(space.w_TypeError,
                 "expected bytes, %T found", from_ref(space, ref))
         ref_str = rffi.cast(PyBytesObject, ref)
    -    if not pyobj_has_w_obj(ref):
    -        # XXX Force the ref?
    -        bytes_realize(space, ref)
         return ref_str.c_ob_sval
     
     @cpython_api([rffi.VOIDP], rffi.CCHARP, error=0)
    @@ -170,9 +155,6 @@
         if not PyBytes_Check(space, ref):
             raise oefmt(space.w_TypeError,
                 "expected bytes, %T found", from_ref(space, ref))
    -    if not pyobj_has_w_obj(ref):
    -        # force the ref
    -        bytes_realize(space, ref)
         ref_str = rffi.cast(PyBytesObject, ref)
         data[0] = ref_str.c_ob_sval
         if length:
    diff --git a/pypy/module/cpyext/include/object.h b/pypy/module/cpyext/include/object.h
    --- a/pypy/module/cpyext/include/object.h
    +++ b/pypy/module/cpyext/include/object.h
    @@ -131,7 +131,8 @@
     typedef int(*objobjargproc)(PyObject *, PyObject *, PyObject *);
     
     
    -/* Py3k buffer interface */
    +/* Py3k buffer interface, adapted for PyPy */
    +#define Py_MAX_NDIMS 32
     typedef struct bufferinfo {
         void *buf;
         PyObject *obj;        /* owned reference */
    @@ -145,12 +146,14 @@
         char *format;
         Py_ssize_t *shape;
         Py_ssize_t *strides;
    -    Py_ssize_t *suboffsets;
    -
+    Py_ssize_t *suboffsets; /* always NULL for app-level objects */
    +    unsigned char _format;
    +    Py_ssize_t _strides[Py_MAX_NDIMS];
    +    Py_ssize_t _shape[Py_MAX_NDIMS];
         /* static store for shape and strides of
            mono-dimensional buffers. */
         /* Py_ssize_t smalltable[2]; */
    -    void *internal;
    +    void *internal; /* always NULL for app-level objects */
     } Py_buffer;
     
     
    diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h
    --- a/pypy/module/cpyext/include/patchlevel.h
    +++ b/pypy/module/cpyext/include/patchlevel.h
    @@ -29,8 +29,8 @@
     #define PY_VERSION		"3.3.5"
     
     /* PyPy version as a string */
    -#define PYPY_VERSION "5.4.1-alpha0"
    -#define PYPY_VERSION_NUM  0x05040100
    +#define PYPY_VERSION "5.5.0-alpha0"
    +#define PYPY_VERSION_NUM  0x05050000
     
     /* Defined to mean a PyPy where cpyext holds more regular references
        to PyObjects, e.g. staying alive as long as the internal PyPy object
    diff --git a/pypy/module/cpyext/memoryobject.py b/pypy/module/cpyext/memoryobject.py
    --- a/pypy/module/cpyext/memoryobject.py
    +++ b/pypy/module/cpyext/memoryobject.py
    @@ -1,7 +1,8 @@
     from pypy.module.cpyext.api import (cpython_api, Py_buffer, CANNOT_FAIL,
    -                                    build_type_checkers)
    -from pypy.module.cpyext.pyobject import PyObject
    -from rpython.rtyper.lltypesystem import lltype
    +                               Py_MAX_NDIMS, build_type_checkers, Py_ssize_tP)
    +from pypy.module.cpyext.pyobject import PyObject, make_ref, incref
    +from rpython.rtyper.lltypesystem import lltype, rffi
    +from pypy.objspace.std.memoryobject import W_MemoryView
     
     from pypy.interpreter.error import oefmt
     from pypy.module.cpyext.pyobject import PyObject, from_ref
    @@ -16,6 +17,7 @@
     @cpython_api([PyObject], PyObject)
     def PyMemoryView_GET_BASE(space, w_obj):
         # return the obj field of the Py_buffer created by PyMemoryView_GET_BUFFER
    +    # XXX needed for numpy on py3k
         raise NotImplementedError('PyMemoryView_GET_BUFFER')
     
     @cpython_api([PyObject], lltype.Ptr(Py_buffer), error=CANNOT_FAIL)
    @@ -24,24 +26,38 @@
         object.  The object must be a memoryview instance; this macro doesn't
         check its type, you must do it yourself or you will risk crashes."""
         view = lltype.malloc(Py_buffer, flavor='raw', zero=True)
    -    # TODO - fill in fields
    -    '''
    -    view.c_buf = buf
    -    view.c_len = length
    -    view.c_obj = obj
    -    Py_IncRef(space, obj)
    -    view.c_itemsize = 1
    -    rffi.setintfield(view, 'c_readonly', readonly)
    -    rffi.setintfield(view, 'c_ndim', 0)
    -    view.c_format = lltype.nullptr(rffi.CCHARP.TO)
    -    view.c_shape = lltype.nullptr(Py_ssize_tP.TO)
    -    view.c_strides = lltype.nullptr(Py_ssize_tP.TO)
    +    if not isinstance(w_obj, W_MemoryView):
    +        return view
    +    ndim = w_obj.buf.getndim()
    +    if ndim >= Py_MAX_NDIMS:
    +        # XXX warn?
    +        return view
    +    try:
    +        view.c_buf = rffi.cast(rffi.VOIDP, w_obj.buf.get_raw_address())
    +        view.c_obj = make_ref(space, w_obj)
    +        rffi.setintfield(view, 'c_readonly', w_obj.buf.readonly)
    +        isstr = False
    +    except ValueError:
    +        w_s = w_obj.descr_tobytes(space)
    +        view.c_obj = make_ref(space, w_s)
    +        rffi.setintfield(view, 'c_readonly', 1)
    +        isstr = True
    +    view.c_len = w_obj.getlength()
    +    view.c_itemsize = w_obj.buf.getitemsize()
    +    rffi.setintfield(view, 'c_ndim', ndim)
    +    view.c__format = rffi.cast(rffi.UCHAR, w_obj.buf.getformat())
    +    view.c_format = rffi.cast(rffi.CCHARP, view.c__format)
    +    view.c_shape = rffi.cast(Py_ssize_tP, view.c__shape)
    +    view.c_strides = rffi.cast(Py_ssize_tP, view.c__strides)
    +    shape = w_obj.buf.getshape()
    +    strides = w_obj.buf.getstrides()
    +    for i in range(ndim):
    +        view.c_shape[i] = shape[i]
    +        view.c_strides[i] = strides[i]
         view.c_suboffsets = lltype.nullptr(Py_ssize_tP.TO)
         view.c_internal = lltype.nullptr(rffi.VOIDP.TO)
    -    ''' 
         return view
     
    -
     @cpython_api([lltype.Ptr(Py_buffer)], PyObject)
     def PyMemoryView_FromBuffer(space, view):
         """Create a memoryview object wrapping the given buffer structure view.
    diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py
    --- a/pypy/module/cpyext/slotdefs.py
    +++ b/pypy/module/cpyext/slotdefs.py
    @@ -335,9 +335,15 @@
         def getshape(self):
             return self.shape
     
    +    def getstrides(self):
    +        return self.strides
    +
         def getitemsize(self):
             return self.itemsize
     
    +    def getndim(self):
    +        return self.ndim
    +
     def wrap_getbuffer(space, w_self, w_args, func):
         func_target = rffi.cast(getbufferproc, func)
         with lltype.scoped_alloc(Py_buffer) as pybuf:
    diff --git a/pypy/module/cpyext/test/buffer_test.c b/pypy/module/cpyext/test/buffer_test.c
    --- a/pypy/module/cpyext/test/buffer_test.c
    +++ b/pypy/module/cpyext/test/buffer_test.c
    @@ -107,14 +107,11 @@
     PyMyArray_getbuffer(PyObject *obj, Py_buffer *view, int flags)
     {
       PyMyArray* self = (PyMyArray*)obj;
    -  fprintf(stdout, "in PyMyArray_getbuffer\n");
       if (view == NULL) {
    -    fprintf(stdout, "view is NULL\n");
         PyErr_SetString(PyExc_ValueError, "NULL view in getbuffer");
         return -1;
       }
       if (flags == 0) {
    -    fprintf(stdout, "flags is 0\n");
         PyErr_SetString(PyExc_ValueError, "flags == 0 in getbuffer");
         return -1;
       }
    @@ -188,7 +185,131 @@
         (initproc)PyMyArray_init,     /* tp_init */
     };
     
    +static PyObject*
    +test_buffer(PyObject* self, PyObject* args)
    +{
    +    Py_buffer* view = NULL;
    +    PyObject* obj = PyTuple_GetItem(args, 0);
    +    PyObject* memoryview = PyMemoryView_FromObject(obj);
    +    if (memoryview == NULL)
    +        return PyInt_FromLong(-1);
    +    view = PyMemoryView_GET_BUFFER(memoryview);
    +    Py_DECREF(memoryview);
    +    return PyInt_FromLong(view->len);
    +}
    +
    +/* Copied from numpy tests */
    +/*
    + * Create python string from a FLAG and or the corresponding PyBuf flag
    + * for the use in get_buffer_info.
    + */
    +#define GET_PYBUF_FLAG(FLAG)                                        \
    +    buf_flag = PyUnicode_FromString(#FLAG);                         \
    +    flag_matches = PyObject_RichCompareBool(buf_flag, tmp, Py_EQ);  \
    +    Py_DECREF(buf_flag);                                            \
    +    if (flag_matches == 1) {                                        \
    +        Py_DECREF(tmp);                                             \
    +        flags |= PyBUF_##FLAG;                                      \
    +        continue;                                                   \
    +    }                                                               \
    +    else if (flag_matches == -1) {                                  \
    +        Py_DECREF(tmp);                                             \
    +        return NULL;                                                \
    +    }
    +
    +
    +/*
    + * Get information for a buffer through PyBuf_GetBuffer with the
    + * corresponding flags or'ed. Note that the python caller has to
    + * make sure that or'ing those flags actually makes sense.
    + * More information should probably be returned for future tests.
    + */
    +static PyObject *
    +get_buffer_info(PyObject *self, PyObject *args)
    +{
    +    PyObject *buffer_obj, *pyflags;
    +    PyObject *tmp, *buf_flag;
    +    Py_buffer buffer;
    +    PyObject *shape, *strides;
    +    Py_ssize_t i, n;
    +    int flag_matches;
    +    int flags = 0;
    +
    +    if (!PyArg_ParseTuple(args, "OO", &buffer_obj, &pyflags)) {
    +        return NULL;
    +    }
    +
    +    n = PySequence_Length(pyflags);
    +    if (n < 0) {
    +        return NULL;
    +    }
    +
    +    for (i=0; i < n; i++) {
    +        tmp = PySequence_GetItem(pyflags, i);
    +        if (tmp == NULL) {
    +            return NULL;
    +        }
    +
    +        GET_PYBUF_FLAG(SIMPLE);
    +        GET_PYBUF_FLAG(WRITABLE);
    +        GET_PYBUF_FLAG(STRIDES);
    +        GET_PYBUF_FLAG(ND);
    +        GET_PYBUF_FLAG(C_CONTIGUOUS);
    +        GET_PYBUF_FLAG(F_CONTIGUOUS);
    +        GET_PYBUF_FLAG(ANY_CONTIGUOUS);
    +        GET_PYBUF_FLAG(INDIRECT);
    +        GET_PYBUF_FLAG(FORMAT);
    +        GET_PYBUF_FLAG(STRIDED);
    +        GET_PYBUF_FLAG(STRIDED_RO);
    +        GET_PYBUF_FLAG(RECORDS);
    +        GET_PYBUF_FLAG(RECORDS_RO);
    +        GET_PYBUF_FLAG(FULL);
    +        GET_PYBUF_FLAG(FULL_RO);
    +        GET_PYBUF_FLAG(CONTIG);
    +        GET_PYBUF_FLAG(CONTIG_RO);
    +
    +        Py_DECREF(tmp);
    +
    +        /* One of the flags must match */
    +        PyErr_SetString(PyExc_ValueError, "invalid flag used.");
    +        return NULL;
    +    }
    +
    +    if (PyObject_GetBuffer(buffer_obj, &buffer, flags) < 0) {
    +        return NULL;
    +    }
    +
    +    if (buffer.shape == NULL) {
    +        Py_INCREF(Py_None);
    +        shape = Py_None;
    +    }
    +    else {
    +        shape = PyTuple_New(buffer.ndim);
    +        for (i=0; i < buffer.ndim; i++) {
    +            PyTuple_SET_ITEM(shape, i, PyLong_FromSsize_t(buffer.shape[i]));
    +        }
    +    }
    +
    +    if (buffer.strides == NULL) {
    +        Py_INCREF(Py_None);
    +        strides = Py_None;
    +    }
    +    else {
    +        strides = PyTuple_New(buffer.ndim);
    +        for (i=0; i < buffer.ndim; i++) {
    +            PyTuple_SET_ITEM(strides, i, PyLong_FromSsize_t(buffer.strides[i]));
    +        }
    +    }
    +
    +    PyBuffer_Release(&buffer);
    +    return Py_BuildValue("(NN)", shape, strides);
    +}
    +
    +
    +
     static PyMethodDef buffer_functions[] = {
    +    {"test_buffer",   (PyCFunction)test_buffer, METH_VARARGS, NULL},
    +    {"get_buffer_info",   (PyCFunction)get_buffer_info, METH_VARARGS, NULL},
         {NULL,        NULL}    /* Sentinel */
     };
     
    diff --git a/pypy/module/cpyext/test/test_bytesobject.py b/pypy/module/cpyext/test/test_bytesobject.py
    --- a/pypy/module/cpyext/test/test_bytesobject.py
    +++ b/pypy/module/cpyext/test/test_bytesobject.py
    @@ -179,8 +179,27 @@
                      Py_INCREF(Py_None);
                      return Py_None;
                  """),
    +            ("c_only", "METH_NOARGS",
    +            """
    +                int ret;
    +                char * buf2;
    +                PyObject * obj = PyBytes_FromStringAndSize(NULL, 1024);
    +                if (!obj)
    +                    return NULL;
    +                buf2 = PyBytes_AsString(obj);
    +                if (!buf2)
    +                    return NULL;
    +                /* buf should not have been forced, issue #2395 */
    +                ret = _PyBytes_Resize(&obj, 512);
    +                if (ret < 0)
    +                    return NULL;
    +                 Py_DECREF(obj);
    +                 Py_INCREF(Py_None);
    +                 return Py_None;
    +            """),
                 ])
             module.getbytes()
    +        module.c_only()
     
     
     class TestBytes(BaseApiTest):
    diff --git a/pypy/module/cpyext/test/test_memoryobject.py b/pypy/module/cpyext/test/test_memoryobject.py
    --- a/pypy/module/cpyext/test/test_memoryobject.py
    +++ b/pypy/module/cpyext/test/test_memoryobject.py
    @@ -1,6 +1,6 @@
     from pypy.module.cpyext.test.test_api import BaseApiTest
     from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase
    -
    +from rpython.rlib.buffer import StringBuffer
     class TestMemoryViewObject(BaseApiTest):
         def test_fromobject(self, space, api):
             w_hello = space.newbytes("hello")
    @@ -11,6 +11,13 @@
             w_bytes = space.call_method(w_view, "tobytes")
             assert space.unwrap(w_bytes) == "hello"
     
    +    def test_frombuffer(self, space, api):
    +        w_buf = space.newbuffer(StringBuffer("hello"))
    +        w_memoryview = api.PyMemoryView_FromObject(w_buf)
    +        w_view = api.PyMemoryView_GET_BUFFER(w_memoryview)
    +        ndim = w_view.c_ndim
    +        assert ndim == 1
    +
     class AppTestPyBuffer_FillInfo(AppTestCpythonExtensionBase):
         def test_fillWithObject(self):
             module = self.import_extension('foo', [
    @@ -62,6 +69,25 @@
             y = memoryview(arr)
             assert y.format == 'i'
             assert y.shape == (10,)
    +        assert len(y) == 10
             s = y[3]
             assert len(s) == struct.calcsize('i')
             assert s == struct.pack('i', 3)
    +        viewlen = module.test_buffer(arr)
    +        assert viewlen == y.itemsize * len(y)
    +
    +    def test_buffer_info(self):
    +        from _numpypy import multiarray as np
    +        module = self.import_module(name='buffer_test')
    +        get_buffer_info = module.get_buffer_info
    +        # test_export_flags from numpy test_multiarray
    +        raises(ValueError, get_buffer_info, np.arange(5)[::2], ('SIMPLE',))
    +        # test_relaxed_strides from numpy test_multiarray
    +        arr = np.zeros((1, 10))
    +        if arr.flags.f_contiguous:
    +            shape, strides = get_buffer_info(arr, ['F_CONTIGUOUS'])
    +            assert strides[0] == 8
    +            arr = np.ones((10, 1), order='F')
    +            shape, strides = get_buffer_info(arr, ['C_CONTIGUOUS'])
    +            assert strides[-1] == 8
    +
    diff --git a/pypy/module/cpyext/test/test_version.py b/pypy/module/cpyext/test/test_version.py
    --- a/pypy/module/cpyext/test/test_version.py
    +++ b/pypy/module/cpyext/test/test_version.py
    @@ -41,9 +41,11 @@
             assert module.py_minor_version == sys.version_info.minor
             assert module.py_micro_version == sys.version_info.micro
     
    -    @pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='pypy only test')
    +    #@pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='pypy only test')
         def test_pypy_versions(self):
             import sys
    +        if '__pypy__' not in sys.builtin_module_names:
    +            py.test.skip("pypy only test")
             init = """
             if (Py_IsInitialized()) {
                 PyObject *m = Py_InitModule("foo", NULL);
    diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py
    --- a/pypy/module/cpyext/typeobject.py
    +++ b/pypy/module/cpyext/typeobject.py
    @@ -293,6 +293,8 @@
                         STRUCT_TYPE = PyNumberMethods
                     elif slot_names[0] == 'c_tp_as_sequence':
                         STRUCT_TYPE = PySequenceMethods
    +                elif slot_names[0] == 'c_tp_as_buffer':
    +                    STRUCT_TYPE = PyBufferProcs
                     else:
                         raise AssertionError(
                             "Structure not allocated: %s" % (slot_names[0],))
    diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py
    --- a/pypy/module/micronumpy/compile.py
    +++ b/pypy/module/micronumpy/compile.py
    @@ -460,6 +460,9 @@
         def getdictvalue(self, space, key):
             return self.items[key]
     
    +    def descr_memoryview(self, space, buf):
    +        raise oefmt(space.w_TypeError, "error")
    +
     class IterDictObject(W_Root):
         def __init__(self, space, w_dict):
             self.space = space
    diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py
    --- a/pypy/module/micronumpy/concrete.py
    +++ b/pypy/module/micronumpy/concrete.py
    @@ -377,7 +377,25 @@
         def __exit__(self, typ, value, traceback):
             keepalive_until_here(self)
     
    -    def get_buffer(self, space, readonly):
    +    def get_buffer(self, space, flags):
    +        errtype = space.w_ValueError # should be BufferError, numpy does this instead
    +        if ((flags & space.BUF_C_CONTIGUOUS) == space.BUF_C_CONTIGUOUS and 
    +                not self.flags & NPY.ARRAY_C_CONTIGUOUS):
    +           raise oefmt(errtype, "ndarray is not C-contiguous")
    +        if ((flags & space.BUF_F_CONTIGUOUS) == space.BUF_F_CONTIGUOUS and 
    +                not self.flags & NPY.ARRAY_F_CONTIGUOUS):
    +           raise oefmt(errtype, "ndarray is not Fortran contiguous")
    +        if ((flags & space.BUF_ANY_CONTIGUOUS) == space.BUF_ANY_CONTIGUOUS and
    +                not (self.flags & NPY.ARRAY_F_CONTIGUOUS and 
    +                     self.flags & NPY.ARRAY_C_CONTIGUOUS)):
    +           raise oefmt(errtype, "ndarray is not contiguous")
    +        if ((flags & space.BUF_STRIDES) != space.BUF_STRIDES and
    +                not self.flags & NPY.ARRAY_C_CONTIGUOUS):
    +           raise oefmt(errtype, "ndarray is not C-contiguous")
    +        if ((flags & space.BUF_WRITABLE) == space.BUF_WRITABLE and
    +            not self.flags & NPY.ARRAY_WRITEABLE):
    +           raise oefmt(errtype, "buffer source array is read-only")
    +        readonly = not (flags & space.BUF_WRITABLE) == space.BUF_WRITABLE
             return ArrayBuffer(self, readonly)
     
         def astype(self, space, dtype, order, copy=True):
    @@ -695,6 +713,7 @@
                      index + self.impl.start)
     
         def setitem(self, index, v):
    +        # XXX what if self.readonly?
             raw_storage_setitem(self.impl.storage, index + self.impl.start,
                                 rffi.cast(lltype.Char, v))
     
    diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py
    --- a/pypy/module/micronumpy/ctors.py
    +++ b/pypy/module/micronumpy/ctors.py
    @@ -1,4 +1,5 @@
     from pypy.interpreter.error import OperationError, oefmt
    +from pypy.interpreter.baseobjspace import BufferInterfaceNotFound
     from pypy.interpreter.gateway import unwrap_spec, WrappedDefault
     from rpython.rlib.buffer import SubBuffer
     from rpython.rlib.rstring import strip_spaces
    @@ -42,7 +43,7 @@
             raise oefmt(space.w_ValueError,
                         "object __array__ method not producing an array")
     
    -def try_interface_method(space, w_object):
    +def try_interface_method(space, w_object, copy):
         try:
             w_interface = space.getattr(w_object, space.wrap("__array_interface__"))
             if w_interface is None:
    @@ -81,17 +82,20 @@
                 raise oefmt(space.w_ValueError,
                         "__array_interface__ could not decode dtype %R", w_dtype
                         )
    -        if w_data is not None and (space.isinstance_w(w_data, space.w_tuple) or space.isinstance_w(w_data, space.w_list)):
    +        if w_data is not None and (space.isinstance_w(w_data, space.w_tuple) or
    +                                   space.isinstance_w(w_data, space.w_list)):
                 data_w = space.listview(w_data)
    -            data = rffi.cast(RAW_STORAGE_PTR, space.int_w(data_w[0]))
    -            read_only = True # XXX why not space.is_true(data_w[1])
    +            w_data = rffi.cast(RAW_STORAGE_PTR, space.int_w(data_w[0]))
    +            read_only = space.is_true(data_w[1]) or copy
                 offset = 0
    -            return W_NDimArray.from_shape_and_storage(space, shape, data, 
    -                                    dtype, strides=strides, start=offset), read_only
    +            w_base = w_object
    +            if read_only:
    +                w_base = None
    +            return W_NDimArray.from_shape_and_storage(space, shape, w_data, 
    +                                dtype, w_base=w_base, strides=strides,
    +                                start=offset), read_only
             if w_data is None:
    -            data = w_object
    -        else:
    -            data = w_data
    +            w_data = w_object
             w_offset = space.finditem(w_interface, space.wrap('offset'))
             if w_offset is None:
                 offset = 0
    @@ -101,7 +105,7 @@
             if strides is not None:
                 raise oefmt(space.w_NotImplementedError,
                        "__array_interface__ strides not fully supported yet") 
    -        arr = frombuffer(space, data, dtype, support.product(shape), offset)
    +        arr = frombuffer(space, w_data, dtype, support.product(shape), offset)
             new_impl = arr.implementation.reshape(arr, shape)
             return W_NDimArray(new_impl), False
             
    @@ -110,6 +114,78 @@
                 return None, False
             raise
     
    +def _descriptor_from_pep3118_format(space, c_format):
    +    descr = descriptor.decode_w_dtype(space, space.wrap(c_format))
    +    if descr:
    +        return descr
    +    msg = "invalid PEP 3118 format string: '%s'" % c_format
    +    space.warn(space.wrap(msg), space.w_RuntimeWarning)
    +    return None 
    +
    +def _array_from_buffer_3118(space, w_object, dtype):
    +    try:
    +        w_buf = space.call_method(space.builtin, "memoryview", w_object)
    +    except OperationError as e:
    +        if e.match(space, space.w_TypeError):
    +            # object does not have buffer interface
    +            return w_object
    +        raise
    +    format = space.getattr(w_buf,space.newbytes('format'))
    +    if format:
    +        descr = _descriptor_from_pep3118_format(space, space.str_w(format))
    +        if not descr:
    +            return w_object
    +        if dtype and descr:
    +            raise oefmt(space.w_NotImplementedError,
    +                "creating an array from a memoryview while specifying dtype "
    +                "not supported")
    +        if descr.elsize != space.int_w(space.getattr(w_buf, space.newbytes('itemsize'))): 
    +            msg = ("Item size computed from the PEP 3118 buffer format "
    +                  "string does not match the actual item size.")
    +            space.warn(space.wrap(msg), space.w_RuntimeWarning)
    +            return w_object
    +        dtype = descr 
    +    elif not dtype:
    +        dtype = descriptor.get_dtype_cache(space).w_stringdtype
    +        dtype.elsize = space.int_w(space.getattr(w_buf, space.newbytes('itemsize')))
    +    nd = space.int_w(space.getattr(w_buf, space.newbytes('ndim')))
    +    shape = [space.int_w(d) for d in space.listview(
    +                            space.getattr(w_buf, space.newbytes('shape')))]
    +    strides = []
    +    buflen = space.len_w(w_buf) * dtype.elsize
    +    if shape:
    +        strides = [space.int_w(d) for d in space.listview(
    +                            space.getattr(w_buf, space.newbytes('strides')))]
    +        if not strides:
    +            d = buflen
    +            strides = [0] * nd
    +            for k in range(nd):
    +                if shape[k] > 0:
    +                    d /= shape[k]
    +                    strides[k] = d
    +    else:
    +        if nd == 1:
    +            shape = [buflen / dtype.elsize, ]
    +            strides = [dtype.elsize, ]
    +        elif nd > 1:
    +            msg = ("ndim computed from the PEP 3118 buffer format "
    +                   "is greater than 1, but shape is NULL.")
    +            space.warn(space.wrap(msg), space.w_RuntimeWarning)
    +            return w_object
    +    try:
    +        w_data = rffi.cast(RAW_STORAGE_PTR, space.int_w(space.call_method(w_buf, '_pypy_raw_address')))
    +    except OperationError as e:
    +        if e.match(space, space.w_ValueError):
    +            return w_object
    +        else:
    +            raise e
    +    writable = not space.bool_w(space.getattr(w_buf, space.newbytes('readonly')))
    +    w_ret = W_NDimArray.from_shape_and_storage(space, shape, w_data,
    +               storage_bytes=buflen, dtype=dtype, w_base=w_object, 
    +               writable=writable, strides=strides)
    +    if w_ret:
    +        return w_ret
    +    return w_object
     
     @unwrap_spec(ndmin=int, copy=bool, subok=bool)
     def array(space, w_object, w_dtype=None, copy=True, w_order=None, subok=False,
    @@ -127,6 +203,7 @@
     
     def _array(space, w_object, w_dtype=None, copy=True, w_order=None, subok=False):
     
    +    from pypy.module.micronumpy.boxes import W_GenericBox
         # numpy testing calls array(type(array([]))) and expects a ValueError
         if space.isinstance_w(w_object, space.w_type):
             raise oefmt(space.w_ValueError, "cannot create ndarray from type instance")
    @@ -134,13 +211,19 @@
         dtype = descriptor.decode_w_dtype(space, w_dtype)
         if not isinstance(w_object, W_NDimArray):
             w_array = try_array_method(space, w_object, w_dtype)
    -        if w_array is not None:
    +        if w_array is None:
    +            if (    not space.isinstance_w(w_object, space.w_str) and 
    +                    not space.isinstance_w(w_object, space.w_unicode) and
    +                    not isinstance(w_object, W_GenericBox)):
    +                # use buffer interface
    +                w_object = _array_from_buffer_3118(space, w_object, dtype)
    +        else:
                 # continue with w_array, but do further operations in place
                 w_object = w_array
                 copy = False
                 dtype = w_object.get_dtype()
         if not isinstance(w_object, W_NDimArray):
    -        w_array, _copy = try_interface_method(space, w_object)
    +        w_array, _copy = try_interface_method(space, w_object, copy)
             if w_array is not None:
                 w_object = w_array
                 copy = _copy
    diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py
    --- a/pypy/module/micronumpy/ndarray.py
    +++ b/pypy/module/micronumpy/ndarray.py
    @@ -806,10 +806,10 @@
     
         def buffer_w(self, space, flags):
             # XXX format isn't always 'B' probably
    -        return self.implementation.get_buffer(space, True)
    +        return self.implementation.get_buffer(space, flags)
     
         def descr_get_data(self, space):
    -        return space.newbuffer(self.implementation.get_buffer(space, False))
    +        return space.newbuffer(self.implementation.get_buffer(space, space.BUF_FULL))
     
         @unwrap_spec(offset=int, axis1=int, axis2=int)
         def descr_diagonal(self, space, offset=0, axis1=0, axis2=1):
    diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py
    --- a/pypy/module/micronumpy/test/test_ndarray.py
    +++ b/pypy/module/micronumpy/test/test_ndarray.py
    @@ -3206,7 +3206,9 @@
             raises(TypeError, array, Dummy({'version': 3, 'typestr': 'f8', 'shape': ('a', 3)}))
     
             a = array([1, 2, 3])
    -        b = array(Dummy(a.__array_interface__))
    +        d = Dummy(a.__array_interface__)
    +        b = array(d)
    +        assert b.base is None
             b[1] = 200
             assert a[1] == 2 # upstream compatibility, is this a bug?
             interface_a = a.__array_interface__
    @@ -3217,6 +3219,8 @@
             interface_b.pop('data')
             interface_a.pop('data')
             assert interface_a == interface_b
    +        b = array(d, copy=False)
    +        assert b.base is d
     
             b = array(Dummy({'version':3, 'shape': (50,), 'typestr': 'u1',
                              'data': 'a'*100}))
    @@ -3585,6 +3589,7 @@
             cls.w_float32val = cls.space.wrap(struct.pack('f', 5.2))
             cls.w_float64val = cls.space.wrap(struct.pack('d', 300.4))
             cls.w_ulongval = cls.space.wrap(struct.pack('L', 12))
    +        cls.w_one = cls.space.wrap(struct.pack('i', 1))
     
         def test_frombuffer(self):
             import numpy as np
    @@ -3636,8 +3641,6 @@
             else:
                 EMPTY = None
             x = np.array([1, 2, 3, 4, 5], dtype='i')
    -        y = memoryview('abc')
    -        assert y.format == 'B'
             y = memoryview(x)
             assert y.format == 'i'
             assert y.shape == (5,)
    @@ -3645,6 +3648,16 @@
             assert y.strides == (4,)
             assert y.suboffsets == EMPTY
             assert y.itemsize == 4
    +        assert isinstance(y, memoryview)
    +        assert y[0] == self.one
    +        assert (np.array(y) == x).all()
    +
    +        x = np.array([0, 0, 0, 0], dtype='O')
    +        y = memoryview(x)
    +        # handles conversion of address to pinned object?
    +        z = np.array(y)
    +        assert z.dtype == 'O'
    +        assert (z == x).all()
     
         def test_fromstring(self):
             import sys
    diff --git a/pypy/module/micronumpy/test/test_subtype.py b/pypy/module/micronumpy/test/test_subtype.py
    --- a/pypy/module/micronumpy/test/test_subtype.py
    +++ b/pypy/module/micronumpy/test/test_subtype.py
    @@ -702,3 +702,32 @@
             ret = obj.sum()
             print type(ret)
             assert ret.info == 'spam'
    +
    +    def test_ndarray_subclass_assigns_base(self):
    +        import numpy as np
    +        init_called = []
    +        class _DummyArray(object):
    +            """ Dummy object that just exists to hang __array_interface__ dictionaries
    +            and possibly keep alive a reference to a base array.
    +            """
    +            def __init__(self, interface, base=None):
    +                self.__array_interface__ = interface
    +                init_called.append(1)
    +                self.base = base
    +
    +        x = np.zeros(10)
    +        d = _DummyArray(x.__array_interface__, base=x)
    +        y = np.array(d, copy=False)
    +        assert sum(init_called) == 1
    +        assert y.base is d
    +
    +        x = np.zeros((0,), dtype='float32')
    +        intf = x.__array_interface__.copy()
    +        intf["strides"] = x.strides
    +        x.__array_interface__["strides"] = x.strides
    +        d = _DummyArray(x.__array_interface__, base=x)
    +        y = np.array(d, copy=False)
    +        assert sum(init_called) == 2
    +        assert y.base is d
    +
    +
    diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py
    --- a/pypy/module/micronumpy/types.py
    +++ b/pypy/module/micronumpy/types.py
    @@ -1851,7 +1851,7 @@
                         arr.gcstruct)
     
         def read(self, arr, i, offset, dtype):
    -        if arr.gcstruct is V_OBJECTSTORE:
    +        if arr.gcstruct is V_OBJECTSTORE and not arr.base():
                 raise oefmt(self.space.w_NotImplementedError,
                     "cannot read object from array with no gc hook")
             return self.box(self._read(arr.storage, i, offset))
    diff --git a/pypy/module/pypyjit/test_pypy_c/test_import.py b/pypy/module/pypyjit/test_pypy_c/test_import.py
    --- a/pypy/module/pypyjit/test_pypy_c/test_import.py
    +++ b/pypy/module/pypyjit/test_pypy_c/test_import.py
    @@ -38,3 +38,27 @@
             # call_may_force(absolute_import_with_lock).
             for opname in log.opnames(loop.allops(opcode="IMPORT_NAME")):
                 assert 'call' not in opname    # no call-like opcode
    +
    +    def test_import_fast_path(self, tmpdir):
    +        print tmpdir
    +        pkg = tmpdir.join('mypkg').ensure(dir=True)
    +        subdir = pkg.join("sub").ensure(dir=True)
    +        pkg.join('__init__.py').write("")
    +        subdir.join('__init__.py').write("")
    +        subdir.join('mod.py').write(str(py.code.Source("""
    +            def do_the_import():
    +                import sys
    +        """)))
    +        def main(path, n):
    +            def do_the_import():
    +                from mypkg.sub import mod
    +            import sys
    +            sys.path.append(path)
    +            for i in range(n):
    +                do_the_import()
    +        #
    +        log = self.run(main, [str(tmpdir), 300])
    +        loop, = log.loops_by_filename(self.filepath)
    +        # check that no string compares and other calls are there
    +        for opname in log.opnames(loop.allops(opcode="IMPORT_NAME")):
    +            assert 'call' not in opname    # no call-like opcode
    diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py
    --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py
    +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py
    @@ -187,6 +187,43 @@
     
             """)
     
    +    def test_oldstyle_methcall(self):
    +        def main():
    +            def g(): pass
    +            class A:
    +                def f(self):
    +                    return self.x + 1
    +            class I(A):
    +                pass
    +            class J(I):
    +                pass
    +
    +
    +            class B(J):
    +                def __init__(self, x):
    +                    self.x = x
    +
    +            i = 0
    +            b = B(1)
    +            while i < 1000:
    +                g()
    +                v = b.f() # ID: meth
    +                i += v
    +            return i
    +
    +        log = self.run(main, [], threshold=80)
    +        loop, = log.loops_by_filename(self.filepath, is_entry_bridge=True)
    +        assert loop.match_by_id('meth',
    +        '''
    +    guard_nonnull_class(p18, ..., descr=...)
    +    p52 = getfield_gc_r(p18, descr=...) # read map
    +    guard_value(p52, ConstPtr(ptr53), descr=...)
    +    p54 = getfield_gc_r(p18, descr=...) # read class
    +    guard_value(p54, ConstPtr(ptr55), descr=...)
    +    p56 = force_token() # done
    +        ''')
    +
    +
         def test_oldstyle_newstyle_mix(self):
             def main():
                 class A:
    diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py
    --- a/pypy/module/sys/version.py
    +++ b/pypy/module/sys/version.py
    @@ -10,7 +10,7 @@
     #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py
     CPYTHON_API_VERSION        = 1013   #XXX # sync with include/modsupport.h
     
    -PYPY_VERSION               = (5, 4, 1, "alpha", 0)    #XXX # sync patchlevel.h
    +PYPY_VERSION               = (5, 5, 0, "alpha", 0)    #XXX # sync patchlevel.h
     
     
     import pypy
    diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py
    --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py
    +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py
    @@ -1415,6 +1415,7 @@
             assert p.b == 12
             assert p.c == 14
             assert p.d == 14
    +        py.test.raises(ValueError, ffi.new, "struct foo_s *", [0, 0, 0, 0])
     
         def test_nested_field_offset_align(self):
             ffi = FFI(backend=self.Backend())
    @@ -1454,14 +1455,42 @@
             assert p.b == 0
             assert p.c == 14
             assert p.d == 14
    -        p = ffi.new("union foo_u *", {'b': 12})
    -        assert p.a == 0
    +        p = ffi.new("union foo_u *", {'a': -63, 'b': 12})
    +        assert p.a == -63
             assert p.b == 12
    -        assert p.c == 0
    -        assert p.d == 0
    -        # we cannot specify several items in the dict, even though
    -        # in theory in this particular case it would make sense
    -        # to give both 'a' and 'b'
    +        assert p.c == -63
    +        assert p.d == -63
    +        p = ffi.new("union foo_u *", [123, 456])
    +        assert p.a == 123
    +        assert p.b == 456
    +        assert p.c == 123
    +        assert p.d == 123
    +        py.test.raises(ValueError, ffi.new, "union foo_u *", [0, 0, 0])
    +
    +    def test_nested_anonymous_struct_2(self):
    +        ffi = FFI(backend=self.Backend())
    +        ffi.cdef("""
    +            struct foo_s {
    +                int a;
    +                union { int b; union { int c, d; }; };
    +                int e;
    +            };
    +        """)
    +        assert ffi.sizeof("struct foo_s") == 3 * SIZE_OF_INT
    +        p = ffi.new("struct foo_s *", [11, 22, 33])
    +        assert p.a == 11
    +        assert p.b == p.c == p.d == 22
    +        assert p.e == 33
    +        py.test.raises(ValueError, ffi.new, "struct foo_s *", [11, 22, 33, 44])
    +        FOO = ffi.typeof("struct foo_s")
    +        fields = [(name, fld.offset, fld.flags) for (name, fld) in FOO.fields]
    +        assert fields == [
    +            ('a', 0 * SIZE_OF_INT, 0),
    +            ('b', 1 * SIZE_OF_INT, 0),
    +            ('c', 1 * SIZE_OF_INT, 1),
    +            ('d', 1 * SIZE_OF_INT, 1),
    +            ('e', 2 * SIZE_OF_INT, 0),
    +        ]
     
         def test_cast_to_array_type(self):
             ffi = FFI(backend=self.Backend())
    @@ -1479,6 +1508,7 @@
                 assert p1[0] == 123
                 seen.append(1)
             q = ffi.gc(p, destructor)
    +        assert ffi.typeof(q) is ffi.typeof(p)
             import gc; gc.collect()
             assert seen == []
             del q
    diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ctypes.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ctypes.py
    --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ctypes.py
    +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ctypes.py
    @@ -35,6 +35,9 @@
         def test_nested_anonymous_union(self):
             py.test.skip("ctypes backend: not supported: nested anonymous union")
     
    +    def test_nested_anonymous_struct_2(self):
    +        py.test.skip("ctypes backend: not supported: nested anonymous union")
    +
         def test_CData_CType_2(self):
             if sys.version_info >= (3,):
                 py.test.skip("ctypes backend: not supported in Python 3: CType")
    diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py
    --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py
    +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py
    @@ -149,3 +149,28 @@
             p = snip_setuptools_verify2.C.getpwuid(0)
             assert snip_setuptools_verify2.ffi.string(p.pw_name) == b"root"
             ''')
    +
    +    def test_set_py_limited_api(self):
    +        from cffi.setuptools_ext import _set_py_limited_api
    +        try:
    +            import setuptools
    +        except ImportError as e:
    +            py.test.skip(str(e))
    +        orig_version = setuptools.__version__
    +        try:
    +            setuptools.__version__ = '26.0.0'
    +            from setuptools import Extension
    +
    +            kwds = _set_py_limited_api(Extension, {})
    +            assert kwds['py_limited_api'] == True
    +
    +            setuptools.__version__ = '25.0'
    +            kwds = _set_py_limited_api(Extension, {})
    +            assert not kwds
    +
    +            setuptools.__version__ = 'development'
    +            kwds = _set_py_limited_api(Extension, {})
    +            assert kwds['py_limited_api'] == True
    +
    +        finally:
    +            setuptools.__version__ = orig_version
    diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
    --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
    +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
    @@ -1975,9 +1975,9 @@
     
     def test_function_returns_partial_struct():
         ffi = FFI()
    -    ffi.cdef("struct a { int a; ...; }; struct a f1(int);")
    +    ffi.cdef("struct aaa { int a; ...; }; struct aaa f1(int);")
         lib = verify(ffi, "test_function_returns_partial_struct", """
    -        struct a { int b, a, c; };
    -        static struct a f1(int x) { struct a s = {0}; s.a = x; return s; }
    +        struct aaa { int b, a, c; };
    +        static struct aaa f1(int x) { struct aaa s = {0}; s.a = x; return s; }
         """)
         assert lib.f1(52).a == 52
    diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_numbers.py b/pypy/module/test_lib_pypy/ctypes_tests/test_numbers.py
    --- a/pypy/module/test_lib_pypy/ctypes_tests/test_numbers.py
    +++ b/pypy/module/test_lib_pypy/ctypes_tests/test_numbers.py
    @@ -194,6 +194,29 @@
                 _fields_ = [('t', enum)]
             assert isinstance(S().t, enum)
     
    +    def test_no_missing_shape_to_ffi_type(self):
    +        # whitebox test
    +        import sys
    +        if '__pypy__' not in sys.builtin_module_names:
    +            skip("only for pypy's ctypes")
    +        skip("re-enable after adding 'g' to _shape_to_ffi_type.typemap, "
    +             "which I think needs fighting all the way up from "
    +             "rpython.rlib.libffi")
    +        from _ctypes.basics import _shape_to_ffi_type
    +        from _rawffi import Array
    +        for i in range(1, 256):
    +            try:
    +                Array(chr(i))
    +            except ValueError:
    +                pass
    +            else:
    +                assert chr(i) in _shape_to_ffi_type.typemap
    +
    +    @py.test.mark.xfail
    +    def test_pointer_to_long_double(self):
    +        import ctypes
    +        ctypes.POINTER(ctypes.c_longdouble)
    +
     ##    def test_perf(self):
     ##        check_perf()
     
    diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py
    --- a/pypy/objspace/std/celldict.py
    +++ b/pypy/objspace/std/celldict.py
    @@ -64,6 +64,8 @@
     
         def setitem_str(self, w_dict, key, w_value):
             cell = self.getdictvalue_no_unwrapping(w_dict, key)
    +        #if (key == '__package__' or key == "__path__") and cell is not None and w_value is not cell:
    +        #    print "WARNING", key, w_value, cell, self
             return self._setitem_str_cell_known(cell, w_dict, key, w_value)
     
         def _setitem_str_cell_known(self, cell, w_dict, key, w_value):
    diff --git a/pypy/objspace/std/test/test_random_attr.py b/pypy/objspace/std/test/test_random_attr.py
    --- a/pypy/objspace/std/test/test_random_attr.py
    +++ b/pypy/objspace/std/test/test_random_attr.py
    @@ -1,4 +1,5 @@
     import pytest
    +pytest.skip("This cannot possibly work on pypy3")
     import sys
     try:
         import __pypy__
    diff --git a/pypy/tool/release/repackage.sh b/pypy/tool/release/repackage.sh
    --- a/pypy/tool/release/repackage.sh
    +++ b/pypy/tool/release/repackage.sh
    @@ -1,7 +1,7 @@
     # Edit these appropriately before running this script
     maj=5
     min=4
    -rev=0
    +rev=1
     branchname=release-$maj.x  # ==OR== release-$maj.$min.x
     tagname=release-pypy2.7-v$maj.$min.$rev  # ==OR== release-$maj.$min
     
    diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py
    --- a/rpython/annotator/annrpython.py
    +++ b/rpython/annotator/annrpython.py
    @@ -164,8 +164,15 @@
                 # annotations that are passed in, and don't annotate the old
                 # graph -- it's already low-level operations!
                 for a, s_newarg in zip(block.inputargs, cells):
    -                s_oldarg = self.binding(a)
    -                assert annmodel.unionof(s_oldarg, s_newarg) == s_oldarg
    +                s_oldarg = a.annotation
    +                # XXX: Should use s_oldarg.contains(s_newarg) but that breaks
    +                # PyPy translation
    +                if annmodel.unionof(s_oldarg, s_newarg) != s_oldarg:
    +                    raise annmodel.AnnotatorError(
    +                        "Late-stage annotation is not allowed to modify the "
    +                        "existing annotation for variable %s: %s" %
    +                            (a, s_oldarg))
    +
             else:
                 assert not self.frozen
                 if block not in self.annotated:
    diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py
    --- a/rpython/annotator/binaryop.py
    +++ b/rpython/annotator/binaryop.py
    @@ -17,7 +17,7 @@
     from rpython.flowspace.model import Variable, Constant, const
     from rpython.flowspace.operation import op
     from rpython.rlib import rarithmetic
    -from rpython.annotator.model import AnnotatorError
    +from rpython.annotator.model import AnnotatorError, TLS
     
     BINARY_OPERATIONS = set([oper.opname for oper in op.__dict__.values()
                             if oper.dispatch == 2])
    @@ -436,6 +436,11 @@
     class __extend__(pairtype(SomeFloat, SomeFloat)):
     
         def union((flt1, flt2)):
    +        if not TLS.allow_int_to_float:
    +            # in this mode, if one of the two is actually the
    +            # subclass SomeInteger, complain
    +            if isinstance(flt1, SomeInteger) or isinstance(flt2, SomeInteger):
    +                raise UnionError(flt1, flt2)
             return SomeFloat()
     
         add = sub = mul = union
    diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py
    --- a/rpython/annotator/model.py
    +++ b/rpython/annotator/model.py
    @@ -44,6 +44,7 @@
         # A global attribute :-(  Patch it with 'True' to enable checking of
         # the no_nul attribute...
         check_str_without_nul = False
    +    allow_int_to_float = True
     TLS = State()
     
     class SomeObject(object):
    @@ -749,6 +750,7 @@
                     s1 = pair(s1, s2).union()
         else:
             # this is just a performance shortcut
    +        # XXX: This is a lie! Grep for no_side_effects_in_union and weep.
             if s1 != s2:
                 s1 = pair(s1, s2).union()
         return s1
    diff --git a/rpython/rlib/libffi.py b/rpython/rlib/libffi.py
    --- a/rpython/rlib/libffi.py
    +++ b/rpython/rlib/libffi.py
    @@ -47,6 +47,8 @@
             cls.ulonglong = clibffi.cast_type_to_ffitype(rffi.ULONGLONG)
             cls.signed = clibffi.cast_type_to_ffitype(rffi.SIGNED)
             cls.wchar_t = clibffi.cast_type_to_ffitype(lltype.UniChar)
    +        # XXX long double support: clibffi.ffi_type_longdouble, but then
    +        # XXX fix the whole rest of this file to add a case for long double
             del cls._import
     
         @staticmethod
    diff --git a/rpython/rlib/rdynload.py b/rpython/rlib/rdynload.py
    --- a/rpython/rlib/rdynload.py
    +++ b/rpython/rlib/rdynload.py
    @@ -98,8 +98,15 @@
             try:
                 ctypes.CDLL(name)
             except OSError as e:
    +            # common case: ctypes fails too, with the real dlerror()
    +            # message in str(e).  Return that error message.
                 return str(e)
             else:
    +            # uncommon case: may happen if 'name' is a linker script
    +            # (which the C-level dlopen() can't handle) and we are
    +            # directly running on pypy (whose implementation of ctypes
    +            # or cffi will resolve linker scripts).  In that case, 
    +            # unsure what we can do.
                 return ("opening %r with ctypes.CDLL() works, "
                         "but not with c_dlopen()??" % (name,))
     
    @@ -160,7 +167,18 @@
                 mode = _dlopen_default_mode()
             elif (mode & (RTLD_LAZY | RTLD_NOW)) == 0:
                 mode |= RTLD_NOW
    +        #
    +        # haaaack for 'pypy py.test -A' if libm.so is a linker script
    +        # (see reason in _dlerror_on_dlopen_untranslated())
    +        must_free = False
    +        if not we_are_translated() and platform.name == "linux":
    +            if name and rffi.charp2str(name) == 'libm.so':
    +                name = rffi.str2charp('libm.so.6')
    +                must_free = True
    +        #
             res = c_dlopen(name, rffi.cast(rffi.INT, mode))
    +        if must_free:
    +            rffi.free_charp(name)
             if not res:
                 if not we_are_translated():
                     err = _dlerror_on_dlopen_untranslated(name)
    diff --git a/rpython/rlib/rmarshal.py b/rpython/rlib/rmarshal.py
    --- a/rpython/rlib/rmarshal.py
    +++ b/rpython/rlib/rmarshal.py
    @@ -346,11 +346,15 @@
         # on s_bigger.  It relies on the fact that s_bigger was created with
         # an expression like 'annotation([s_item])' which returns a ListDef with
         # no bookkeeper, on which side-effects are not allowed.
    +    saved = annmodel.TLS.allow_int_to_float
         try:
    +        annmodel.TLS.allow_int_to_float = False
             s_union = annmodel.unionof(s_bigger, s_smaller)
             return s_bigger.contains(s_union)
         except (annmodel.UnionError, TooLateForChange):
             return False
    +    finally:
    +        annmodel.TLS.allow_int_to_float = saved
     
     
     class __extend__(pairtype(MTag, annmodel.SomeObject)):
    diff --git a/rpython/rlib/ropenssl.py b/rpython/rlib/ropenssl.py
    --- a/rpython/rlib/ropenssl.py
    +++ b/rpython/rlib/ropenssl.py
    @@ -97,6 +97,21 @@
     OPENSSL_VERSION_NUMBER = cconfig["OPENSSL_VERSION_NUMBER"]
     HAVE_TLSv1_2 = OPENSSL_VERSION_NUMBER >= 0x10001000
     
    +if OPENSSL_VERSION_NUMBER >= 0x10100000:
    +    eci.pre_include_bits = ()
    +    eci.post_include_bits = ()
    +    raise Exception("""OpenSSL version >= 1.1 not supported yet.
    +
    +    This program requires OpenSSL version 1.0.x, and may also
    +    work with LibreSSL or OpenSSL 0.9.x.  OpenSSL 1.1 is quite
    +    some work to update to; contributions are welcome.  Sorry,
    +    you need to install an older version of OpenSSL for now.
    +    Make sure this older version is the one picked up by this
    +    program when it runs the compiler.
    +    
    +    This is the configuration used: %r""" % (eci,))
    +
    +
     class CConfig:
         _compilation_info_ = eci
     
    diff --git a/rpython/rlib/runicode.py b/rpython/rlib/runicode.py
    --- a/rpython/rlib/runicode.py
    +++ b/rpython/rlib/runicode.py
    @@ -1,5 +1,5 @@
     import sys
    -from rpython.rlib.objectmodel import specialize, we_are_translated
    +from rpython.rlib.objectmodel import specialize, we_are_translated, enforceargs
     from rpython.rlib.rstring import StringBuilder, UnicodeBuilder
     from rpython.rlib.rarithmetic import r_uint, intmask, widen
     from rpython.rlib.unicodedata import unicodedb
    @@ -137,7 +137,29 @@
                                      result=result)
         return result.build(), pos
     
    - at specialize.argtype(6)
    +def _invalid_cont_byte(ordch):
    +    return ordch>>6 != 0x2    # 0b10
    +
    +_invalid_byte_2_of_2 = _invalid_cont_byte
    +_invalid_byte_3_of_3 = _invalid_cont_byte
    +_invalid_byte_3_of_4 = _invalid_cont_byte
    +_invalid_byte_4_of_4 = _invalid_cont_byte
    +
    + at enforceargs(allow_surrogates=bool)
    +def _invalid_byte_2_of_3(ordch1, ordch2, allow_surrogates):
    +    return (ordch2>>6 != 0x2 or    # 0b10
    +            (ordch1 == 0xe0 and ordch2 < 0xa0)
    +            # surrogates shouldn't be valid UTF-8!
    +            or (ordch1 == 0xed and ordch2 > 0x9f and not allow_surrogates))
    +
    +def _invalid_byte_2_of_4(ordch1, ordch2):
    +    return (ordch2>>6 != 0x2 or    # 0b10
    +            (ordch1 == 0xf0 and ordch2 < 0x90) or
    +            (ordch1 == 0xf4 and ordch2 > 0x8f))
    +
    +# note: this specialize() is here for rtyper/rstr.py, which calls this
    +# function too but with its own fixed errorhandler
    + at specialize.arg_or_var(4)
     def str_decode_utf_8_impl(s, size, errors, final, errorhandler,
                               allow_surrogates, result):
         if size == 0:
    @@ -157,22 +179,23 @@
             if pos + n > size:
                 if not final:
                     break
    +            # argh, this obscure block of code is mostly a copy of
    +            # what follows :-(
                 charsleft = size - pos - 1 # either 0, 1, 2
    -            # note: when we get the 'unexpected end of data' we don't care
    -            # about the pos anymore and we just ignore the value
    +            # note: when we get the 'unexpected end of data' we need
    +            # to care about the pos returned; it can be lower than size,
    +            # in case we need to continue running this loop
                 if not charsleft:
                     # there's only the start byte and nothing else
                     r, pos = errorhandler(errors, 'utf8',
                                           'unexpected end of data',
                                           s, pos, pos+1)
                     result.append(r)
    -                break
    
    From pypy.commits at gmail.com  Wed Sep  7 10:30:47 2016
    From: pypy.commits at gmail.com (rlamy)
    Date: Wed, 07 Sep 2016 07:30:47 -0700 (PDT)
    Subject: [pypy-commit] pypy py3k: fix bytebuffer test
    Message-ID: <57d02497.4a301c0a.60059.7ce2@mx.google.com>
    
    Author: Ronan Lamy 
    Branch: py3k
    Changeset: r86939:c68432b5be9f
    Date: 2016-09-07 15:29 +0100
    http://bitbucket.org/pypy/pypy/changeset/c68432b5be9f/
    
    Log:	fix bytebuffer test
    
    diff --git a/pypy/module/__pypy__/test/test_bytebuffer.py b/pypy/module/__pypy__/test/test_bytebuffer.py
    --- a/pypy/module/__pypy__/test/test_bytebuffer.py
    +++ b/pypy/module/__pypy__/test/test_bytebuffer.py
    @@ -14,12 +14,9 @@
             assert b[-3] == ord(b'+')
             exc = raises(ValueError, "b[3:5] = b'abc'")
             assert str(exc.value) == "cannot modify size of memoryview object"
    -        raises(NotImplementedError, "b[3:7:2] = b'abc'")
     
             b = bytebuffer(10)
             b[1:3] = b'xy'
             assert bytes(b) == b"\x00xy" + b"\x00" * 7
    -        # XXX: supported in 3.3
    -        raises(NotImplementedError, "b[4:8:2] = b'zw'")
    -        #b[4:8:2] = b'zw'
    -        #assert bytes(b) == b"\x00xy\x00z\x00w" + b"\x00" * 3
    +        b[4:8:2] = b'zw'
    +        assert bytes(b) == b"\x00xy\x00z\x00w" + b"\x00" * 3
    
    From pypy.commits at gmail.com  Wed Sep  7 10:51:09 2016
    From: pypy.commits at gmail.com (rlamy)
    Date: Wed, 07 Sep 2016 07:51:09 -0700 (PDT)
    Subject: [pypy-commit] pypy py3k: 2to3fy cpyext's test_memoryobject.py
    Message-ID: <57d0295d.081dc20a.e249d.bf82@mx.google.com>
    
    Author: Ronan Lamy 
    Branch: py3k
    Changeset: r86940:aa12ac889991
    Date: 2016-09-07 15:50 +0100
    http://bitbucket.org/pypy/pypy/changeset/aa12ac889991/
    
    Log:	2to3fy cpyext's test_memoryobject.py
    
    diff --git a/pypy/module/cpyext/test/buffer_test.c b/pypy/module/cpyext/test/buffer_test.c
    --- a/pypy/module/cpyext/test/buffer_test.c
    +++ b/pypy/module/cpyext/test/buffer_test.c
    @@ -192,10 +192,10 @@
         PyObject* obj = PyTuple_GetItem(args, 0);
         PyObject* memoryview = PyMemoryView_FromObject(obj);
         if (memoryview == NULL)
    -        return PyInt_FromLong(-1);
    +        return PyLong_FromLong(-1);
         view = PyMemoryView_GET_BUFFER(memoryview);
         Py_DECREF(memoryview);
    -    return PyInt_FromLong(view->len);
    +    return PyLong_FromLong(view->len);
     }
     
     /* Copied from numpy tests */
    diff --git a/pypy/module/cpyext/test/test_memoryobject.py b/pypy/module/cpyext/test/test_memoryobject.py
    --- a/pypy/module/cpyext/test/test_memoryobject.py
    +++ b/pypy/module/cpyext/test/test_memoryobject.py
    @@ -1,6 +1,8 @@
    +import pytest
     from pypy.module.cpyext.test.test_api import BaseApiTest
     from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase
     from rpython.rlib.buffer import StringBuffer
    +
     class TestMemoryViewObject(BaseApiTest):
         def test_fromobject(self, space, api):
             w_hello = space.newbytes("hello")
    @@ -76,6 +78,7 @@
             viewlen = module.test_buffer(arr)
             assert viewlen == y.itemsize * len(y)
     
    +    @pytest.mark.skipif(True, reason="no _numpypy on py3k")
         def test_buffer_info(self):
             from _numpypy import multiarray as np
             module = self.import_module(name='buffer_test')
    
    From pypy.commits at gmail.com  Wed Sep  7 11:31:08 2016
    From: pypy.commits at gmail.com (arigo)
    Date: Wed, 07 Sep 2016 08:31:08 -0700 (PDT)
    Subject: [pypy-commit] pypy reverse-debugger: in-progress
    Message-ID: <57d032bc.c5cb1c0a.0496.d81d@mx.google.com>
    
    Author: Armin Rigo 
    Branch: reverse-debugger
    Changeset: r86941:fad74c992868
    Date: 2016-09-07 17:30 +0200
    http://bitbucket.org/pypy/pypy/changeset/fad74c992868/
    
    Log:	in-progress
    
    diff --git a/rpython/rlib/src/boehm-rawrefcount.h b/rpython/rlib/src/boehm-rawrefcount.h
    --- a/rpython/rlib/src/boehm-rawrefcount.h
    +++ b/rpython/rlib/src/boehm-rawrefcount.h
    @@ -4,6 +4,9 @@
        OP_GC_RAWREFCOUNT_CREATE_LINK_PYOBJ(): not implemented, maybe not needed
     */
     
    +#ifdef RPY_REVERSE_DEBUGGER
    +/* these macros are defined in src-revdb/revdb_include.h */
    +#else
     #define OP_GC_RAWREFCOUNT_CREATE_LINK_PYPY(gcobj, pyobj, r)   \
         gc_rawrefcount_create_link_pypy(gcobj, pyobj)
     
    @@ -15,6 +18,7 @@
     
     #define OP_GC_RAWREFCOUNT_NEXT_DEAD(r)   \
         r = gc_rawrefcount_next_dead()
    +#endif
     
     
     RPY_EXTERN void gc_rawrefcount_create_link_pypy(/*gcobj_t*/void *gcobj, 
    diff --git a/rpython/translator/revdb/src-revdb/revdb.c b/rpython/translator/revdb/src-revdb/revdb.c
    --- a/rpython/translator/revdb/src-revdb/revdb.c
    +++ b/rpython/translator/revdb/src-revdb/revdb.c
    @@ -1762,7 +1762,8 @@
         return result;
     }
     
    -RPY_EXTERN RPyString *rpy_reverse_db_dtoa(double d)
    +RPY_EXTERN
    +RPyString *rpy_reverse_db_dtoa(double d)
     {
         char buffer[128], *p;
         RPyString *result;
    @@ -1782,6 +1783,153 @@
     }
     
     
    +static void *rawrefcount_tree;    /* {pyobj: gcobj} */
    +
    +struct rawrefcount_link2_s {
    +    void *pyobj;
    +    void *gcobj;
    +};
    +
    +static int _rrtree_compare(const void *obj1, const void *obj2)
    +{
    +    const struct rawrefcount_link2_s *r1 = obj1;
    +    const struct rawrefcount_link2_s *r2 = obj2;
    +    void *p1 = r1->pyobj;
    +    void *p2 = r2->pyobj;
    +    if (p1 < p2)
    +        return -1;
    +    if (p1 == p2)
    +        return 0;
    +    else
    +        return 1;
    +}
    +
    +static void _rrtree_add(void *pyobj, void *gcobj)
    +{
    +    /* Note: we always allocate an indirection through a 
    +       struct rawrefcount_link2_s, so that Boehm knows that
    +       'gcobj' must be kept alive. */
    +    struct rawrefcount_link2_s *node, **item;
    +    node = GC_MALLOC_UNCOLLECTABLE(sizeof(struct rawrefcount_link2_s));
    +    node->pyobj = pyobj;
    +    node->gcobj = gcobj;
    +    item = tsearch(node, &rawrefcount_tree, _rrtree_compare);
    +    if (item == NULL) {
    +        fprintf(stderr, "_rrtree_add: out of memory\n");
    +        exit(1);
    +    }
    +    if (*item != node) {
    +        fprintf(stderr, "_rrtree_add: duplicate object\n");
    +        exit(1);
    +    }
    +}
    +
    +RPY_EXTERN
    +void rpy_reverse_db_rawrefcount_create_link_pypy(void *gcobj, void *pyobj)
    +{
    +    if (!RPY_RDB_REPLAY) {
    +        gc_rawrefcount_create_link_pypy(gcobj, pyobj);
    +    }
    +    else {
    +        _rrtree_add(pyobj, gcobj);
    +    }
    +}
    +
    +RPY_EXTERN
    +void *rpy_reverse_db_rawrefcount_from_obj(void *gcobj)
    +{
    +    void *r;
    +    RPY_REVDB_EMIT(r = gc_rawrefcount_from_obj(gcobj);, void *_e, r);
    +    return r;
    +}
    +
    +RPY_EXTERN
    +void *rpy_reverse_db_rawrefcount_to_obj(void *pyobj)
    +{
    +    unsigned char flag;
    +
    +    if (!RPY_RDB_REPLAY) {
    +        void *r = gc_rawrefcount_to_obj(pyobj);
    +        RPY_REVDB_EMIT(flag = 0xEE + !r;, unsigned char _e, flag);
    +        return r;
    +    }
    +    else {
    +        RPY_REVDB_EMIT(abort();, unsigned char _e, flag);
    +        switch (flag) {
    +
    +        case 0xEF:
    +            /* when recording, this call to to_obj() returned NULL */
    +            return NULL;
    +
    +        case 0xEE:
    +            /* when recording, this call to to_obj() didn't return NULL */
    +            break;
    +
    +        default:
    +            fprintf(stderr, "bad byte in rawrefcount_to_obj\n");
    +            exit(1);
    +        }
    +
    +        struct rawrefcount_link2_s **item, dummy;
    +        dummy.pyobj = pyobj;
    +        item = tfind(&dummy, &rawrefcount_tree, _rrtree_compare);
    +        if (item == NULL) {
    +            fprintf(stderr, "rawrefcount_to_obj: not found in tree\n");
    +            exit(1);
    +        }
    +        return (*item)->gcobj;
    +    }
    +}
    +
    +RPY_EXTERN
    +void *rpy_reverse_db_rawrefcount_next_dead(void)
    +{
    +    unsigned char flag;
    +
    +    if (!RPY_RDB_REPLAY) {
    +        void *r = gc_rawrefcount_next_dead();
    +        RPY_REVDB_EMIT(flag = 0xEC + !r;, unsigned char _e, flag);
    +        if (r) {
    +            RPY_REVDB_EMIT(;, void *_e, r);
    +        }
    +        return r;
    +    }
    +    else {
    +        RPY_REVDB_EMIT(abort();, unsigned char _e, flag);
    +        switch (flag) {
    +
    +        case 0xED:
    +            /* when recording, this call to next_dead() returned NULL */
    +            return NULL;
    +
    +        case 0xEE:
    +            /* when recording, this call to next_dead() didn't return NULL */
    +            break;
    +
    +        default:
    +            fprintf(stderr, "bad byte in rawrefcount_next_dead\n");
    +            exit(1);
    +        }
    +
    +        void *pyobj;
    +        RPY_REVDB_EMIT(abort();, void *_e, pyobj);
    +
    +        struct rawrefcount_link2_s **item, *entry, dummy;
    +        dummy.pyobj = pyobj;
    +        item = tfind(&dummy, &rawrefcount_tree, _rrtree_compare);
    +        if (item == NULL) {
    +            fprintf(stderr, "rawrefcount_next_dead: not found in tree\n");
    +            exit(1);
    +        }
    +        entry = *item;
    +        tdelete(entry, &rawrefcount_tree, _rrtree_compare);
    +        GC_FREE(entry);
    +
    +        return pyobj;
    +    }
    +}
    +
    +
     /* ------------------------------------------------------------ */
     
     
    diff --git a/rpython/translator/revdb/src-revdb/revdb_include.h b/rpython/translator/revdb/src-revdb/revdb_include.h
    --- a/rpython/translator/revdb/src-revdb/revdb_include.h
    +++ b/rpython/translator/revdb/src-revdb/revdb_include.h
    @@ -38,7 +38,7 @@
         if (rpy_rev_fileno >= 0) {                                          \
             fprintf(stderr,                                                 \
                     "%s %s:%d: %0*llx\n",                                   \
    -                mode, __FILE__, __LINE__, 2 * sizeof(_e),               \
    +                mode, __FILE__, (int)__LINE__, (int)(2 * sizeof(_e)),   \
                     ((unsigned long long)_e) & ((2ULL << (8*sizeof(_e)-1)) - 1)); \
         }
     #endif
    @@ -51,7 +51,7 @@
             seeing_uid(uid);                                                \
             fprintf(stderr,                                                 \
                     "[nobj] %s:%d: obj %llu\n",                             \
    -                __FILE__, __LINE__, (unsigned long long) uid);          \
    +                __FILE__, (int)__LINE__, (unsigned long long) uid);     \
         }
     #endif
     
    @@ -251,6 +251,19 @@
         } while (0)
     
     
    +#define OP_GC_RAWREFCOUNT_CREATE_LINK_PYPY(gcobj, pyobj, r)   \
    +    rpy_reverse_db_rawrefcount_create_link_pypy(gcobj, pyobj)
    +
    +#define OP_GC_RAWREFCOUNT_FROM_OBJ(gcobj, r)   \
    +    r = rpy_reverse_db_rawrefcount_from_obj(gcobj)
    +
    +#define OP_GC_RAWREFCOUNT_TO_OBJ(pyobj, r)   \
    +    r = rpy_reverse_db_rawrefcount_to_obj(pyobj)
    +
    +#define OP_GC_RAWREFCOUNT_NEXT_DEAD(r)   \
    +    r = rpy_reverse_db_rawrefcount_next_dead()
    +
    +
     RPY_EXTERN void rpy_reverse_db_flush(void);  /* must be called with the lock */
     RPY_EXTERN void rpy_reverse_db_fetch(const char *file, int line);
     RPY_EXTERN void rpy_reverse_db_stop_point(long place);
    @@ -275,5 +288,10 @@
     RPY_EXTERN void rpy_reverse_db_set_thread_breakpoint(int64_t tnum);
     RPY_EXTERN double rpy_reverse_db_strtod(RPyString *s);
     RPY_EXTERN RPyString *rpy_reverse_db_dtoa(double d);
    +RPY_EXTERN void rpy_reverse_db_rawrefcount_create_link_pypy(void *gcobj, 
    +                                                            void *pyobj);
    +RPY_EXTERN void *rpy_reverse_db_rawrefcount_from_obj(void *gcobj);
    +RPY_EXTERN void *rpy_reverse_db_rawrefcount_to_obj(void *pyobj);
    +RPY_EXTERN void *rpy_reverse_db_rawrefcount_next_dead(void);
     
     /* ------------------------------------------------------------ */
    
    From pypy.commits at gmail.com  Wed Sep  7 11:31:57 2016
    From: pypy.commits at gmail.com (arigo)
    Date: Wed, 07 Sep 2016 08:31:57 -0700 (PDT)
    Subject: [pypy-commit] pypy reverse-debugger: fix, basic test passes
    Message-ID: <57d032ed.c62f1c0a.f3205.9b42@mx.google.com>
    
    Author: Armin Rigo 
    Branch: reverse-debugger
    Changeset: r86942:5a7baf69f279
    Date: 2016-09-07 17:31 +0200
    http://bitbucket.org/pypy/pypy/changeset/5a7baf69f279/
    
    Log:	fix, basic test passes
    
    diff --git a/rpython/translator/revdb/src-revdb/revdb.c b/rpython/translator/revdb/src-revdb/revdb.c
    --- a/rpython/translator/revdb/src-revdb/revdb.c
    +++ b/rpython/translator/revdb/src-revdb/revdb.c
    @@ -1902,7 +1902,7 @@
                 /* when recording, this call to next_dead() returned NULL */
                 return NULL;
     
    -        case 0xEE:
    +        case 0xEC:
                 /* when recording, this call to next_dead() didn't return NULL */
                 break;
     
    
    From pypy.commits at gmail.com  Wed Sep  7 11:35:37 2016
    From: pypy.commits at gmail.com (arigo)
    Date: Wed, 07 Sep 2016 08:35:37 -0700 (PDT)
    Subject: [pypy-commit] pypy py3k: Fix: _utf8 cannot be a quasi-immutable.
     Will document why and do a
    Message-ID: <57d033c9.e440c20a.7ebf.75f1@mx.google.com>
    
    Author: Armin Rigo 
    Branch: py3k
    Changeset: r86943:628ba5d9a6f8
    Date: 2016-09-07 16:44 +0100
    http://bitbucket.org/pypy/pypy/changeset/628ba5d9a6f8/
    
    Log:	Fix: _utf8 cannot be a quasi-immutable. Will document why and do a
    	proper fix
    
    diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py
    --- a/pypy/objspace/std/unicodeobject.py
    +++ b/pypy/objspace/std/unicodeobject.py
    @@ -25,7 +25,7 @@
     
     class W_UnicodeObject(W_Root):
         import_from_mixin(StringMethods)
    -    _immutable_fields_ = ['_value', '_utf8?']
    +    _immutable_fields_ = ['_value']
     
         def __init__(self, unistr):
             assert isinstance(unistr, unicode)
    
    From pypy.commits at gmail.com  Wed Sep  7 11:36:04 2016
    From: pypy.commits at gmail.com (arigo)
    Date: Wed, 07 Sep 2016 08:36:04 -0700 (PDT)
    Subject: [pypy-commit] extradoc extradoc: more things to do
    Message-ID: <57d033e4.04e21c0a.5fe1.9c4e@mx.google.com>
    
    Author: Armin Rigo 
    Branch: extradoc
    Changeset: r5700:c70f20cfb870
    Date: 2016-09-07 17:35 +0200
    http://bitbucket.org/pypy/extradoc/changeset/c70f20cfb870/
    
    Log:	more things to do
    
    diff --git a/planning/py3.5/milestone-1-progress.rst b/planning/py3.5/milestone-1-progress.rst
    --- a/planning/py3.5/milestone-1-progress.rst
    +++ b/planning/py3.5/milestone-1-progress.rst
    @@ -48,6 +48,12 @@
       _get_module_lock..cb at 0x00007f118e2c0020> ignored``
       we're getting them now on start-up, investigate
     
    +* ``print 5`` should give
    +  ``SyntaxError: Missing parentheses in call to 'print'``
    +
    +* ``_utf8`` in W_UnicodeObject used to be quasi-immutable,
    +  document why it doesn't work and do a proper fix
    +
     
     Milestone 1 (Aug-Sep-Oct 2016)
     ------------------------------
    
    From pypy.commits at gmail.com  Wed Sep  7 12:45:37 2016
    From: pypy.commits at gmail.com (rlamy)
    Date: Wed, 07 Sep 2016 09:45:37 -0700 (PDT)
    Subject: [pypy-commit] pypy default: factor common code between
     compile_extension_module() and compile_extension_module_applevel()
    Message-ID: <57d04431.04a81c0a.afd36.b6e1@mx.google.com>
    
    Author: Ronan Lamy 
    Branch: 
    Changeset: r86944:16d4af61e753
    Date: 2016-09-07 17:44 +0100
    http://bitbucket.org/pypy/pypy/changeset/16d4af61e753/
    
    Log:	factor common code between compile_extension_module() and
    	compile_extension_module_applevel()
    
    diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py
    --- a/pypy/module/cpyext/test/test_cpyext.py
    +++ b/pypy/module/cpyext/test/test_cpyext.py
    @@ -53,6 +53,26 @@
             libraries=libraries)
         return soname
     
    +def get_extra_args(space):
    +    state = space.fromcache(State)
    +    api_library = state.api_lib
    +    if sys.platform == 'win32':
    +        libraries = [api_library]
    +        # '%s' undefined; assuming extern returning int
    +        compile_extra = ["/we4013"]
    +        # prevent linking with PythonXX.lib
    +        w_maj, w_min = space.fixedview(space.sys.get('version_info'), 5)[:2]
    +        link_extra = ["/NODEFAULTLIB:Python%d%d.lib" %
    +                              (space.int_w(w_maj), space.int_w(w_min))]
    +    else:
    +        libraries = []
    +        if sys.platform.startswith('linux'):
    +            compile_extra = ["-Werror", "-g", "-O0", "-Wp,-U_FORTIFY_SOURCE", "-fPIC"]
    +            link_extra = ["-g"]
    +        else:
    +            compile_extra = link_extra = None
    +
    +
     def compile_extension_module(space, modname, include_dirs=[],
             source_files=None, source_strings=None):
         """
    @@ -82,17 +102,21 @@
                 link_extra = ["-g"]
             else:
                 compile_extra = link_extra = None
    +    from pypy.module.imp.importing import get_so_extension
    +    ext = get_so_extension(space)
    +    include_extra = api.include_dirs
    +    extra_libs = libraries
    +    return _compile_ext(modname, include_dirs, source_files, source_strings, include_extra, compile_extra, link_extra, extra_libs, ext)
     
    +def _compile_ext(modname, include_dirs, source_files, source_strings, include_extra, compile_extra, link_extra, extra_libs, ext):
         modname = modname.split('.')[-1]
         soname = create_so(modname,
    -            include_dirs=api.include_dirs + include_dirs,
    +            include_dirs=include_extra + include_dirs,
                 source_files=source_files,
                 source_strings=source_strings,
                 compile_extra=compile_extra,
                 link_extra=link_extra,
    -            libraries=libraries)
    -    from pypy.module.imp.importing import get_so_extension
    -    ext = get_so_extension(space)
    +            libraries=extra_libs)
         pydname = soname.new(purebasename=modname, ext=ext)
         soname.rename(pydname)
         return str(pydname)
    @@ -128,18 +152,11 @@
             compile_extra = [
                 "-O0", "-g", "-Werror=implicit-function-declaration", "-fPIC"]
             link_extra = None
    +    ext = get_so_suffix()
    +    include_extra = [space.include_dir]
    +    extra_libs = None
    +    return _compile_ext(modname, include_dirs, source_files, source_strings, include_extra, compile_extra, link_extra, extra_libs, ext)
     
    -    modname = modname.split('.')[-1]
    -    soname = create_so(modname,
    -            include_dirs=[space.include_dir] + include_dirs,
    -            source_files=source_files,
    -            source_strings=source_strings,
    -            compile_extra=compile_extra,
    -            link_extra=link_extra)
    -    ext = get_so_suffix()
    -    pydname = soname.new(purebasename=modname, ext=ext)
    -    soname.rename(pydname)
    -    return str(pydname)
     
     def freeze_refcnts(self):
         rawrefcount._dont_free_any_more()
    
    From pypy.commits at gmail.com  Wed Sep  7 13:10:08 2016
    From: pypy.commits at gmail.com (arigo)
    Date: Wed, 07 Sep 2016 10:10:08 -0700 (PDT)
    Subject: [pypy-commit] pypy reverse-debugger: Comments. Disable a code path
     that (by mistake) casts between a GC
    Message-ID: <57d049f0.02d31c0a.f7eda.ba3a@mx.google.com>
    
    Author: Armin Rigo 
    Branch: reverse-debugger
    Changeset: r86945:5d46dd70887c
    Date: 2016-09-07 19:09 +0200
    http://bitbucket.org/pypy/pypy/changeset/5d46dd70887c/
    
    Log:	Comments. Disable a code path that (by mistake) casts between a GC
    	pointer and a char-sized integer.
    
    diff --git a/pypy/module/cpyext/memoryobject.py b/pypy/module/cpyext/memoryobject.py
    --- a/pypy/module/cpyext/memoryobject.py
    +++ b/pypy/module/cpyext/memoryobject.py
    @@ -41,6 +41,7 @@
         view.c_len = w_obj.getlength()
         view.c_itemsize = w_obj.buf.getitemsize()
         rffi.setintfield(view, 'c_ndim', ndim)
    +    raise NotImplementedError   # bogus lines follow, remove this when fixed
         view.c__format = rffi.cast(rffi.UCHAR, w_obj.buf.getformat())
         view.c_format = rffi.cast(rffi.CCHARP, view.c__format)
         view.c_shape = rffi.cast(Py_ssize_tP, view.c__shape)
    diff --git a/rpython/translator/revdb/src-revdb/revdb.c b/rpython/translator/revdb/src-revdb/revdb.c
    --- a/rpython/translator/revdb/src-revdb/revdb.c
    +++ b/rpython/translator/revdb/src-revdb/revdb.c
    @@ -1895,6 +1895,17 @@
             return r;
         }
         else {
    +        /* Note: when replaying, the lifetime of the gcobj is a bit
    +           extended when compared with recording.  That shouldn't have
    +           a visible effect.  More precisely, when replaying,
    +           create_link_pypy() calls _rrtree_add(), which makes a
    +           struct rawrefcount_link2_s, which keeps gcobj alive; and
    +           that structure is only freed here, when we call next_dead()
    +           and return the corresponding pyobj.  When recording, the
    +           cause-and-effect relationship is in the opposite direction:
    +           when Boehm frees the gcobj, it causes the pyobj to show up
    +           (sometimes later) in a next_dead() call.
    +        */
             RPY_REVDB_EMIT(abort();, unsigned char _e, flag);
             switch (flag) {
     
    
    From pypy.commits at gmail.com  Wed Sep  7 13:38:36 2016
    From: pypy.commits at gmail.com (arigo)
    Date: Wed, 07 Sep 2016 10:38:36 -0700 (PDT)
    Subject: [pypy-commit] pypy py3k: For now,
     we should get most of the intended effect with @jit.elidable
    Message-ID: <57d0509c.48811c0a.97e41.c705@mx.google.com>
    
    Author: Armin Rigo 
    Branch: py3k
    Changeset: r86946:1fc97c413001
    Date: 2016-09-07 19:38 +0200
    http://bitbucket.org/pypy/pypy/changeset/1fc97c413001/
    
    Log:	For now, we should get most of the intended effect with
    	@jit.elidable
    
    diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py
    --- a/pypy/objspace/std/unicodeobject.py
    +++ b/pypy/objspace/std/unicodeobject.py
    @@ -7,6 +7,7 @@
     from rpython.rlib.runicode import (
         make_unicode_escape_function, str_decode_ascii, str_decode_utf_8,
         unicode_encode_ascii, unicode_encode_utf_8, fast_str_decode_ascii)
    +from rpython.rlib import jit
     
     from pypy.interpreter import unicodehelper
     from pypy.interpreter.baseobjspace import W_Root
    @@ -76,6 +77,7 @@
         def unicode_w(self, space):
             return self._value
     
    +    @jit.elidable
         def identifier_w(self, space):
             identifier = self._utf8
             if identifier is not None:
    
    From pypy.commits at gmail.com  Wed Sep  7 13:51:45 2016
    From: pypy.commits at gmail.com (rlamy)
    Date: Wed, 07 Sep 2016 10:51:45 -0700 (PDT)
    Subject: [pypy-commit] pypy default: Bundle all the static bits of
     compilation info into a SystemCompilationInfo object
    Message-ID: <57d053b1.8f081c0a.57fa7.ce10@mx.google.com>
    
    Author: Ronan Lamy 
    Branch: 
    Changeset: r86947:006a4e6fc3a9
    Date: 2016-09-07 18:51 +0100
    http://bitbucket.org/pypy/pypy/changeset/006a4e6fc3a9/
    
    Log:	Bundle all the static bits of compilation info into a
    	SystemCompilationInfo object
    
    diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py
    --- a/pypy/module/cpyext/test/test_cpyext.py
    +++ b/pypy/module/cpyext/test/test_cpyext.py
    @@ -53,7 +53,21 @@
             libraries=libraries)
         return soname
     
    -def get_extra_args(space):
    +class SystemCompilationInfo(object):
    +    """Bundles all the generic information required to compile extensions.
    +
    +    Note: here, 'system' means OS + target interpreter + test config + ...
    +    """
    +    def __init__(self, include_extra=None, compile_extra=None, link_extra=None,
    +            extra_libs=None, ext=None):
    +        self.include_extra = include_extra or []
    +        self.compile_extra = compile_extra
    +        self.link_extra = link_extra
    +        self.extra_libs = extra_libs
    +        self.ext = ext
    +
    +def get_cpyext_info(space):
    +    from pypy.module.imp.importing import get_so_extension
         state = space.fromcache(State)
         api_library = state.api_lib
         if sys.platform == 'win32':
    @@ -63,14 +77,21 @@
             # prevent linking with PythonXX.lib
             w_maj, w_min = space.fixedview(space.sys.get('version_info'), 5)[:2]
             link_extra = ["/NODEFAULTLIB:Python%d%d.lib" %
    -                              (space.int_w(w_maj), space.int_w(w_min))]
    +            (space.int_w(w_maj), space.int_w(w_min))]
         else:
             libraries = []
             if sys.platform.startswith('linux'):
    -            compile_extra = ["-Werror", "-g", "-O0", "-Wp,-U_FORTIFY_SOURCE", "-fPIC"]
    +            compile_extra = [
    +                "-Werror", "-g", "-O0", "-Wp,-U_FORTIFY_SOURCE", "-fPIC"]
                 link_extra = ["-g"]
             else:
                 compile_extra = link_extra = None
    +    return SystemCompilationInfo(
    +        include_extra=api.include_dirs,
    +        compile_extra=compile_extra,
    +        link_extra=link_extra,
    +        extra_libs=libraries,
    +        ext=get_so_extension(space))
     
     
     def compile_extension_module(space, modname, include_dirs=[],
    @@ -85,39 +106,21 @@
         Any extra keyword arguments are passed on to ExternalCompilationInfo to
         build the module (so specify your source with one of those).
         """
    -    state = space.fromcache(State)
    -    api_library = state.api_lib
    -    if sys.platform == 'win32':
    -        libraries = [api_library]
    -        # '%s' undefined; assuming extern returning int
    -        compile_extra = ["/we4013"]
    -        # prevent linking with PythonXX.lib
    -        w_maj, w_min = space.fixedview(space.sys.get('version_info'), 5)[:2]
    -        link_extra = ["/NODEFAULTLIB:Python%d%d.lib" %
    -                              (space.int_w(w_maj), space.int_w(w_min))]
    -    else:
    -        libraries = []
    -        if sys.platform.startswith('linux'):
    -            compile_extra = ["-Werror", "-g", "-O0", "-Wp,-U_FORTIFY_SOURCE", "-fPIC"]
    -            link_extra = ["-g"]
    -        else:
    -            compile_extra = link_extra = None
    -    from pypy.module.imp.importing import get_so_extension
    -    ext = get_so_extension(space)
    -    include_extra = api.include_dirs
    -    extra_libs = libraries
    -    return _compile_ext(modname, include_dirs, source_files, source_strings, include_extra, compile_extra, link_extra, extra_libs, ext)
    +    sys_info = get_cpyext_info(space)
    +    return _compile_ext(
    +        modname, include_dirs, source_files, source_strings, sys_info)
     
    -def _compile_ext(modname, include_dirs, source_files, source_strings, include_extra, compile_extra, link_extra, extra_libs, ext):
    +
    +def _compile_ext(modname, include_dirs, source_files, source_strings, sys_info):
         modname = modname.split('.')[-1]
         soname = create_so(modname,
    -            include_dirs=include_extra + include_dirs,
    -            source_files=source_files,
    -            source_strings=source_strings,
    -            compile_extra=compile_extra,
    -            link_extra=link_extra,
    -            libraries=extra_libs)
    -    pydname = soname.new(purebasename=modname, ext=ext)
    +        include_dirs=sys_info.include_extra + include_dirs,
    +        source_files=source_files,
    +        source_strings=source_strings,
    +        compile_extra=sys_info.compile_extra,
    +        link_extra=sys_info.link_extra,
    +        libraries=sys_info.extra_libs)
    +    pydname = soname.new(purebasename=modname, ext=sys_info.ext)
         soname.rename(pydname)
         return str(pydname)
     
    @@ -130,6 +133,24 @@
             raise RuntimeError("This interpreter does not define a filename "
                 "suffix for C extensions!")
     
    +def get_sys_info_app(space):
    +    if sys.platform == 'win32':
    +        compile_extra = ["/we4013"]
    +        link_extra = ["/LIBPATH:" + os.path.join(sys.exec_prefix, 'libs')]
    +    elif sys.platform == 'darwin':
    +        compile_extra = link_extra = None
    +        pass
    +    elif sys.platform.startswith('linux'):
    +        compile_extra = [
    +            "-O0", "-g", "-Werror=implicit-function-declaration", "-fPIC"]
    +        link_extra = None
    +    ext = get_so_suffix()
    +    return SystemCompilationInfo(
    +        include_extra=[space.include_dir],
    +        compile_extra=compile_extra,
    +        link_extra=link_extra,
    +        ext=get_so_suffix())
    +
     def compile_extension_module_applevel(space, modname, include_dirs=[],
             source_files=None, source_strings=None):
         """
    @@ -142,20 +163,9 @@
         Any extra keyword arguments are passed on to ExternalCompilationInfo to
         build the module (so specify your source with one of those).
         """
    -    if sys.platform == 'win32':
    -        compile_extra = ["/we4013"]
    -        link_extra = ["/LIBPATH:" + os.path.join(sys.exec_prefix, 'libs')]
    -    elif sys.platform == 'darwin':
    -        compile_extra = link_extra = None
    -        pass
    -    elif sys.platform.startswith('linux'):
    -        compile_extra = [
    -            "-O0", "-g", "-Werror=implicit-function-declaration", "-fPIC"]
    -        link_extra = None
    -    ext = get_so_suffix()
    -    include_extra = [space.include_dir]
    -    extra_libs = None
    -    return _compile_ext(modname, include_dirs, source_files, source_strings, include_extra, compile_extra, link_extra, extra_libs, ext)
    +    sys_info = get_sys_info_app(space)
    +    return _compile_ext(
    +        modname, include_dirs, source_files, source_strings, sys_info)
     
     
     def freeze_refcnts(self):
    
    From pypy.commits at gmail.com  Wed Sep  7 14:13:50 2016
    From: pypy.commits at gmail.com (rlamy)
    Date: Wed, 07 Sep 2016 11:13:50 -0700 (PDT)
    Subject: [pypy-commit] pypy default: Merge compile_extension_module() and
     compile_extension_module_applevel() into a single version that takes a
     sys_info instead of a space
    Message-ID: <57d058de.4676c20a.2e4e.e6fc@mx.google.com>
    
    Author: Ronan Lamy 
    Branch: 
    Changeset: r86948:df92d833f490
    Date: 2016-09-07 19:13 +0100
    http://bitbucket.org/pypy/pypy/changeset/df92d833f490/
    
    Log:	Merge compile_extension_module() and
    	compile_extension_module_applevel() into a single version that takes
    	a sys_info instead of a space
    
    diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py
    --- a/pypy/module/cpyext/test/test_cpyext.py
    +++ b/pypy/module/cpyext/test/test_cpyext.py
    @@ -94,7 +94,7 @@
             ext=get_so_extension(space))
     
     
    -def compile_extension_module(space, modname, include_dirs=[],
    +def compile_extension_module(sys_info, modname, include_dirs=[],
             source_files=None, source_strings=None):
         """
         Build an extension module and return the filename of the resulting native
    @@ -106,12 +106,6 @@
         Any extra keyword arguments are passed on to ExternalCompilationInfo to
         build the module (so specify your source with one of those).
         """
    -    sys_info = get_cpyext_info(space)
    -    return _compile_ext(
    -        modname, include_dirs, source_files, source_strings, sys_info)
    -
    -
    -def _compile_ext(modname, include_dirs, source_files, source_strings, sys_info):
         modname = modname.split('.')[-1]
         soname = create_so(modname,
             include_dirs=sys_info.include_extra + include_dirs,
    @@ -151,22 +145,6 @@
             link_extra=link_extra,
             ext=get_so_suffix())
     
    -def compile_extension_module_applevel(space, modname, include_dirs=[],
    -        source_files=None, source_strings=None):
    -    """
    -    Build an extension module and return the filename of the resulting native
    -    code file.
    -
    -    modname is the name of the module, possibly including dots if it is a module
    -    inside a package.
    -
    -    Any extra keyword arguments are passed on to ExternalCompilationInfo to
    -    build the module (so specify your source with one of those).
    -    """
    -    sys_info = get_sys_info_app(space)
    -    return _compile_ext(
    -        modname, include_dirs, source_files, source_strings, sys_info)
    -
     
     def freeze_refcnts(self):
         rawrefcount._dont_free_any_more()
    @@ -347,8 +325,8 @@
                     assert separate_module_sources is not None
                 else:
                     separate_module_sources = []
    -            pydname = self.compile_extension_module(
    -                space, name,
    +            pydname = compile_extension_module(
    +                self.sys_info, name,
                     source_files=separate_module_files,
                     source_strings=separate_module_sources)
                 return space.wrap(pydname)
    @@ -403,7 +381,7 @@
                     filename = py.path.local(pypydir) / 'module' \
                             / 'cpyext'/ 'test' / (filename + ".c")
                     kwds = dict(source_files=[filename])
    -            mod = self.compile_extension_module(space, name,
    +            mod = compile_extension_module(self.sys_info, name,
                         include_dirs=include_dirs, **kwds)
     
                 if load_it:
    @@ -495,11 +473,11 @@
                     return run
                 def wrap(func):
                     return func
    -            self.compile_extension_module = compile_extension_module_applevel
    +            self.sys_info = get_sys_info_app(space)
             else:
                 interp2app = gateway.interp2app
                 wrap = self.space.wrap
    -            self.compile_extension_module = compile_extension_module
    +            self.sys_info = get_cpyext_info(space)
             self.w_compile_module = wrap(interp2app(compile_module))
             self.w_import_module = wrap(interp2app(import_module))
             self.w_reimport_module = wrap(interp2app(reimport_module))
    
    From pypy.commits at gmail.com  Wed Sep  7 14:55:52 2016
    From: pypy.commits at gmail.com (rlamy)
    Date: Wed, 07 Sep 2016 11:55:52 -0700 (PDT)
    Subject: [pypy-commit] pypy default: Kill FakeSpace.include_dir
    Message-ID: <57d062b8.0cce1c0a.30a6.df4b@mx.google.com>
    
    Author: Ronan Lamy 
    Branch: 
    Changeset: r86949:de3eabaf671b
    Date: 2016-09-07 19:50 +0100
    http://bitbucket.org/pypy/pypy/changeset/de3eabaf671b/
    
    Log:	Kill FakeSpace.include_dir
    
    diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py
    --- a/pypy/module/cpyext/test/test_cpyext.py
    +++ b/pypy/module/cpyext/test/test_cpyext.py
    @@ -127,7 +127,8 @@
             raise RuntimeError("This interpreter does not define a filename "
                 "suffix for C extensions!")
     
    -def get_sys_info_app(space):
    +def get_sys_info_app():
    +    from distutils.sysconfig import get_python_inc
         if sys.platform == 'win32':
             compile_extra = ["/we4013"]
             link_extra = ["/LIBPATH:" + os.path.join(sys.exec_prefix, 'libs')]
    @@ -140,7 +141,7 @@
             link_extra = None
         ext = get_so_suffix()
         return SystemCompilationInfo(
    -        include_extra=[space.include_dir],
    +        include_extra=[get_python_inc()],
             compile_extra=compile_extra,
             link_extra=link_extra,
             ext=get_so_suffix())
    @@ -159,9 +160,7 @@
     class FakeSpace(object):
         """Like TinyObjSpace, but different"""
         def __init__(self, config):
    -        from distutils.sysconfig import get_python_inc
             self.config = config
    -        self.include_dir = get_python_inc()
     
         def passthrough(self, arg):
             return arg
    @@ -473,7 +472,7 @@
                     return run
                 def wrap(func):
                     return func
    -            self.sys_info = get_sys_info_app(space)
    +            self.sys_info = get_sys_info_app()
             else:
                 interp2app = gateway.interp2app
                 wrap = self.space.wrap
    
    From pypy.commits at gmail.com  Wed Sep  7 18:43:17 2016
    From: pypy.commits at gmail.com (rlamy)
    Date: Wed, 07 Sep 2016 15:43:17 -0700 (PDT)
    Subject: [pypy-commit] pypy default: Oops!
    Message-ID: <57d09805.6937c20a.8dc4e.acd4@mx.google.com>
    
    Author: Ronan Lamy 
    Branch: 
    Changeset: r86950:e3d66063913c
    Date: 2016-09-07 23:41 +0100
    http://bitbucket.org/pypy/pypy/changeset/e3d66063913c/
    
    Log:	Oops!
    
    diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py
    --- a/pypy/module/cpyext/test/test_cpyext.py
    +++ b/pypy/module/cpyext/test/test_cpyext.py
    @@ -476,7 +476,7 @@
             else:
                 interp2app = gateway.interp2app
                 wrap = self.space.wrap
    -            self.sys_info = get_cpyext_info(space)
    +            self.sys_info = get_cpyext_info(self.space)
             self.w_compile_module = wrap(interp2app(compile_module))
             self.w_import_module = wrap(interp2app(import_module))
             self.w_reimport_module = wrap(interp2app(reimport_module))
    
    From pypy.commits at gmail.com  Wed Sep  7 18:47:25 2016
    From: pypy.commits at gmail.com (sbauman)
    Date: Wed, 07 Sep 2016 15:47:25 -0700 (PDT)
    Subject: [pypy-commit] pypy force-virtual-state: Add test case for
     generating the proper guard on a raw pointer during short preamble creation
    Message-ID: <57d098fd.8f081c0a.57fa7.2fcc@mx.google.com>
    
    Author: Spenser Andrew Bauman 
    Branch: force-virtual-state
    Changeset: r86951:9fea14c01cc4
    Date: 2016-09-07 18:46 -0400
    http://bitbucket.org/pypy/pypy/changeset/9fea14c01cc4/
    
    Log:	Add test case for generating the proper guard on a raw pointer
    	during short preamble creation
    
    diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py
    --- a/rpython/jit/metainterp/test/test_ajit.py
    +++ b/rpython/jit/metainterp/test/test_ajit.py
    @@ -4508,3 +4508,38 @@
                 return i
             self.meta_interp(f, [])
     
    +    def test_round_trip_raw_pointer(self):
    +        driver = JitDriver(greens=[], reds=['i', 'val'])
    +
    +        class Box(object):
    +            _immutable_fields_ = ['_ptr']
    +            _ptr = lltype.nullptr(rffi.CCHARP.TO)
    +
    +        def new_int_buffer(value):
    +            data = lltype.malloc(rffi.CCHARP.TO, noConst(rffi.sizeof(rffi.INT)), flavor='raw', zero=True)
    +            rffi.cast(rffi.INTP, data)[0] = rffi.cast(rffi.INT, value)
    +            return data
    +
    +        def read_int_buffer(buf):
    +            return rffi.cast(rffi.INTP, buf)[0]
    +
    +        def f():
    +            i = 0
    +            val = Box()
    +            val._ptr = new_int_buffer(1)
    +
    +            set_param(None, 'retrace_limit', -1)
    +            while i < 100:
    +                driver.jit_merge_point(i=i, val=val)
    +                driver.can_enter_jit(i=i, val=val)
    +                tgt = val._ptr
    +                if i & 0b100:
    +                    i += 1
    +                i += int(read_int_buffer(val._ptr))
    +                lltype.free(val._ptr, flavor='raw')
    +                val._ptr = new_int_buffer(1)
    +            lltype.free(val._ptr, flavor='raw')
    +
    +        self.meta_interp(f, [])
    +        self.check_resops(guard_nonnull=0)
    +
    
    From pypy.commits at gmail.com  Wed Sep  7 22:38:12 2016
    From: pypy.commits at gmail.com (sbauman)
    Date: Wed, 07 Sep 2016 19:38:12 -0700 (PDT)
    Subject: [pypy-commit] pypy force-virtual-state: Add some explanation for
     the new test
    Message-ID: <57d0cf14.e17cc20a.b3f92.57aa@mx.google.com>
    
    Author: Spenser Andrew Bauman 
    Branch: force-virtual-state
    Changeset: r86952:febf0f869fc5
    Date: 2016-09-07 22:37 -0400
    http://bitbucket.org/pypy/pypy/changeset/febf0f869fc5/
    
    Log:	Add some explanation for the new test
    
    diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py
    --- a/rpython/jit/metainterp/test/test_ajit.py
    +++ b/rpython/jit/metainterp/test/test_ajit.py
    @@ -4509,10 +4509,25 @@
             self.meta_interp(f, [])
     
         def test_round_trip_raw_pointer(self):
+        # The goal of this test is to get a raw pointer op into the short preamble
    +        # so we can check that the proper guards are generated
    +        # In this case, the resulting short preamble contains
    +        #
    +        # i1 = getfield_gc_i(p0, descr=inst__ptr)
    +        # i2 = int_eq(i1, 0)
    +        # guard_false(i2)
    +        #
    +        # as opposed to what the JIT used to produce
    +        #
    +        # i1 = getfield_gc_i(p0, descr=inst__ptr)
    +        # guard_nonnull(i1)
    +        #
+        # Which will probably generate correct assembly, but the optimization
+        # pipeline expects guard_nonnull arguments to be pointer ops and may
+        # crash on other input types.
             driver = JitDriver(greens=[], reds=['i', 'val'])
     
             class Box(object):
    -            _immutable_fields_ = ['_ptr']
                 _ptr = lltype.nullptr(rffi.CCHARP.TO)
     
             def new_int_buffer(value):
    @@ -4533,6 +4548,7 @@
                     driver.jit_merge_point(i=i, val=val)
                     driver.can_enter_jit(i=i, val=val)
                     tgt = val._ptr
    +                # Just to produce a side exit
                     if i & 0b100:
                         i += 1
                     i += int(read_int_buffer(val._ptr))
    
    From pypy.commits at gmail.com  Thu Sep  8 00:59:01 2016
    From: pypy.commits at gmail.com (sbauman)
    Date: Wed, 07 Sep 2016 21:59:01 -0700 (PDT)
    Subject: [pypy-commit] pypy force-virtual-state: Remove useless line
    Message-ID: <57d0f015.6937c20a.8dc4e.f5fe@mx.google.com>
    
    Author: Spenser Andrew Bauman 
    Branch: force-virtual-state
    Changeset: r86953:c5f3d521062a
    Date: 2016-09-08 00:58 -0400
    http://bitbucket.org/pypy/pypy/changeset/c5f3d521062a/
    
    Log:	Remove useless line
    
    diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py
    --- a/rpython/jit/metainterp/test/test_ajit.py
    +++ b/rpython/jit/metainterp/test/test_ajit.py
    @@ -4547,7 +4547,6 @@
                 while i < 100:
                     driver.jit_merge_point(i=i, val=val)
                     driver.can_enter_jit(i=i, val=val)
    -                tgt = val._ptr
                     # Just to produce a side exit
                     if i & 0b100:
                         i += 1
    
    From pypy.commits at gmail.com  Thu Sep  8 02:16:18 2016
    From: pypy.commits at gmail.com (arigo)
    Date: Wed, 07 Sep 2016 23:16:18 -0700 (PDT)
    Subject: [pypy-commit] pypy.org extradoc: update the values
    Message-ID: <57d10232.c186c20a.99002.62d9@mx.google.com>
    
    Author: Armin Rigo 
    Branch: extradoc
    Changeset: r789:3af077f71333
    Date: 2016-09-08 08:16 +0200
    http://bitbucket.org/pypy/pypy.org/changeset/3af077f71333/
    
    Log:	update the values
    
    diff --git a/don1.html b/don1.html
    --- a/don1.html
    +++ b/don1.html
    @@ -15,7 +15,7 @@
     
     
        
    -   $65097 of $105000 (62.0%)
    +   $65099 of $105000 (62.0%)
        
    @@ -23,7 +23,7 @@
  • Read proposal
  • From pypy.commits at gmail.com Thu Sep 8 07:54:57 2016 From: pypy.commits at gmail.com (cfbolz) Date: Thu, 08 Sep 2016 04:54:57 -0700 (PDT) Subject: [pypy-commit] pypy default: build the hashmap only once, not *every time* we call _has_reflected_op Message-ID: <57d15191.e97ac20a.1eabd.faa2@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r86954:4753c209847a Date: 2016-09-08 13:54 +0200 http://bitbucket.org/pypy/pypy/changeset/4753c209847a/ Log: build the hashmap only once, not *every time* we call _has_reflected_op (found while reading control flow graphs) diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -392,31 +392,39 @@ extobj_w = space.newlist([space.wrap(8192), space.wrap(0), space.w_None]) return extobj_w + +_reflected_ops = { + 'add': 'radd', + 'subtract': 'rsub', + 'multiply': 'rmul', + 'divide': 'rdiv', + 'true_divide': 'rtruediv', + 'floor_divide': 'rfloordiv', + 'remainder': 'rmod', + 'power': 'rpow', + 'left_shift': 'rlshift', + 'right_shift': 'rrshift', + 'bitwise_and': 'rand', + 'bitwise_xor': 'rxor', + 'bitwise_or': 'ror', + #/* Comparisons */ + 'equal': 'eq', + 'not_equal': 'ne', + 'greater': 'lt', + 'less': 'gt', + 'greater_equal': 'le', + 'less_equal': 'ge', +} + +for key, value in _reflected_ops.items(): + _reflected_ops[key] = "__" + value + "__" +del key +del value + def _has_reflected_op(space, w_obj, op): - refops ={ 'add': 'radd', - 'subtract': 'rsub', - 'multiply': 'rmul', - 'divide': 'rdiv', - 'true_divide': 'rtruediv', - 'floor_divide': 'rfloordiv', - 'remainder': 'rmod', - 'power': 'rpow', - 'left_shift': 'rlshift', - 'right_shift': 'rrshift', - 'bitwise_and': 'rand', - 'bitwise_xor': 'rxor', - 'bitwise_or': 'ror', - #/* Comparisons */ - 'equal': 'eq', - 'not_equal': 'ne', - 'greater': 'lt', - 'less': 'gt', - 'greater_equal': 'le', - 'less_equal': 'ge', - } - if op not in refops: + if op not in _reflected_ops: 
return False - return space.getattr(w_obj, space.wrap('__' + refops[op] + '__')) is not None + return space.getattr(w_obj, space.wrap(_reflected_ops[op])) is not None def safe_casting_mode(casting): assert casting is not None From pypy.commits at gmail.com Thu Sep 8 08:37:09 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 08 Sep 2016 05:37:09 -0700 (PDT) Subject: [pypy-commit] pypy ppc-vsx-support: drafting new implementation Message-ID: <57d15b75.87adc20a.c9daa.fe38@mx.google.com> Author: Richard Plangger Branch: ppc-vsx-support Changeset: r86955:ed75d303a710 Date: 2016-09-07 17:14 +0200 http://bitbucket.org/pypy/pypy/changeset/ed75d303a710/ Log: drafting new implementation diff --git a/rpython/jit/metainterp/optimizeopt/dependency.py b/rpython/jit/metainterp/optimizeopt/dependency.py --- a/rpython/jit/metainterp/optimizeopt/dependency.py +++ b/rpython/jit/metainterp/optimizeopt/dependency.py @@ -136,6 +136,7 @@ self.schedule_position = -1 self.priority = 0 self._stack = False + self.delayed = None def is_imaginary(self): return False @@ -148,6 +149,7 @@ def getopnum(self): return self.op.getopnum() + def getopname(self): return self.op.getopname() @@ -157,6 +159,9 @@ def can_be_relaxed(self): return self.op.getopnum() in (rop.GUARD_TRUE, rop.GUARD_FALSE) + def is_pure(self): + return rop.is_always_pure(self.op.getopnum()) + def edge_to(self, to, arg=None, failarg=False, label=None): if self is to: return diff --git a/rpython/jit/metainterp/optimizeopt/schedule.py b/rpython/jit/metainterp/optimizeopt/schedule.py --- a/rpython/jit/metainterp/optimizeopt/schedule.py +++ b/rpython/jit/metainterp/optimizeopt/schedule.py @@ -65,9 +65,9 @@ if node.depends_count() == 0: self.worklist.insert(0, node) - def emit(self, node, scheduler): + def try_emit_or_delay(self, node, scheduler): # implement me in subclass. e.g. 
as in VecScheduleState - return False + raise NotImplementedError def delay(self, node): return False @@ -180,14 +180,7 @@ while state.has_more(): node = self.next(state) if node: - if not state.emit(node, self): - if not node.emitted: - state.pre_emit(node) - self.mark_emitted(node, state) - if not node.is_imaginary(): - op = node.getoperation() - state.seen[op] = None - state.oplist.append(op) + state.try_emit_or_delay(node, scheduler) continue # it happens that packs can emit many nodes that have been @@ -551,6 +544,10 @@ failargs[i] = self.renamer.rename_map.get(seed, seed) op.setfailargs(failargs) + delayed = node.delayed + if delayed: + import pdb; pdb.set_trace() + def profitable(self): return self.costmodel.profitable() @@ -560,18 +557,45 @@ for arg in self.graph.loop.label.getarglist(): self.seen[arg] = None - def emit(self, node, scheduler): - """ If you implement a scheduler this operations is called - to emit the actual operation into the oplist of the scheduler. - """ - if node.pack: + def try_emit_or_delay(self, scheduler, state): + # emission might be blocked by other nodes if this node has a pack! + if self.pack: assert node.pack.numops() > 1 for node in node.pack.operations: self.pre_emit(node) scheduler.mark_emitted(node, self, unpack=False) turn_into_vector(self, node.pack) - return True - return False + return + elif not node.emitted: + if not node.is_imaginary() and node.is_pure(): + # this operation might never be emitted. only if it is really needed + self.delay_emit(scheduler, node) + return + # emit a now! 
+ state.pre_emit(node) + self.mark_emitted(node, state) + if not node.is_imaginary(): + op = node.getoperation() + state.seen[op] = None + state.oplist.append(op) + + def delay_emit(self, scheduler, node): + """ it has been decided that the operation might be scheduled later """ + delayed = node.delayed or [] + delayed.append(self) + node.delayed = None + for to in self.provides(): + self.delegate_delay(to, delayed) + self.mark_emitted(node, state) + + def delegate_delay(self, node, delayed): + """ Chain up delays, this can reduce many more of the operations """ + if node.delayed is None: + node.delayed = delayed + else: + delayedlist = node.delayed + for d in delayed: + delayedlist.append(d) def delay(self, node): if node.pack: From pypy.commits at gmail.com Thu Sep 8 08:37:11 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 08 Sep 2016 05:37:11 -0700 (PDT) Subject: [pypy-commit] pypy ppc-vsx-support: operations now can be delayed (if they are pure), operation arguments can pull them just before the op itself is emitted Message-ID: <57d15b77.4a301c0a.60059.2cf1@mx.google.com> Author: Richard Plangger Branch: ppc-vsx-support Changeset: r86956:545565fe89bc Date: 2016-09-08 14:36 +0200 http://bitbucket.org/pypy/pypy/changeset/545565fe89bc/ Log: operations now can be delayed (if they are pure), operation arguments can pull them just before the op itself is emitted diff --git a/rpython/jit/metainterp/optimizeopt/dependency.py b/rpython/jit/metainterp/optimizeopt/dependency.py --- a/rpython/jit/metainterp/optimizeopt/dependency.py +++ b/rpython/jit/metainterp/optimizeopt/dependency.py @@ -1042,29 +1042,35 @@ assert isinstance(other, IndexVar) return self.constant - other.constant + def get_operations(self): + var = self.var + tolist = [] + if self.coefficient_mul != 1: + args = [var, ConstInt(self.coefficient_mul)] + var = ResOperation(rop.INT_MUL, args) + tolist.append(var) + if self.coefficient_div != 1: + assert 0 # should never be the case with handling + 
# of INT_PY_DIV commented out in this file... + if self.constant > 0: + args = [var, ConstInt(self.constant)] + var = ResOperation(rop.INT_ADD, args) + tolist.append(var) + if self.constant < 0: + args = [var, ConstInt(self.constant)] + var = ResOperation(rop.INT_SUB, args) + tolist.append(var) + return tolist + def emit_operations(self, opt, result_box=None): var = self.var if self.is_identity(): return var - if self.coefficient_mul != 1: - args = [var, ConstInt(self.coefficient_mul)] - var = ResOperation(rop.INT_MUL, args) - opt.emit_operation(var) - if self.coefficient_div != 1: - assert 0 # XXX for now; should never be the case with handling - # of INT_PY_DIV commented out in this file... - #args = [var, ConstInt(self.coefficient_div)] - #var = ResOperation(rop.INT_FLOORDIV, args) - #opt.emit_operation(var) - if self.constant > 0: - args = [var, ConstInt(self.constant)] - var = ResOperation(rop.INT_ADD, args) - opt.emit_operation(var) - if self.constant < 0: - args = [var, ConstInt(self.constant)] - var = ResOperation(rop.INT_SUB, args) - opt.emit_operation(var) - return var + last = None + for op in self.get_operations(): + opt.emit_operation(op) + last = op + return last def compare(self, other): """ Returns if the two are compareable as a first result diff --git a/rpython/jit/metainterp/optimizeopt/schedule.py b/rpython/jit/metainterp/optimizeopt/schedule.py --- a/rpython/jit/metainterp/optimizeopt/schedule.py +++ b/rpython/jit/metainterp/optimizeopt/schedule.py @@ -36,9 +36,48 @@ self.invariant_oplist = [] self.invariant_vector_vars = [] self.seen = {} + self.delayed = [] + + def resolve_delayed(self, needs_resolving, delayed, op): + # recursive solving of all delayed objects + if not delayed: + return + args = op.getarglist() + for arg in args: + if arg.is_constant() or arg.is_inputarg(): + continue + if arg not in self.seen: + needs_resolving[arg] = None + indexvars = self.graph.index_vars + i = len(delayed)-1 + while i >= 0: + node = delayed[i] + op = 
node.getoperation() + if op in needs_resolving: + # either it is a normal operation, or we know that there is a linear combination + if op in indexvars: + indexvar = indexvars[op] + for operation in indexvar.get_operations(): + self.oplist.append(operation) + last = operation + self.renamer.start_renaming(op, last) + del needs_resolving[op] + else: + del needs_resolving[op] + self.resolve_delayed(needs_resolving, delayed, op) + self.oplist.append(op) + i -= 1 + def post_schedule(self): loop = self.graph.loop + # + if self.delayed: + # some operations can be delayed until the jump instruction, + # handle them here + self.resolve_delayed({}, self.delayed, loop.jump) + + # self.renamer.rename(loop.jump) self.ensure_args_unpacked(loop.jump) loop.operations = self.oplist @@ -65,7 +104,7 @@ if node.depends_count() == 0: self.worklist.insert(0, node) - def try_emit_or_delay(self, node, scheduler): + def try_emit_or_delay(self, node): # implement me in subclass. e.g. as in VecScheduleState raise NotImplementedError @@ -137,42 +176,6 @@ return True return node.depends_count() != 0 - def mark_emitted(self, node, state, unpack=True): - """ An operation has been emitted, adds new operations to the worklist - whenever their dependency count drops to zero. 
- Keeps worklist sorted (see priority) """ - worklist = state.worklist - provides = node.provides()[:] - for dep in provides: # COPY - target = dep.to - node.remove_edge_to(target) - if not target.emitted and target.depends_count() == 0: - # sorts them by priority - i = len(worklist)-1 - while i >= 0: - cur = worklist[i] - c = (cur.priority - target.priority) - if c < 0: # meaning itnode.priority < target.priority: - worklist.insert(i+1, target) - break - elif c == 0: - # if they have the same priority, sort them - # using the original position in the trace - if target.getindex() < cur.getindex(): - worklist.insert(i+1, target) - break - i -= 1 - else: - worklist.insert(0, target) - node.clear_dependencies() - node.emitted = True - if not node.is_imaginary(): - op = node.getoperation() - state.renamer.rename(op) - if unpack: - state.ensure_args_unpacked(op) - state.post_emit(node) - def walk_and_emit(self, state): """ Emit all the operations into the oplist parameter. Initiates the scheduling. """ @@ -180,7 +183,7 @@ while state.has_more(): node = self.next(state) if node: - state.try_emit_or_delay(node, scheduler) + state.try_emit_or_delay(node) continue # it happens that packs can emit many nodes that have been @@ -522,7 +525,7 @@ def post_emit(self, node): pass - def pre_emit(self, node): + def pre_emit(self, node, pack_first=True): op = node.getoperation() if op.is_guard(): # add accumulation info to the descriptor @@ -546,7 +549,21 @@ delayed = node.delayed if delayed: - import pdb; pdb.set_trace() + # there are some nodes that have been delayed just for this operation + if pack_first: + self.resolve_delayed({}, delayed, op) + for node in delayed: + if node is not None: + provides = node.provides() + if len(provides) == 0: + # add this node to the final delay list + # might be emitted before jumping! 
+ self.delayed.append(node) + else: + for to in node.provides(): + tnode = to.target_node() + self.delegate_delay(tnode, [node]) + node.delayed = None def profitable(self): return self.costmodel.profitable() @@ -557,36 +574,41 @@ for arg in self.graph.loop.label.getarglist(): self.seen[arg] = None - def try_emit_or_delay(self, scheduler, state): + def try_emit_or_delay(self, node): # emission might be blocked by other nodes if this node has a pack! - if self.pack: + if node.pack: assert node.pack.numops() > 1 - for node in node.pack.operations: - self.pre_emit(node) - scheduler.mark_emitted(node, self, unpack=False) + for i,node in enumerate(node.pack.operations): + self.pre_emit(node, i==0) + self.mark_emitted(node, unpack=False) turn_into_vector(self, node.pack) return elif not node.emitted: if not node.is_imaginary() and node.is_pure(): # this operation might never be emitted. only if it is really needed - self.delay_emit(scheduler, node) + self.delay_emit(node) return # emit a now! - state.pre_emit(node) - self.mark_emitted(node, state) + self.pre_emit(node) + self.mark_emitted(node) if not node.is_imaginary(): op = node.getoperation() - state.seen[op] = None - state.oplist.append(op) + self.seen[op] = None + self.oplist.append(op) - def delay_emit(self, scheduler, node): + def delay_emit(self, node): """ it has been decided that the operation might be scheduled later """ delayed = node.delayed or [] - delayed.append(self) + delayed.append(node) node.delayed = None - for to in self.provides(): - self.delegate_delay(to, delayed) - self.mark_emitted(node, state) + provides = node.provides() + if len(provides) == 0: + self.delayed.append(node) + else: + for to in node.provides(): + tnode = to.target_node() + self.delegate_delay(tnode, delayed[:]) + self.mark_emitted(node) def delegate_delay(self, node, delayed): """ Chain up delays, this can reduce many more of the operations """ @@ -597,6 +619,42 @@ for d in delayed: delayedlist.append(d) + def 
mark_emitted(state, node, unpack=True): + """ An operation has been emitted, adds new operations to the worklist + whenever their dependency count drops to zero. + Keeps worklist sorted (see priority) """ + worklist = state.worklist + provides = node.provides()[:] + for dep in provides: # COPY + target = dep.to + node.remove_edge_to(target) + if not target.emitted and target.depends_count() == 0: + # sorts them by priority + i = len(worklist)-1 + while i >= 0: + cur = worklist[i] + c = (cur.priority - target.priority) + if c < 0: # meaning itnode.priority < target.priority: + worklist.insert(i+1, target) + break + elif c == 0: + # if they have the same priority, sort them + # using the original position in the trace + if target.getindex() < cur.getindex(): + worklist.insert(i+1, target) + break + i -= 1 + else: + worklist.insert(0, target) + node.clear_dependencies() + node.emitted = True + if not node.is_imaginary(): + op = node.getoperation() + state.renamer.rename(op) + if unpack: + state.ensure_args_unpacked(op) + state.post_emit(node) + def delay(self, node): if node.pack: pack = node.pack diff --git a/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py b/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py @@ -1395,11 +1395,10 @@ i2 = int_add(i1,8) jump(p0,i2) """) - self.schedule(trace) + self.schedule(trace, unroll_factor=0) self.ensure_operations([ 'v0[2xf64] = vec_load_f(p0, i0, 8, 0, descr=floatarraydescr)', 'i2 = int_add(i0, 16)', - 'jump(p0,i2)', ], trace) class TestLLtype(BaseTestVectorize, LLtypeMixin): From pypy.commits at gmail.com Thu Sep 8 09:32:32 2016 From: pypy.commits at gmail.com (sbauman) Date: Thu, 08 Sep 2016 06:32:32 -0700 (PDT) Subject: [pypy-commit] pypy force-virtual-state: Remove more junk Message-ID: <57d16870.811f1c0a.8ba16.1593@mx.google.com> Author: Spenser Andrew Bauman Branch: force-virtual-state 
Changeset: r86957:2960c83b0fbf Date: 2016-09-08 09:31 -0400 http://bitbucket.org/pypy/pypy/changeset/2960c83b0fbf/ Log: Remove more junk diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -4531,7 +4531,7 @@ _ptr = lltype.nullptr(rffi.CCHARP.TO) def new_int_buffer(value): - data = lltype.malloc(rffi.CCHARP.TO, noConst(rffi.sizeof(rffi.INT)), flavor='raw', zero=True) + data = lltype.malloc(rffi.CCHARP.TO, rffi.sizeof(rffi.INT), flavor='raw') rffi.cast(rffi.INTP, data)[0] = rffi.cast(rffi.INT, value) return data From pypy.commits at gmail.com Thu Sep 8 09:40:08 2016 From: pypy.commits at gmail.com (sbauman) Date: Thu, 08 Sep 2016 06:40:08 -0700 (PDT) Subject: [pypy-commit] pypy force-virtual-state: Merge Message-ID: <57d16a38.06a81c0a.1ca02.4c71@mx.google.com> Author: Spenser Andrew Bauman Branch: force-virtual-state Changeset: r86958:3a3067bcf4c9 Date: 2016-09-08 09:39 -0400 http://bitbucket.org/pypy/pypy/changeset/3a3067bcf4c9/ Log: Merge diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -30,3 +30,4 @@ 68bb3510d8212ae9efb687e12e58c09d29e74f87 release-pypy2.7-v5.4.0 68bb3510d8212ae9efb687e12e58c09d29e74f87 release-pypy2.7-v5.4.0 77392ad263504df011ccfcabf6a62e21d04086d0 release-pypy2.7-v5.4.0 +050d84dd78997f021acf0e133934275d63547cc0 release-pypy2.7-v5.4.1 diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -6,6 +6,7 @@ .. 
toctree:: + + release-pypy2.7-v5.4.1.rst release-pypy2.7-v5.4.0.rst release-pypy2.7-v5.3.1.rst release-pypy2.7-v5.3.0.rst diff --git a/pypy/doc/release-pypy2.7-v5.4.1.rst b/pypy/doc/release-pypy2.7-v5.4.1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-pypy2.7-v5.4.1.rst @@ -0,0 +1,61 @@ +========== +PyPy 5.4.1 +========== + +We have released a bugfix for PyPy2.7-v5.4.0, released last week, +due to the following issues: + + * Update list of contributors in documentation and LICENSE file, + this was unfortunately left out of 5.4.0. My apologies to the new + contributors + + * Allow tests run with `-A` to find `libm.so` even if it is a script not a + dynamically loadable file + + * Bump `sys.setrecursionlimit()` when translating PyPy, for translating with CPython + + * Tweak a float comparison with 0 in `backendopt.inline` to avoid rounding errors + + * Fix for an issue where os.access() accepted a float for mode + + * Fix for an issue where `unicode.decode('utf8', 'custom_replace')` messed up + the last byte of a unicode string sometimes + + * Update built-in cffi_ to the soon-to-be-released 1.8.1 version + + * Explicitly detect that we found as-yet-unsupported OpenSSL 1.1, and crash + translation with a message asking for help porting it + +Thanks to those who reported the issues. + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison) +due to its integrated tracing JIT compiler. + +We also welcome developers of other +`dynamic languages`_ to see what RPython can do for them. + +This release supports: + + * **x86** machines on most common operating systems + (Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD), + + * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux, + + * big- and little-endian variants of **PPC64** running Linux, + + * **s390x** running Linux + +..
_cffi: https://cffi.readthedocs.io +.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org +.. _`dynamic languages`: http://pypyjs.org + +Please update, and continue to help us make PyPy better. + +Cheers + +The PyPy Team + diff --git a/pypy/tool/release/repackage.sh b/pypy/tool/release/repackage.sh --- a/pypy/tool/release/repackage.sh +++ b/pypy/tool/release/repackage.sh @@ -1,7 +1,7 @@ # Edit these appropriately before running this script maj=5 min=4 -rev=0 +rev=1 branchname=release-$maj.x # ==OR== release-$maj.$min.x tagname=release-pypy2.7-v$maj.$min.$rev # ==OR== release-$maj.$min From pypy.commits at gmail.com Thu Sep 8 10:39:00 2016 From: pypy.commits at gmail.com (rlamy) Date: Thu, 08 Sep 2016 07:39:00 -0700 (PDT) Subject: [pypy-commit] pypy default: Align wrapped method compile_module() API with compile_module() function Message-ID: <57d17804.8f081c0a.57fa7.6c49@mx.google.com> Author: Ronan Lamy Branch: Changeset: r86959:af84bf7ba373 Date: 2016-09-08 15:37 +0100 http://bitbucket.org/pypy/pypy/changeset/af84bf7ba373/ Log: Align wrapped method compile_module() API with compile_module() function diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -307,27 +307,23 @@ def setup_method(self, func): @gateway.unwrap_spec(name=str) def compile_module(space, name, - w_separate_module_files=None, - w_separate_module_sources=None): + w_source_files=None, + w_source_strings=None): """ Build an extension module linked against the cpyext api library. 
""" - if not space.is_none(w_separate_module_files): - separate_module_files = space.listview_bytes( - w_separate_module_files) - assert separate_module_files is not None + if not space.is_none(w_source_files): + source_files = space.listview_bytes(w_source_files) else: - separate_module_files = [] - if not space.is_none(w_separate_module_sources): - separate_module_sources = space.listview_bytes( - w_separate_module_sources) - assert separate_module_sources is not None + source_files = None + if not space.is_none(w_source_strings): + source_strings = space.listview_bytes(w_source_strings) else: - separate_module_sources = [] + source_strings = None pydname = compile_extension_module( self.sys_info, name, - source_files=separate_module_files, - source_strings=separate_module_sources) + source_files=source_files, + source_strings=source_strings) return space.wrap(pydname) @gateway.unwrap_spec(name=str, init='str_or_None', body=str, @@ -638,10 +634,10 @@ skip('record_imported_module not supported in runappdirect mode') # Build the extensions. 
banana = self.compile_module( - "apple.banana", separate_module_files=[self.here + 'banana.c']) + "apple.banana", source_files=[self.here + 'banana.c']) self.record_imported_module("apple.banana") date = self.compile_module( - "cherry.date", separate_module_files=[self.here + 'date.c']) + "cherry.date", source_files=[self.here + 'date.c']) self.record_imported_module("cherry.date") # Set up some package state so that the extensions can actually be From pypy.commits at gmail.com Thu Sep 8 11:34:28 2016 From: pypy.commits at gmail.com (sbauman) Date: Thu, 08 Sep 2016 08:34:28 -0700 (PDT) Subject: [pypy-commit] pypy default: Merge in force-virtual-state Message-ID: <57d18504.8628c20a.f6b0b.4f04@mx.google.com> Author: Spenser Andrew Bauman Branch: Changeset: r86960:990479cf9d20 Date: 2016-09-08 11:33 -0400 http://bitbucket.org/pypy/pypy/changeset/990479cf9d20/ Log: Merge in force-virtual-state diff --git a/rpython/jit/metainterp/optimizeopt/info.py b/rpython/jit/metainterp/optimizeopt/info.py --- a/rpython/jit/metainterp/optimizeopt/info.py +++ b/rpython/jit/metainterp/optimizeopt/info.py @@ -365,6 +365,13 @@ def visitor_dispatch_virtual_type(self, visitor): raise NotImplementedError("abstract") + def make_guards(self, op, short, optimizer): + from rpython.jit.metainterp.optimizeopt.optimizer import CONST_0 + op = ResOperation(rop.INT_EQ, [op, CONST_0]) + short.append(op) + op = ResOperation(rop.GUARD_FALSE, [op]) + short.append(op) + class RawBufferPtrInfo(AbstractRawPtrInfo): buffer = None diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -16,7 +16,7 @@ from rpython.rlib.debug import debug_print, debug_start, debug_stop,\ have_debug_prints -class UnrollableOptimizer(Optimizer): +class UnrollableOptimizer(Optimizer): def force_op_from_preamble(self, preamble_op): if isinstance(preamble_op, PreambleOp): if 
self.optunroll.short_preamble_producer is None: @@ -120,7 +120,8 @@ assert op.get_forwarded() is None if check_newops: assert not self.optimizer._newoperations - + + def optimize_preamble(self, trace, runtime_boxes, call_pure_results, memo): info, newops = self.optimizer.propagate_all_forward( trace.get_iter(), call_pure_results, flush=False) @@ -156,7 +157,7 @@ current_vs = self.get_virtual_state(end_jump.getarglist()) # pick the vs we want to jump to assert isinstance(celltoken, JitCellToken) - + target_virtual_state = self.pick_virtual_state(current_vs, state.virtual_state, celltoken.target_tokens) @@ -180,17 +181,27 @@ self.jump_to_preamble(celltoken, end_jump, info) return (UnrollInfo(target_token, label_op, extra_same_as, self.optimizer.quasi_immutable_deps), - self.optimizer._newoperations) + self.optimizer._newoperations) try: - new_virtual_state = self.jump_to_existing_trace(end_jump, label_op, - state.runtime_boxes) + new_virtual_state = self.jump_to_existing_trace( + end_jump, label_op, state.runtime_boxes, force_boxes=False) except InvalidLoop: # inlining short preamble failed, jump to preamble self.jump_to_preamble(celltoken, end_jump, info) return (UnrollInfo(target_token, label_op, extra_same_as, self.optimizer.quasi_immutable_deps), self.optimizer._newoperations) + + if new_virtual_state is not None: + # Attempt to force virtual boxes in order to avoid jumping + # to the preamble. 
+ try: + new_virtual_state = self.jump_to_existing_trace( + end_jump, label_op, state.runtime_boxes, force_boxes=True) + except InvalidLoop: + pass + if new_virtual_state is not None: self.jump_to_preamble(celltoken, end_jump, info) return (UnrollInfo(target_token, label_op, extra_same_as, @@ -199,7 +210,7 @@ self.disable_retracing_if_max_retrace_guards( self.optimizer._newoperations, target_token) - + return (UnrollInfo(target_token, label_op, extra_same_as, self.optimizer.quasi_immutable_deps), self.optimizer._newoperations) @@ -241,7 +252,8 @@ for a in jump_op.getarglist(): self.optimizer.force_box_for_end_of_preamble(a) try: - vs = self.jump_to_existing_trace(jump_op, None, runtime_boxes) + vs = self.jump_to_existing_trace(jump_op, None, runtime_boxes, + force_boxes=False) except InvalidLoop: return self.jump_to_preamble(cell_token, jump_op, info) if vs is None: @@ -252,6 +264,14 @@ cell_token.retraced_count += 1 debug_print('Retracing (%d/%d)' % (cell_token.retraced_count, limit)) else: + # Try forcing boxes to avoid jumping to the preamble + try: + vs = self.jump_to_existing_trace(jump_op, None, runtime_boxes, + force_boxes=True) + except InvalidLoop: + pass + if vs is None: + return info, self.optimizer._newoperations[:] debug_print("Retrace count reached, jumping to preamble") return self.jump_to_preamble(cell_token, jump_op, info) exported_state = self.export_state(info.jump_op.getarglist(), @@ -288,7 +308,7 @@ return info, self.optimizer._newoperations[:] - def jump_to_existing_trace(self, jump_op, label_op, runtime_boxes): + def jump_to_existing_trace(self, jump_op, label_op, runtime_boxes, force_boxes=False): jitcelltoken = jump_op.getdescr() assert isinstance(jitcelltoken, JitCellToken) virtual_state = self.get_virtual_state(jump_op.getarglist()) @@ -299,7 +319,8 @@ continue try: extra_guards = target_virtual_state.generate_guards( - virtual_state, args, runtime_boxes, self.optimizer) + virtual_state, args, runtime_boxes, self.optimizer, + 
force_boxes=force_boxes) patchguardop = self.optimizer.patchguardop for guard in extra_guards.extra_guards: if isinstance(guard, GuardResOp): @@ -308,8 +329,18 @@ self.send_extra_operation(guard) except VirtualStatesCantMatch: continue - args, virtuals = target_virtual_state.make_inputargs_and_virtuals( - args, self.optimizer) + + # When force_boxes == True, creating the virtual args can fail when + # components of the virtual state alias. If this occurs, we must + # recompute the virtual state as boxes will have been forced. + try: + args, virtuals = target_virtual_state.make_inputargs_and_virtuals( + args, self.optimizer, force_boxes=force_boxes) + except VirtualStatesCantMatch: + assert force_boxes + virtual_state = self.get_virtual_state(args) + continue + short_preamble = target_token.short_preamble try: extra = self.inline_short_preamble(args + virtuals, args, @@ -452,7 +483,7 @@ # by short preamble label_args = exported_state.virtual_state.make_inputargs( targetargs, self.optimizer) - + self.short_preamble_producer = ShortPreambleBuilder( label_args, exported_state.short_boxes, exported_state.short_inputargs, exported_state.exported_infos, @@ -497,7 +528,7 @@ * runtime_boxes - runtime values for boxes, necessary when generating guards to jump to """ - + def __init__(self, end_args, next_iteration_args, virtual_state, exported_infos, short_boxes, renamed_inputargs, short_inputargs, runtime_boxes, memo): diff --git a/rpython/jit/metainterp/optimizeopt/virtualstate.py b/rpython/jit/metainterp/optimizeopt/virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/virtualstate.py @@ -4,7 +4,7 @@ ArrayStructInfo, AbstractStructPtrInfo from rpython.jit.metainterp.optimizeopt.intutils import \ MININT, MAXINT, IntBound, IntLowerBound -from rpython.jit.metainterp.resoperation import rop, ResOperation,\ +from rpython.jit.metainterp.resoperation import rop, ResOperation, \ InputArgInt, InputArgRef, InputArgFloat 
from rpython.rlib.debug import debug_print @@ -20,7 +20,7 @@ class GenerateGuardState(object): - def __init__(self, optimizer=None, guards=None, renum=None, bad=None): + def __init__(self, optimizer=None, guards=None, renum=None, bad=None, force_boxes=False): self.optimizer = optimizer self.cpu = optimizer.cpu if guards is None: @@ -32,6 +32,7 @@ if bad is None: bad = {} self.bad = bad + self.force_boxes = force_boxes def get_runtime_item(self, box, descr, i): array = box.getref_base() @@ -303,7 +304,7 @@ opinfo = state.optimizer.getptrinfo(box) assert isinstance(opinfo, ArrayPtrInfo) else: - opinfo = None + opinfo = None for i in range(self.length): for descr in self.fielddescrs: index = i * len(self.fielddescrs) + descr.get_index() @@ -514,6 +515,8 @@ NotVirtualStateInfo.__init__(self, cpu, type, info) def _generate_guards(self, other, box, runtime_box, state): + if state.force_boxes and isinstance(other, VirtualStateInfo): + return self._generate_virtual_guards(other, box, runtime_box, state) if not isinstance(other, NotVirtualStateInfoPtr): raise VirtualStatesCantMatch( 'The VirtualStates does not match as a ' + @@ -545,6 +548,23 @@ # to an existing compiled loop or retracing the loop. Both alternatives # will always generate correct behaviour, but performance will differ. + def _generate_virtual_guards(self, other, box, runtime_box, state): + """ + Generate the guards and add state information for unifying a virtual + object with a non-virtual. This involves forcing the object in the + event that unification can succeed. Since virtual objects cannot be null, + this method need only check that the virtual object has the expected type. 
+ """ + assert state.force_boxes and isinstance(other, VirtualStateInfo) + + if self.level == LEVEL_CONSTANT: + raise VirtualStatesCantMatch( + "cannot unify a constant value with a virtual object") + + if self.level == LEVEL_KNOWNCLASS: + if not self.known_class.same_constant(other.known_class): + raise VirtualStatesCantMatch("classes don't match") + def _generate_guards_nonnull(self, other, box, runtime_box, extra_guards, state): if not isinstance(other, NotVirtualStateInfoPtr): @@ -617,10 +637,10 @@ return False return True - def generate_guards(self, other, boxes, runtime_boxes, optimizer): + def generate_guards(self, other, boxes, runtime_boxes, optimizer, force_boxes=False): assert (len(self.state) == len(other.state) == len(boxes) == len(runtime_boxes)) - state = GenerateGuardState(optimizer) + state = GenerateGuardState(optimizer, force_boxes=force_boxes) for i in range(len(self.state)): self.state[i].generate_guards(other.state[i], boxes[i], runtime_boxes[i], state) @@ -644,8 +664,8 @@ return boxes - def make_inputargs_and_virtuals(self, inputargs, optimizer): - inpargs = self.make_inputargs(inputargs, optimizer) + def make_inputargs_and_virtuals(self, inputargs, optimizer, force_boxes=False): + inpargs = self.make_inputargs(inputargs, optimizer, force_boxes) # we append the virtuals here in case some stuff is proven # to be not a virtual and there are getfields in the short preamble # that will read items out of there @@ -653,7 +673,7 @@ for i in range(len(inputargs)): if not isinstance(self.state[i], NotVirtualStateInfo): virtuals.append(inputargs[i]) - + return inpargs, virtuals def debug_print(self, hdr='', bad=None, metainterp_sd=None): diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -4507,3 +4507,54 @@ i += 1 return i self.meta_interp(f, []) + + def test_round_trip_raw_pointer(self): + # The goal of this 
test is to get a raw pointer op into the short preamble + # so we can check that the proper guards are generated + # In this case, the resulting short preamble contains + # + # i1 = getfield_gc_i(p0, descr=inst__ptr) + # i2 = int_eq(i1, 0) + # guard_false(i2) + # + # as opposed to what the JIT used to produce + # + # i1 = getfield_gc_i(p0, descr=inst__ptr) + # guard_nonnull(i1) + # + # Which will probably generate correct assembly, but the optimization + # pipeline expects guard_nonnull arguments to be pointer ops and may crash + # on other input types. + driver = JitDriver(greens=[], reds=['i', 'val']) + + class Box(object): + _ptr = lltype.nullptr(rffi.CCHARP.TO) + + def new_int_buffer(value): + data = lltype.malloc(rffi.CCHARP.TO, rffi.sizeof(rffi.INT), flavor='raw') + rffi.cast(rffi.INTP, data)[0] = rffi.cast(rffi.INT, value) + return data + + def read_int_buffer(buf): + return rffi.cast(rffi.INTP, buf)[0] + + def f(): + i = 0 + val = Box() + val._ptr = new_int_buffer(1) + + set_param(None, 'retrace_limit', -1) + while i < 100: + driver.jit_merge_point(i=i, val=val) + driver.can_enter_jit(i=i, val=val) + # Just to produce a side exit + if i & 0b100: + i += 1 + i += int(read_int_buffer(val._ptr)) + lltype.free(val._ptr, flavor='raw') + val._ptr = new_int_buffer(1) + lltype.free(val._ptr, flavor='raw') + + self.meta_interp(f, []) + self.check_resops(guard_nonnull=0) + diff --git a/rpython/jit/metainterp/test/test_virtual.py b/rpython/jit/metainterp/test/test_virtual.py --- a/rpython/jit/metainterp/test/test_virtual.py +++ b/rpython/jit/metainterp/test/test_virtual.py @@ -1,8 +1,8 @@ import py -from rpython.rlib.jit import JitDriver, promote, dont_look_inside +from rpython.rlib.jit import JitDriver, promote, dont_look_inside, set_param from rpython.rlib.objectmodel import compute_unique_id from rpython.jit.codewriter.policy import StopAtXPolicy -from rpython.jit.metainterp.test.support import LLJitMixin +from rpython.jit.metainterp.test.support import
LLJitMixin, get_stats from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper import rclass from rpython.rtyper.lltypesystem.lloperation import llop @@ -965,6 +965,82 @@ self.check_aborted_count(0) self.check_target_token_count(4) + def test_avoid_preamble(self): + driver = JitDriver(greens=[], reds=['i', 'val']) + class X(object): + def __init__(self, v): + self.v = v + + class Box(object): + def __init__(self, v): + self.unbox = v + + mask = -2 + const = Box(X(5)) + def f(): + # Prevent all retracing of side exits. Ensures that the unroll + # optimizer will attempt to jump to either the preamble or loop. + set_param(driver, 'retrace_limit', -1) + set_param(driver, 'threshold', 1) + val = X(0) + i = 0 + const.unbox = X(5) + while i < 17: + driver.can_enter_jit(i=i, val=val) + driver.jit_merge_point(i=i, val=val) + # Logical & rather than comparison to confuse range analysis. + # Test only succeeds on the first 2 iterations + if i & -2 == 0: + val = const.unbox + else: + val = X(i) + i += 1 + return 0 + + self.meta_interp(f, []) + + # With retracing disable, there will be one optimized loop expecting a + # non-virtual X object. The side exit creates a virtual object which must + # be allocated to jump to the optimized trace. + self.check_resops(jump=3, label=2, new_with_vtable=2) + self.check_target_token_count(2) + self.check_trace_count(3) + + def test_conflated_virtual_states(self): + # All cases are covered when forcing one component of the virtual state + # also forces an as yet unseen component. + # i.e. 
expect [NotVirtual, Virtual] and given a pair of aliasing virtual + # objects + driver = JitDriver(greens=[], reds=['i', 'v1', 'v2']) + class Box(object): + def __init__(self, v): + self.v = v + + class X(object): + def __init__(self, v): + self.v = v + + const = Box(X(0)) + def f(): + set_param(None, 'retrace_limit', -1) + set_param(None, 'threshold', 1) + i = 0 + v1 = X(0) + v2 = X(0) + const.v = X(0) + while i < 17: + driver.jit_merge_point(i=i, v1=v1, v2=v2) + driver.can_enter_jit(i=i, v1=v1, v2=v2) + if i & 1 == 0: + v1 = const.v + v2 = X(i) + else: + v1 = v2 = X(i) + i += 1 + return None + self.meta_interp(f, []) + # assert did not crash + class VirtualMiscTests: def test_multiple_equal_virtuals(self): From pypy.commits at gmail.com Thu Sep 8 11:34:49 2016 From: pypy.commits at gmail.com (rlamy) Date: Thu, 08 Sep 2016 08:34:49 -0700 (PDT) Subject: [pypy-commit] pypy test-cpyext: Make compile_extension_module() a method of sys_info Message-ID: <57d18519.436ec20a.434cf.46bb@mx.google.com> Author: Ronan Lamy Branch: test-cpyext Changeset: r86961:395605edacba Date: 2016-09-08 16:29 +0100 http://bitbucket.org/pypy/pypy/changeset/395605edacba/ Log: Make compile_extension_module() a method of sys_info diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -66,6 +66,30 @@ self.extra_libs = extra_libs self.ext = ext + def compile_extension_module(self, modname, include_dirs=[], + source_files=None, source_strings=None): + """ + Build an extension module and return the filename of the resulting native + code file. + + modname is the name of the module, possibly including dots if it is a module + inside a package. + + Any extra keyword arguments are passed on to ExternalCompilationInfo to + build the module (so specify your source with one of those). 
+ """ + modname = modname.split('.')[-1] + soname = create_so(modname, + include_dirs=self.include_extra + include_dirs, + source_files=source_files, + source_strings=source_strings, + compile_extra=self.compile_extra, + link_extra=self.link_extra, + libraries=self.extra_libs) + pydname = soname.new(purebasename=modname, ext=self.ext) + soname.rename(pydname) + return str(pydname) + def get_cpyext_info(space): from pypy.module.imp.importing import get_so_extension state = space.fromcache(State) @@ -94,30 +118,6 @@ ext=get_so_extension(space)) -def compile_extension_module(sys_info, modname, include_dirs=[], - source_files=None, source_strings=None): - """ - Build an extension module and return the filename of the resulting native - code file. - - modname is the name of the module, possibly including dots if it is a module - inside a package. - - Any extra keyword arguments are passed on to ExternalCompilationInfo to - build the module (so specify your source with one of those). - """ - modname = modname.split('.')[-1] - soname = create_so(modname, - include_dirs=sys_info.include_extra + include_dirs, - source_files=source_files, - source_strings=source_strings, - compile_extra=sys_info.compile_extra, - link_extra=sys_info.link_extra, - libraries=sys_info.extra_libs) - pydname = soname.new(purebasename=modname, ext=sys_info.ext) - soname.rename(pydname) - return str(pydname) - def get_so_suffix(): from imp import get_suffixes, C_EXTENSION for suffix, mode, typ in get_suffixes(): @@ -320,8 +320,8 @@ source_strings = space.listview_bytes(w_source_strings) else: source_strings = None - pydname = compile_extension_module( - self.sys_info, name, + pydname = self.sys_info.compile_extension_module( + name, source_files=source_files, source_strings=source_strings) return space.wrap(pydname) @@ -376,8 +376,8 @@ filename = py.path.local(pypydir) / 'module' \ / 'cpyext'/ 'test' / (filename + ".c") kwds = dict(source_files=[filename]) - mod = 
compile_extension_module(self.sys_info, name, - include_dirs=include_dirs, **kwds) + mod = self.sys_info.compile_extension_module( + name, include_dirs=include_dirs, **kwds) if load_it: if self.runappdirect: @@ -469,11 +469,12 @@ def wrap(func): return func self.sys_info = get_sys_info_app() + self.compile_module = self.sys_info.compile_extension_module else: interp2app = gateway.interp2app wrap = self.space.wrap self.sys_info = get_cpyext_info(self.space) - self.w_compile_module = wrap(interp2app(compile_module)) + self.w_compile_module = wrap(interp2app(compile_module)) self.w_import_module = wrap(interp2app(import_module)) self.w_reimport_module = wrap(interp2app(reimport_module)) self.w_import_extension = wrap(interp2app(import_extension)) From pypy.commits at gmail.com Thu Sep 8 11:48:16 2016 From: pypy.commits at gmail.com (rlamy) Date: Thu, 08 Sep 2016 08:48:16 -0700 (PDT) Subject: [pypy-commit] pypy test-cpyext: Inline create_so() Message-ID: <57d18840.811f1c0a.8ba16.55a3@mx.google.com> Author: Ronan Lamy Branch: test-cpyext Changeset: r86962:0ab65e47f082 Date: 2016-09-08 16:47 +0100 http://bitbucket.org/pypy/pypy/changeset/0ab65e47f082/ Log: Inline create_so() diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -40,19 +40,6 @@ files.append(filename) return files -def create_so(modname, include_dirs, source_strings=None, source_files=None, - compile_extra=None, link_extra=None, libraries=None): - dirname = (udir/uniquemodulename('module')).ensure(dir=1) - if source_strings: - assert not source_files - files = convert_sources_to_files(source_strings, dirname) - source_files = files - soname = c_compile(source_files, outputfilename=str(dirname/modname), - compile_extra=compile_extra, link_extra=link_extra, - include_dirs=include_dirs, - libraries=libraries) - return soname - class SystemCompilationInfo(object): """Bundles all 
the generic information required to compile extensions. @@ -69,22 +56,25 @@ def compile_extension_module(self, modname, include_dirs=[], source_files=None, source_strings=None): """ - Build an extension module and return the filename of the resulting native - code file. + Build an extension module and return the filename of the resulting + native code file. - modname is the name of the module, possibly including dots if it is a module - inside a package. + modname is the name of the module, possibly including dots if it is a + module inside a package. Any extra keyword arguments are passed on to ExternalCompilationInfo to build the module (so specify your source with one of those). """ modname = modname.split('.')[-1] - soname = create_so(modname, - include_dirs=self.include_extra + include_dirs, - source_files=source_files, - source_strings=source_strings, + dirname = (udir/uniquemodulename('module')).ensure(dir=1) + if source_strings: + assert not source_files + files = convert_sources_to_files(source_strings, dirname) + source_files = files + soname = c_compile(source_files, outputfilename=str(dirname/modname), compile_extra=self.compile_extra, link_extra=self.link_extra, + include_dirs=self.include_extra + include_dirs, libraries=self.extra_libs) pydname = soname.new(purebasename=modname, ext=self.ext) soname.rename(pydname) From pypy.commits at gmail.com Thu Sep 8 12:56:30 2016 From: pypy.commits at gmail.com (sbauman) Date: Thu, 08 Sep 2016 09:56:30 -0700 (PDT) Subject: [pypy-commit] pypy default: Update whatsnew-head for the force-virtual-state Message-ID: <57d1983e.a717c20a.dd7b5.817b@mx.google.com> Author: Spenser Andrew Bauman Branch: Changeset: r86963:b21c171b5632 Date: 2016-09-08 12:55 -0400 http://bitbucket.org/pypy/pypy/changeset/b21c171b5632/ Log: Update whatsnew-head for the force-virtual-state diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -12,4 +12,7 @@ 
Implement PyObject_GetBuffer, PyMemoryView_GET_BUFFER, and handles memoryviews in numpypy - +.. branch: force-virtual-state +Improve merging of virtual states in the JIT in order to avoid jumping to the +preamble. Accomplished by allocating virtual objects where non-virtuals are +expected. From pypy.commits at gmail.com Thu Sep 8 13:16:25 2016 From: pypy.commits at gmail.com (rlamy) Date: Thu, 08 Sep 2016 10:16:25 -0700 (PDT) Subject: [pypy-commit] pypy test-cpyext: Simplify import_extension() by extracting make_methods() Message-ID: <57d19ce9.4cc51c0a.85fe4.eb6a@mx.google.com> Author: Ronan Lamy Branch: test-cpyext Changeset: r86964:8ed4d2de2eed Date: 2016-09-08 17:57 +0100 http://bitbucket.org/pypy/pypy/changeset/8ed4d2de2eed/ Log: Simplify import_extension() by extracting make_methods() diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -136,6 +136,28 @@ link_extra=link_extra, ext=get_so_suffix()) +def make_methods(functions, modname): + methods_table = [] + codes = [] + for funcname, flags, code in functions: + cfuncname = "%s_%s" % (modname, funcname) + methods_table.append( + "{\"%s\", %s, %s}," % (funcname, cfuncname, flags)) + func_code = """ + static PyObject* %s(PyObject* self, PyObject* args) + { + %s + } + """ % (cfuncname, code) + codes.append(func_code) + + body = "\n".join(codes) + """ + static PyMethodDef methods[] = { + %s + { NULL } + }; + """ % ('\n'.join(methods_table),) + return body def freeze_refcnts(self): rawrefcount._dont_free_any_more() @@ -402,26 +424,7 @@ def import_extension(space, modname, w_functions, prologue="", w_include_dirs=None, more_init="", PY_SSIZE_T_CLEAN=False): functions = space.unwrap(w_functions) - methods_table = [] - codes = [] - for funcname, flags, code in functions: - cfuncname = "%s_%s" % (modname, funcname) - methods_table.append("{\"%s\", %s, %s}," % - (funcname, cfuncname, 
flags)) - func_code = """ - static PyObject* %s(PyObject* self, PyObject* args) - { - %s - } - """ % (cfuncname, code) - codes.append(func_code) - - body = prologue + "\n".join(codes) + """ - static PyMethodDef methods[] = { - %s - { NULL } - }; - """ % ('\n'.join(methods_table),) + body = prologue + make_methods(functions, modname) init = """Py_InitModule("%s", methods);""" % (modname,) if more_init: init += more_init From pypy.commits at gmail.com Thu Sep 8 13:16:27 2016 From: pypy.commits at gmail.com (rlamy) Date: Thu, 08 Sep 2016 10:16:27 -0700 (PDT) Subject: [pypy-commit] pypy test-cpyext: Simplify load_it=False logic in import_module() Message-ID: <57d19ceb.c70a1c0a.5ad4f.a291@mx.google.com> Author: Ronan Lamy Branch: test-cpyext Changeset: r86965:2224e01830ae Date: 2016-09-08 18:15 +0100 http://bitbucket.org/pypy/pypy/changeset/2224e01830ae/ Log: Simplify load_it=False logic in import_module() diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -178,6 +178,7 @@ return arg listview = passthrough str_w = passthrough + wrap = passthrough def unwrap(self, args): try: @@ -391,22 +392,18 @@ mod = self.sys_info.compile_extension_module( name, include_dirs=include_dirs, **kwds) - if load_it: - if self.runappdirect: - import imp - return imp.load_dynamic(name, mod) - else: - api.load_extension_module(space, mod, name) - self.imported_module_names.append(name) - return space.getitem( - space.sys.get('modules'), - space.wrap(name)) + if not load_it: + return space.wrap(mod) + if self.runappdirect: + import imp + return imp.load_dynamic(name, mod) else: - path = os.path.dirname(mod) - if self.runappdirect: - return path - else: - return space.wrap(path) + api.load_extension_module(space, mod, name) + self.imported_module_names.append(name) + return space.getitem( + space.sys.get('modules'), + space.wrap(name)) + 
@gateway.unwrap_spec(mod=str, name=str) def reimport_module(space, mod, name): diff --git a/pypy/module/cpyext/test/test_import.py b/pypy/module/cpyext/test/test_import.py --- a/pypy/module/cpyext/test/test_import.py +++ b/pypy/module/cpyext/test/test_import.py @@ -1,6 +1,6 @@ from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase -from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rtyper.lltypesystem import rffi class TestImport(BaseApiTest): def test_import(self, space, api): @@ -39,9 +39,8 @@ class AppTestImportLogic(AppTestCpythonExtensionBase): def test_import_logic(self): + import sys, os path = self.import_module(name='test_import_module', load_it=False) - import sys - sys.path.append(path) + sys.path.append(os.path.dirname(path)) import test_import_module assert test_import_module.TEST is None - From pypy.commits at gmail.com Thu Sep 8 16:37:36 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 08 Sep 2016 13:37:36 -0700 (PDT) Subject: [pypy-commit] pypy reverse-debugger: Remove the runtime part, move it to http://bitbucket.org/pypy/revdb/ Message-ID: <57d1cc10.a4fdc20a.adb8f.6e06@mx.google.com> Author: Armin Rigo Branch: reverse-debugger Changeset: r86966:9f8dd66da2a9 Date: 2016-09-08 18:01 +0200 http://bitbucket.org/pypy/pypy/changeset/9f8dd66da2a9/ Log: Remove the runtime part, move it to http://bitbucket.org/pypy/revdb/ diff --git a/rpython/translator/revdb/ancillary.py b/rpython/translator/revdb/ancillary.py deleted file mode 100644 --- a/rpython/translator/revdb/ancillary.py +++ /dev/null @@ -1,57 +0,0 @@ -import py -import os, sys - - -def build(tmpdir): - import cffi - ffibuilder = cffi.FFI() - - ffibuilder.cdef(""" - int ancil_send_fds(int, const int *, unsigned); - int ancil_recv_fds(int, int *, unsigned); - """) - - local_dir = os.path.dirname(os.path.abspath(__file__)) - src_dir = os.path.join(local_dir, 'src-revdb') - - 
ffibuilder.set_source("_ancillary_cffi", """ - #include - """, include_dirs=[src_dir], - sources=[os.path.join(src_dir, 'fd_send.c'), - os.path.join(src_dir, 'fd_recv.c')]) - - ffibuilder.compile(tmpdir=tmpdir, verbose=True) - -def import_(verbose=False): - import rpython - basedir = py.path.local(rpython.__file__).dirpath() - tmpdir = str(basedir.ensure('_cache', 'ancillary', dir=1)) - if verbose: - print tmpdir - old_sys_path = sys.path[:] - sys.path.insert(0, tmpdir) - try: - import _ancillary_cffi - except ImportError: - build(tmpdir) - import _ancillary_cffi - sys.path[:] = old_sys_path - return _ancillary_cffi.ffi, _ancillary_cffi.lib - - -def send_fds(pipe_num, fd_list): - ffi, lib = import_() - if lib.ancil_send_fds(pipe_num, fd_list, len(fd_list)) < 0: - raise OSError(ffi.errno, "ancil_send_fds() failed") - -def recv_fds(pipe_num, fd_count): - ffi, lib = import_() - p = ffi.new("int[]", fd_count) - result = lib.ancil_recv_fds(pipe_num, p, fd_count) - if result < 0: - raise OSError(ffi.errno, "ancil_recv_fds() failed") - return [p[i] for i in xrange(result)] - - -if __name__ == '__main__': - import_(verbose=True) diff --git a/rpython/translator/revdb/interact.py b/rpython/translator/revdb/interact.py deleted file mode 100644 --- a/rpython/translator/revdb/interact.py +++ /dev/null @@ -1,492 +0,0 @@ -import sys, os, re -import subprocess, socket -import traceback, linecache -from contextlib import contextmanager -try: - import readline -except ImportError: - pass - -from rpython.translator.revdb.process import ReplayProcessGroup -from rpython.translator.revdb.process import Breakpoint - -r_cmdline = re.compile(r"([a-zA-Z0-9_]\S*|.)\s*(.*)") -r_dollar_num = re.compile(r"\$(\d+)\b") - - -class RevDebugControl(object): - - def __init__(self, revdb_log_filename, executable=None, - pygments_background=None): - with open(revdb_log_filename, 'rb') as f: - header = f.readline() - assert header.endswith('\n') - fields = header[:-1].split('\t') - if len(fields) < 2 or 
fields[0] != 'RevDB:': - raise ValueError("file %r is not a RevDB log" % ( - revdb_log_filename,)) - if executable is None: - executable = fields[1] - if not os.path.isfile(executable): - raise ValueError("executable %r not found" % (executable,)) - linecacheoutput = self.getlinecacheoutput(pygments_background) - self.pgroup = ReplayProcessGroup(executable, revdb_log_filename, - linecacheoutput) - self.print_extra_pending_info = None - - def interact(self): - self.last_command = 'help' - self.previous_time = None - self.previous_thread = 0 - while True: - prompt = self.print_lines_before_prompt() - try: - while True: - cmdline = self.display_prompt(prompt) - self.run_command(cmdline) - prompt = self.print_lines_before_prompt() - except KeyboardInterrupt: - rtime = self.previous_time or 1 - print - print 'KeyboardInterrupt: restoring state at time %d...' % ( - rtime,) - self.pgroup.recreate_subprocess(rtime) - print "(type 'q' or Ctrl-D to quit)" - self.last_command = '' - self.previous_thread = '?' - self.previous_time = '?' 
- - def print_lines_before_prompt(self): - last_time = self.pgroup.get_current_time() - if last_time != self.previous_time: - print - if self.pgroup.get_current_thread() != self.previous_thread: - self.previous_thread = self.pgroup.get_current_thread() - if self.previous_thread == 0: - print ('-------------------- in main thread #0 ' - '--------------------') - else: - print ('-------------------- in non-main thread ' - '#%d --------------------' % (self.previous_thread,)) - self.pgroup.update_watch_values() - last_time = self.pgroup.get_current_time() - if self.print_extra_pending_info: - print self.print_extra_pending_info - self.print_extra_pending_info = None - if last_time != self.previous_time: - self.pgroup.show_backtrace(complete=0) - self.previous_time = last_time - prompt = '(%d)$ ' % last_time - return prompt - - def display_prompt(self, prompt): - try: - cmdline = raw_input(prompt).strip() - except EOFError: - print - cmdline = 'quit' - if not cmdline: - cmdline = self.last_command - return cmdline - - def run_command(self, cmdline): - match = r_cmdline.match(cmdline) - if not match: - return - self.last_command = cmdline - command, argument = match.groups() - try: - runner = getattr(self, 'command_' + command) - except AttributeError: - print >> sys.stderr, "no command '%s', try 'help'" % (command,) - else: - try: - runner(argument) - except KeyboardInterrupt: - raise - except Exception as e: - traceback.print_exc() - print >> sys.stderr - print >> sys.stderr, 'Something went wrong. You are now', - print >> sys.stderr, 'in a pdb; press Ctrl-D to continue.' - import pdb; pdb.post_mortem(sys.exc_info()[2]) - print >> sys.stderr - print >> sys.stderr, 'You are back running %s.' 
% ( - sys.argv[0],) - - def command_help(self, argument): - """Display commands summary""" - print 'Available commands:' - lst = dir(self) - commands = [(name[len('command_'):], getattr(self, name)) - for name in lst - if name.startswith('command_')] - seen = {} - for name, func in commands: - seen.setdefault(func, []).append(name) - for _, func in commands: - if func in seen: - names = seen.pop(func) - names.sort(key=len, reverse=True) - docstring = func.__doc__ or 'undocumented' - print '\t%-16s %s' % (', '.join(names), docstring) - - def command_quit(self, argument): - """Exit the debugger""" - self.pgroup.close() - sys.exit(0) - command_q = command_quit - - def command_go(self, argument): - """Jump to time ARG""" - arg = int(argument or self.pgroup.get_current_time()) - self.pgroup.jump_in_time(arg) - - def command_info(self, argument): - """Display various info ('info help' for more)""" - display = getattr(self, 'cmd_info_' + argument, self.cmd_info_help) - return display() - - def cmd_info_help(self): - """Display info topics summary""" - print 'Available info topics:' - for name in dir(self): - if name.startswith('cmd_info_'): - command = name[len('cmd_info_'):] - docstring = getattr(self, name).__doc__ or 'undocumented' - print '\tinfo %-12s %s' % (command, docstring) - - def cmd_info_paused(self): - """List current paused subprocesses""" - lst = [str(n) for n in sorted(self.pgroup.paused)] - print ', '.join(lst) - - def _bp_kind(self, num): - break_at = self.pgroup.all_breakpoints.num2break.get(num, '??') - if break_at[0] == 'B': - kind = 'breakpoint' - name = break_at[4:] - elif break_at[0] == 'W': - kind = 'watchpoint' - name = self.pgroup.all_breakpoints.sources.get(num, '??') - elif num == -3: - kind = 'stoppoint' - name = 'explicit stop' - elif num == -4: - kind = 'switchpoint' - name = 'thread switch' - else: - kind = '?????point' - name = repr(break_at) - return kind, name - - def _bp_new(self, source_expr, break_code, break_at, nids=None): - b = 
self.pgroup.edit_breakpoints() - new = 1 - while new in b.num2break: - new += 1 - b.set_num2break(new, break_code, break_at) - b.sources[new] = source_expr - if break_code == 'W': - b.watchvalues[new] = '' - if nids: - b.watchuids[new] = self.pgroup.nids_to_uids(nids) - return new - - def cmd_info_breakpoints(self): - """List current breakpoints and watchpoints""" - lst = self.pgroup.all_breakpoints.num2break.keys() - if lst: - for num in sorted(lst): - kind, name = self._bp_kind(num) - print '\t%s %d: %s' % (kind, num, name) - else: - print 'no breakpoints/watchpoints.' - cmd_info_watchpoints = cmd_info_breakpoints - - def move_forward(self, steps): - self.remove_tainting() - try: - self.pgroup.go_forward(steps) - return None - except Breakpoint as b: - self.hit_breakpoints(b) - return b - - def move_backward(self, steps): - try: - self.pgroup.go_backward(steps) - return None - except Breakpoint as b: - self.hit_breakpoints(b, backward=True) - return b - - def hit_breakpoints(self, b, backward=False): - printing = [] - for num in b.regular_breakpoint_nums(): - kind, name = self._bp_kind(num) - printing.append('%s %s%s: %s' % ( - 'Reverse-hit' if backward else 'Hit', - kind, - '' if kind == 'stoppoint' else ' %d' % (num,), - name)) - self.print_extra_pending_info = '\n'.join(printing) - if self.pgroup.get_current_time() != b.time: - target_time = b.time - if backward and any(self._bp_kind(num)[0] == 'watchpoint' - for num in b.regular_breakpoint_nums()): - target_time += 1 - self.pgroup.jump_in_time(target_time) - - def remove_tainting(self): - if self.pgroup.is_tainted(): - self.pgroup.jump_in_time(self.pgroup.get_current_time()) - assert not self.pgroup.is_tainted() - - def command_step(self, argument): - """Run forward ARG steps (default 1)""" - arg = int(argument or '1') - self.move_forward(arg) - command_s = command_step - - def command_bstep(self, argument): - """Run backward ARG steps (default 1)""" - arg = int(argument or '1') - self.move_backward(arg) - 
command_bs = command_bstep - - @contextmanager - def _stack_id_break(self, stack_id): - # add temporarily a breakpoint that hits when we enter/leave - # a frame from/to the frame identified by 'stack_id' - b = self.pgroup.edit_breakpoints() - b.stack_id = stack_id - try: - yield - finally: - b.stack_id = 0 - - @contextmanager - def _thread_num_break(self, thread_num): - # add temporarily a breakpoint that hits when we enter/leave - # the given thread - b = self.pgroup.edit_breakpoints() - b.thread_num = thread_num - try: - yield - finally: - b.thread_num = -1 - - def command_next(self, argument): - """Run forward for one step, skipping calls""" - while True: - stack_id = self.pgroup.get_stack_id(is_parent=False) - with self._stack_id_break(stack_id): - b = self.move_forward(1) - while b is not None: - # if we hit a regular breakpoint, stop - if any(b.regular_breakpoint_nums()): - return - # we hit only calls and returns inside stack_id. If the - # last one of these is a "return", then we're now back inside - # stack_id, so stop - if b.nums[-1] == -2: - break - # else, the last one is a "call", so we entered another frame. - # Continue running until the next call/return event occurs - # inside stack_id - with self._stack_id_break(stack_id): - b = self.move_forward(self.pgroup.get_max_time() - - self.pgroup.get_current_time()) - # and then look at that 'b' again (closes the loop) - - # we might be at a "<<" position on the same line as before, - # which returns a get_hiddenpos_level() value of 1. Continue - # until we reach a get_hiddenpos_level() value of 0. 
- if b is None or self.pgroup.get_hiddenpos_level() == 0: - break - command_n = command_next - - def command_bnext(self, argument): - """Run backward for one step, skipping calls""" - while True: - stack_id = self.pgroup.get_stack_id(is_parent=False) - with self._stack_id_break(stack_id): - b = self.move_backward(1) - while b is not None: - # if we hit a regular breakpoint, stop - if any(b.regular_breakpoint_nums()): - return - # we hit only calls and returns inside stack_id. If the - # first one of these is a "call", then we're now back inside - # stack_id, so stop - if b.nums[0] == -1: - break - # else, the first one is a "return", so before, we were - # inside a different frame. Continue running until the next - # call/return event occurs inside stack_id - with self._stack_id_break(stack_id): - b = self.move_backward(self.pgroup.get_current_time() - 1) - # and then look at that 'b' again (closes the loop) - - # we might be at a "<<" position on the same line as before, - # which returns a get_hiddenpos_level() value of 1. Continue - # until we reach a get_hiddenpos_level() value of 0. - if self.pgroup.get_hiddenpos_level() == 0: - break - command_bn = command_bnext - - def command_finish(self, argument): - """Run forward until the current function finishes""" - stack_id = self.pgroup.get_stack_id(is_parent=True) - if stack_id == 0: - print 'No caller.' - else: - with self._stack_id_break(stack_id): - self.command_continue('') - - def command_bfinish(self, argument): - """Run backward until the current function is called""" - stack_id = self.pgroup.get_stack_id(is_parent=True) - if stack_id == 0: - print 'No caller.' 
- else: - with self._stack_id_break(stack_id): - self.command_bcontinue('') - - def command_continue(self, argument): - """Run forward""" - self.move_forward(self.pgroup.get_max_time() - - self.pgroup.get_current_time()) - command_c = command_continue - - def command_bcontinue(self, argument): - """Run backward""" - self.move_backward(self.pgroup.get_current_time() - 1) - command_bc = command_bcontinue - - def _cmd_thread(self, argument, cmd_continue): - argument = argument.lstrip('#') - if argument: - arg = int(argument) - if arg == self.pgroup.get_current_thread(): - print 'Thread #%d is already the current one.' % (arg,) - return - else: - # use the current thread number to detect switches to any - # other thread (this works because revdb.c issues a - # breakpoint whenever there is a switch FROM or TO the - # thread '#arg'). - arg = self.pgroup.get_current_thread() - # - with self._thread_num_break(arg): - cmd_continue('') - - def command_nthread(self, argument): - """Run forward until thread switch (optionally to #ARG)""" - self._cmd_thread(argument, self.command_continue) - - def command_bthread(self, argument): - """Run backward until thread switch (optionally to #ARG)""" - self._cmd_thread(argument, self.command_bcontinue) - - def command_print(self, argument): - """Print an expression or execute a line of code""" - # locate which $NUM appear used in the expression - nids = map(int, r_dollar_num.findall(argument)) - self.pgroup.print_cmd(argument, nids=nids) - command_p = command_print - locals()['command_!'] = command_print - - def command_backtrace(self, argument): - """Show the backtrace""" - self.pgroup.show_backtrace(complete=1) - command_bt = command_backtrace - - def command_list(self, argument): - """Show the current function""" - self.pgroup.show_backtrace(complete=2) - - def command_locals(self, argument): - """Show the locals""" - self.pgroup.show_locals() - - def command_break(self, argument): - """Add a breakpoint""" - if not argument: - print 
"Break where?" - return - num = self._bp_new(argument, 'B', argument) - self.pgroup.update_breakpoints() - b = self.pgroup.edit_breakpoints() - if num not in b.num2break: - print "Breakpoint not added" - else: - kind, name = self._bp_kind(num) - print "Breakpoint %d added: %s" % (num, name) - command_b = command_break - - def command_delete(self, argument): - """Delete a breakpoint/watchpoint""" - b = self.pgroup.edit_breakpoints() - try: - arg = int(argument) - except ValueError: - for arg in b.num2break: - if self._bp_kind(arg)[1] == argument: - break - else: - print "No such breakpoint/watchpoint: %s" % (argument,) - return - if arg not in b.num2break: - print "No breakpoint/watchpoint number %d" % (arg,) - else: - kind, name = self._bp_kind(arg) - b.num2break.pop(arg, '') - b.sources.pop(arg, '') - b.watchvalues.pop(arg, '') - b.watchuids.pop(arg, '') - print "%s %d deleted: %s" % (kind.capitalize(), arg, name) - command_del = command_delete - - def command_watch(self, argument): - """Add a watchpoint (use $NUM in the expression to watch)""" - if not argument: - print "Watch what?" 
- return - # - ok_flag, compiled_code = self.pgroup.compile_watchpoint_expr(argument) - if not ok_flag: - print compiled_code # the error message - print 'Watchpoint not added' - return - # - nids = map(int, r_dollar_num.findall(argument)) - ok_flag, text = self.pgroup.check_watchpoint_expr(compiled_code, nids) - if not ok_flag: - print text - print 'Watchpoint not added' - return - # - new = self._bp_new(argument, 'W', compiled_code, nids=nids) - self.pgroup.update_watch_values() - print "Watchpoint %d added" % (new,) - - def getlinecacheoutput(self, pygments_background): - if not pygments_background or pygments_background == 'off': - return - try: - from pygments import highlight - from pygments.lexers import PythonLexer - from pygments.formatters import TerminalFormatter - except ImportError as e: - print >> sys.stderr, 'ImportError: %s' % (e,) - return None - # - lexer = PythonLexer() - fmt = TerminalFormatter(bg=pygments_background) - # - def linecacheoutput(filename, lineno): - line = linecache.getline(filename, lineno) - return highlight(line, lexer, fmt) - return linecacheoutput diff --git a/rpython/translator/revdb/message.py b/rpython/translator/revdb/message.py deleted file mode 100644 --- a/rpython/translator/revdb/message.py +++ /dev/null @@ -1,120 +0,0 @@ - -INIT_VERSION_NUMBER = 0xd80100 - - -# See the corresponding answers for details about messages. 
- -CMD_FORK = -1 # Message(CMD_FORK) -CMD_QUIT = -2 # Message(CMD_QUIT) -CMD_FORWARD = -3 # Message(CMD_FORWARD, steps, breakpoint_mode) -CMD_FUTUREIDS = -4 # Message(CMD_FUTUREIDS, extra=list-of-8bytes-uids) -CMD_PING = -5 # Message(CMD_PING) -# extra commands which are not handled by revdb.c, but -# by revdb.register_debug_command() -CMD_PRINT = 1 # Message(CMD_PRINT, extra=expression) -CMD_BACKTRACE = 2 # Message(CMD_BACKTRACE) -CMD_LOCALS = 3 # Message(CMD_LOCALS) -CMD_BREAKPOINTS = 4 # Message(CMD_BREAKPOINTS, stack_id, - # extra="\0-separated names") -CMD_STACKID = 5 # Message(CMD_STACKID, parent-flag) -CMD_ATTACHID = 6 # Message(CMD_ATTACHID, small-num, unique-id) -CMD_COMPILEWATCH= 7 # Message(CMD_COMPILEWATCH, extra=expression) -CMD_CHECKWATCH = 8 # Message(CMD_CHECKWATCH, extra=compiled_code) -CMD_WATCHVALUES = 9 # Message(CMD_WATCHVALUES, extra=texts) - - -# the first message sent by the first child: -# Message(ANSWER_INIT, INIT_VERSION_NUMBER, total_stop_points) -ANSWER_INIT = -20 - -# sent when the child is done and waiting for the next command: -# Message(ANSWER_READY, current_time, currently_created_objects) -ANSWER_READY = -21 - -# sent after CMD_FORK: -# Message(ANSWER_FORKED, child_pid) -ANSWER_FORKED = -22 - -# sent when a child reaches the end (should not occur with process.py) -# Message(ANSWER_AT_END) (the child exits afterwards) -ANSWER_AT_END = -23 - -# breakpoint detected in CMD_FORWARD: -# Message(ANSWER_BREAKPOINT, break_time, break_created_objects, bpkt_num) -# if breakpoint_mode=='b': sent immediately when seeing a breakpoint, -# followed by ANSWER_STD with the same time -# if breakpoint_mode=='r': sent when we're done going forward, about -# the most recently seen breakpoint -# if breakpoint_mode=='i': ignored, never sent -ANSWER_BREAKPOINT = -24 - -# sent after an Attempted to do I/O or access raw memory, as the last message -ANSWER_ATTEMPT_IO = -25 - - -# print one line of a file to the console, for CMD_PRINT -# 
Message(ANSWER_LINECACHE, linenum, extra=filename) -ANSWER_LINECACHE = 19 - -# print text to the console, for CMD_PRINT and others -# Message(ANSWER_TEXT, extra=text) -ANSWER_TEXT = 20 - -# CMD_STACKID returns the id of the current or parent frame (depending -# on the 'parent-flag' passed in), or 0 if not found. The id can be just -# the stack depth, or it can be the unique id of the frame object. When -# used in CMD_BREAKPOINTS, it means "break if we are entering/leaving a -# frame from/to the given frame". -# Message(ANSWER_STACKID, stack-id) -ANSWER_STACKID = 21 - -# sent from CMD_PRINT to record the existence of a recallable object -# Message(ANSWER_NEXTNID, unique-id) -ANSWER_NEXTNID = 22 - -# sent after CMD_COMPILEWATCH: -# Message(ANSWER_WATCH, ok_flag, extra=marshalled_code) -# sent after CMD_CHECKWATCH: -# Message(ANSWER_WATCH, ok_flag, extra=result_of_expr) -ANSWER_WATCH = 23 - -# sent sometimes after CMD_BREAKPOINTS: -# Message(ANSWER_CHBKPT, bkpt_num, extra=new_breakpoint_text) -ANSWER_CHBKPT = 24 - - -# ____________________________________________________________ - - -class Message(object): - """Represent messages sent and received to subprocesses - started with --revdb-replay. 
- """ - - def __init__(self, cmd, arg1=0, arg2=0, arg3=0, extra=""): - self.cmd = cmd - self.arg1 = arg1 - self.arg2 = arg2 - self.arg3 = arg3 - self.extra = extra - - def __repr__(self): - cmd = self.cmd - for key, value in globals().items(): - if (key.startswith('CMD_') or key.startswith('ANSWER_')) and ( - value == cmd): - cmd = key - break - return 'Message(%s, %d, %d, %d, %r)' % (cmd, self.arg1, - self.arg2, self.arg3, - self.extra) - - def __eq__(self, other): - return (self.cmd == other.cmd and - self.arg1 == other.arg1 and - self.arg2 == other.arg2 and - self.arg3 == other.arg3 and - self.extra == other.extra) - - def __ne__(self, other): - return not (self == other) diff --git a/rpython/translator/revdb/process.py b/rpython/translator/revdb/process.py deleted file mode 100644 --- a/rpython/translator/revdb/process.py +++ /dev/null @@ -1,637 +0,0 @@ -import sys, os, struct, socket, errno, subprocess -import linecache -from rpython.translator.revdb import ancillary -from rpython.translator.revdb.message import * - - -class Breakpoint(Exception): - def __init__(self, time): - self.time = time # time of the previous stop_point - self.nums = [] # list of breakpoint numbers that occurred, in order - - def record_num(self, num): - self.nums.append(num) - - def regular_breakpoint_nums(self): - for num in self.nums: - if num != -1 and num != -2: - yield num - - def __repr__(self): - return 'Breakpoint(%d, %r)' % (self.time, self.nums) - __str__ = __repr__ - - -class AllBreakpoints(object): - - def __init__(self): - self.num2break = {} # {small number: encoded break/watchpoint} - self.sources = {} # {small number: src text} - self.watchvalues = {} # {small number: resulting text} - self.watchuids = {} # {small number: [uid...]} - self.stack_id = 0 # breaks when leaving/entering a frame from/to - # the frame identified by 'stack_id' - self.thread_num = -1 # breaks when leaving/entering the thread_num - - def __repr__(self): - return 'AllBreakpoints(%r, %r, %r, %r, 
%r)' % ( - self.num2break, self.watchvalues, self.watchuids, - self.stack_id, self.thread_num) - - def compare(self, other): - if (self.num2break == other.num2break and - self.stack_id == other.stack_id and - self.thread_num == other.thread_num): - if self.watchvalues == other.watchvalues: - return 2 # completely equal - else: - return 1 # equal, but watchvalues out-of-date - else: - return 0 # different - - def is_empty(self): - return (len(self.num2break) == 0 and self.stack_id == 0 - and self.thread_num == -1) - - def duplicate(self): - a = AllBreakpoints() - a.num2break.update(self.num2break) - a.stack_id = self.stack_id - a.thread_num = self.thread_num - return a - - def set_num2break(self, new, break_code, break_at): - if len(break_at) > 0xFFFFFF: - raise OverflowError("break/watchpoint too complex") - self.num2break[new] = (break_code + - chr(len(break_at) & 0xFF) + - chr((len(break_at) >> 8) & 0xFF) + - chr(len(break_at) >> 16) + - break_at) - - -class RecreateSubprocess(Exception): - pass - - -class ReplayProcess(object): - """Represent one replaying subprocess. - - It can be either the one started with --revdb-replay, or a fork. 
- """ - - def __init__(self, pid, control_socket, - breakpoints_cache=AllBreakpoints(), - printed_objects=frozenset(), - linecacheoutput=None): - self.pid = pid - self.control_socket = control_socket - self.tainted = False - self.breakpoints_cache = breakpoints_cache # don't mutate this - self.printed_objects = printed_objects # don't mutate this - # ^^^ frozenset containing the uids of the objects that are - # either already discovered in this child - # (if uid < currently_created_objects), or that will - # automatically be discovered when we move forward - self.linecacheoutput = linecacheoutput or linecache.getline - - def _recv_all(self, size): - pieces = [] - while size > 0: - data = self.control_socket.recv(size) - if not data: - raise EOFError - size -= len(data) - pieces.append(data) - return ''.join(pieces) - - def send(self, msg): - #print 'SENT:', self.pid, msg - binary = struct.pack("iIqqq", msg.cmd, len(msg.extra), - msg.arg1, msg.arg2, msg.arg3) - self.control_socket.sendall(binary + msg.extra) - - def recv(self): - binary = self._recv_all(struct.calcsize("iIqqq")) - cmd, size, arg1, arg2, arg3 = struct.unpack("iIqqq", binary) - extra = self._recv_all(size) - msg = Message(cmd, arg1, arg2, arg3, extra) - #print 'RECV:', self.pid, msg - return msg - - def expect(self, cmd, arg1=0, arg2=0, arg3=0, extra=""): - msg = self.recv() - assert msg.cmd == cmd, msg - if arg1 is not Ellipsis: - assert msg.arg1 == arg1, msg - if arg2 is not Ellipsis: - assert msg.arg2 == arg2, msg - if arg3 is not Ellipsis: - assert msg.arg3 == arg3, msg - if extra is not Ellipsis: - assert msg.extra == extra, msg - return msg - - def expect_ready(self): - msg = self.expect(ANSWER_READY, Ellipsis, Ellipsis, Ellipsis) - self.update_times(msg) - - def update_times(self, msg): - self.current_time = msg.arg1 - self.currently_created_objects = msg.arg2 - self.current_thread = msg.arg3 - - def clone(self, activate=False): - """Fork this subprocess. 
Returns a new ReplayProcess() that is - an identical copy. - """ - self.send(Message(CMD_FORK, int(activate))) - s1, s2 = socket.socketpair() - ancillary.send_fds(self.control_socket.fileno(), [s2.fileno()]) - s2.close() - msg = self.expect(ANSWER_FORKED, Ellipsis) - child_pid = msg.arg1 - self.expect_ready() - other = ReplayProcess(child_pid, s1, - breakpoints_cache=self.breakpoints_cache, - printed_objects=self.printed_objects, - linecacheoutput=self.linecacheoutput) - other.expect_ready() - #print >> sys.stderr, 'CLONED', self.current_time - return other - - def close(self): - """Close this subprocess.""" - try: - self.send(Message(CMD_QUIT)) - except socket.error: - pass - - def forward(self, steps, breakpoint_mode): - """Move this subprocess forward in time. - Returns the Breakpoint or None. - """ - assert not self.tainted - - # - currents = self.current_time, self.currently_created_objects - self.send(Message(CMD_PING)) - self.expect_ready() - assert currents == (self.current_time, self.currently_created_objects) - # - - self.send(Message(CMD_FORWARD, steps, ord(breakpoint_mode))) - # - # record all breakpoints that occur together during the *last* step - bkpt = None - while True: - msg = self.recv() - if msg.cmd != ANSWER_BREAKPOINT: - break - if bkpt is None or bkpt.time != msg.arg1: - bkpt = Breakpoint(msg.arg1) - bkpt.record_num(msg.arg3) - assert msg.cmd == ANSWER_READY, msg - self.update_times(msg) - return bkpt - - def print_text_answer(self, pgroup=None): - while True: - msg = self.recv() - if msg.cmd == ANSWER_TEXT: - sys.stdout.write(msg.extra) - sys.stdout.flush() - elif msg.cmd == ANSWER_READY: - self.update_times(msg) - break - elif msg.cmd == ANSWER_LINECACHE: - line = self.linecacheoutput(msg.extra, msg.arg1) - if line == '': - line = '?' - if msg.arg2: # strip? 
- line = line.strip() - else: - line = line.rstrip('\r\n') - sys.stdout.write(line + '\n') - sys.stdout.flush() - elif msg.cmd == ANSWER_NEXTNID and pgroup is not None: - uid = msg.arg1 - if uid < pgroup.initial_uid: - continue # created before the first stop point, ignore - self.printed_objects = self.printed_objects.union([uid]) - new_nid = len(pgroup.all_printed_objects_lst) - nid = pgroup.all_printed_objects.setdefault(uid, new_nid) - if nid == new_nid: - pgroup.all_printed_objects_lst.append(uid) - sys.stdout.write('$%d = ' % nid) - sys.stdout.flush() - elif msg.cmd == ANSWER_ATTEMPT_IO: - raise RecreateSubprocess - elif msg.cmd == ANSWER_CHBKPT and pgroup is not None: - # change the breakpoint definition. Needed for - # ":linenum" breakpoints which must be expanded to the - # current file only once - b = pgroup.edit_breakpoints() - assert b.num2break[msg.arg1][0] == 'B' - if msg.extra: - b.set_num2break(msg.arg1, 'B', msg.extra) - else: - del b.num2break[msg.arg1] - else: - print >> sys.stderr, "unexpected %r" % (msg,) - - -class ReplayProcessGroup(object): - """Handle a family of subprocesses. 
- """ - MAX_SUBPROCESSES = 31 # maximum number of subprocesses - STEP_RATIO = 0.25 # subprocess n is between subprocess n-1 - # and the end, at this fraction of interval - - def __init__(self, executable, revdb_log_filename, linecacheoutput=None): - s1, s2 = socket.socketpair() - initial_subproc = subprocess.Popen( - [executable, '--revdb-replay', revdb_log_filename, - str(s2.fileno())], preexec_fn=s1.close) - s2.close() - child = ReplayProcess(initial_subproc.pid, s1, - linecacheoutput=linecacheoutput) - msg = child.expect(ANSWER_INIT, INIT_VERSION_NUMBER, Ellipsis) - self.total_stop_points = msg.arg2 - if self.total_stop_points == 0: - raise ValueError("%r does not contain any stop point" % - (revdb_log_filename,)) - child.expect_ready() - self.initial_uid = child.currently_created_objects - - self.active = child - self.paused = {1: child.clone()} # {time: subprocess} - self.all_breakpoints = AllBreakpoints() - self.all_printed_objects = {} - self.all_printed_objects_lst = [] - - def get_current_time(self): - return self.active.current_time - - def get_currently_created_objects(self): - return self.active.currently_created_objects - - def get_current_thread(self): - return self.active.current_thread - - def _check_current_time(self, time): - assert self.get_current_time() == time - self.active.send(Message(CMD_FORWARD, 0)) - return self.active.expect(ANSWER_READY, time, Ellipsis, Ellipsis) - - def get_max_time(self): - return self.total_stop_points - - def get_next_clone_time(self): - # if 'active' has more printed_objects than the next process - # already in 'paused', then we re-clone 'active'. - cur_time = self.get_current_time() - future = [time for time in self.paused if time > cur_time] - if future: - for futime in sorted(future): - if (self.paused[futime].printed_objects != - frozenset(self.all_printed_objects_lst)): - # 'futime' is the time of the first "future" childs - # with an incomplete 'printed_objects'. This will - # be re-cloned. 
- return futime - # - if len(self.paused) >= self.MAX_SUBPROCESSES: - next_time = self.total_stop_points + 1 - else: - latest_done = max(self.paused) - range_not_done = self.total_stop_points - latest_done - next_time = latest_done + int(self.STEP_RATIO * range_not_done) + 1 - return next_time - - def is_tainted(self): - return self.active.tainted - - def go_forward(self, steps, breakpoint_mode='b'): - """Go forward, for the given number of 'steps' of time. - - If needed, it will leave clones at intermediate times. - Does not close the active subprocess. Note that - is_tainted() must return false in order to use this. - - breakpoint_mode: - 'b' = regular mode where hitting a breakpoint stops - 'i' = ignore breakpoints - 'r' = record the occurrence of a breakpoint but continue - """ - assert steps >= 0 - if breakpoint_mode != 'i': - self.update_breakpoints() - latest_bkpt = None - while True: - cur_time = self.get_current_time() - if cur_time + steps > self.total_stop_points: - steps = self.total_stop_points - cur_time - next_clone = self.get_next_clone_time() - rel_next_clone = next_clone - cur_time - if rel_next_clone > steps: - break - assert rel_next_clone >= 0 - if rel_next_clone > 0: - bkpt = self.active.forward(rel_next_clone, breakpoint_mode) - if breakpoint_mode == 'r': - latest_bkpt = bkpt or latest_bkpt - elif bkpt: - raise bkpt - steps -= rel_next_clone - if self.active.current_time in self.paused: - self.paused[self.active.current_time].close() - clone = self.active.clone() - self.paused[clone.current_time] = clone - bkpt = self.active.forward(steps, breakpoint_mode) - if breakpoint_mode == 'r': - bkpt = bkpt or latest_bkpt - if bkpt: - raise bkpt - - def go_backward(self, steps, ignore_breakpoints=False): - """Go backward, for the given number of 'steps' of time. - - Closes the active process. Implemented as jump_in_time() - and then forward-searching for breakpoint locations (if any). 
- """ - assert steps >= 0 - initial_time = self.get_current_time() - if self.all_breakpoints.is_empty() or ignore_breakpoints: - self.jump_in_time(initial_time - steps) - else: - if self.all_breakpoints.watchvalues: - first_steps = 97 # use smaller steps, because that's costly - else: - first_steps = 957 - self._backward_search_forward( - search_start_time = initial_time - first_steps, - search_stop_time = initial_time, - search_go_on_until_time = initial_time - steps) - - def _backward_search_forward(self, search_start_time, search_stop_time, - search_go_on_until_time=1): - while True: - self.jump_in_time(max(search_start_time, search_go_on_until_time)) - search_start_time = self.get_current_time() - time_range_to_search = search_stop_time - search_start_time - if time_range_to_search <= 0: - print "[search end]" - return - print "[searching %d..%d]" % (search_start_time, - search_stop_time) - self.go_forward(time_range_to_search, breakpoint_mode='r') - # If at least one breakpoint was found, the Breakpoint - # exception is raised with the *last* such breakpoint. - # Otherwise, we continue here. Search farther along a - # 3-times-bigger range. 
- search_stop_time = search_start_time - search_start_time -= time_range_to_search * 3 - - def _update_watchpoints_uids(self): - if self.all_breakpoints.watchuids: - uids = set() - uids.update(*self.all_breakpoints.watchuids.values()) - #print self.all_breakpoints - #print '\t===>', uids - self.attach_printed_objects(uids, watch_env=True) - - def update_breakpoints(self): - self._update_watchpoints_uids() - cmp = self.all_breakpoints.compare(self.active.breakpoints_cache) - #print 'compare:', cmp, self.all_breakpoints.watchvalues - if cmp == 2: - return # up-to-date - - # update the breakpoints/watchpoints - self.active.breakpoints_cache = None - num2break = self.all_breakpoints.num2break - N = (max(num2break) + 1) if num2break else 0 - if cmp == 0: - flat = [num2break.get(n, '\x00') for n in range(N)] - arg1 = self.all_breakpoints.stack_id - arg2 = self.all_breakpoints.thread_num - extra = ''.join(flat) - self.active.send(Message(CMD_BREAKPOINTS, arg1, arg2, extra=extra)) - self.active.print_text_answer(pgroup=self) - else: - assert cmp == 1 - - # update the watchpoint values - if any(name.startswith('W') for name in num2break.values()): - watchvalues = self.all_breakpoints.watchvalues - flat = [] - for n in range(N): - text = '' - name = num2break.get(n, '') - if name.startswith('W'): - text = watchvalues[n] - flat.append(text) - extra = '\x00'.join(flat) - self.active.send(Message(CMD_WATCHVALUES, extra=extra)) - self.active.expect_ready() - - self.active.breakpoints_cache = self.all_breakpoints.duplicate() - - def update_watch_values(self): - try: - self._update_watchpoints_uids() - except socket.error as e: - print >> sys.stderr, "socket.error: %s" % (e,) - print >> sys.stderr, "restarting at position 1" - self.jump_in_time(1) - self._update_watchpoints_uids() - seen = set() - for num, name in self.all_breakpoints.num2break.items(): - if name.startswith('W'): - _, text = self.check_watchpoint_expr(name[4:]) - if text != self.all_breakpoints.watchvalues[num]: - 
#print self.active.pid - print 'updating watchpoint value: %s => %s' % ( - self.all_breakpoints.sources[num], text) - self.all_breakpoints.watchvalues[num] = text - seen.add(num) - assert set(self.all_breakpoints.watchvalues) == seen - - def compile_watchpoint_expr(self, expr): - self.active.send(Message(CMD_COMPILEWATCH, extra=expr)) - msg = self.active.expect(ANSWER_WATCH, Ellipsis, extra=Ellipsis) - self.active.expect_ready() - return msg.arg1, msg.extra - - def check_watchpoint_expr(self, compiled_code, nids=None): - if nids: - self.ensure_nids_to_uids(nids) - uids = self.nids_to_uids(nids) - self.attach_printed_objects(uids, watch_env=True) - self.active.send(Message(CMD_CHECKWATCH, extra=compiled_code)) - msg = self.active.expect(ANSWER_WATCH, Ellipsis, extra=Ellipsis) - self.active.expect_ready() - return msg.arg1, msg.extra - - def _resume(self, from_time): - clone_me = self.paused[from_time] - if self.active is not None: - self.active.close() - self.active = clone_me.clone(activate=True) - - def jump_in_time(self, target_time): - """Jump in time at the given 'target_time'. - - This function always closes the active subprocess. - """ - if target_time < 1: - target_time = 1 - if target_time > self.total_stop_points: - target_time = self.total_stop_points - uids = set() - uids.update(*self.all_breakpoints.watchuids.values()) - self.ensure_printed_objects(uids, forced_time = target_time) - - def close(self): - """Close all subprocesses. - """ - for subp in [self.active] + self.paused.values(): - subp.close() - - def ensure_printed_objects(self, uids, forced_time=None): - """Ensure that all the given unique_ids are loaded in the active - child, if necessary by forking another child from earlier. 
- """ - if forced_time is None: - initial_time = self.get_current_time() - child = self.active - else: - initial_time = forced_time - stop_time = max(time for time in self.paused - if time <= initial_time) - child = self.paused[stop_time] - - while True: - uid_limit = child.currently_created_objects - missing_uids = [uid for uid in uids - if uid < uid_limit - and uid not in child.printed_objects] - if not missing_uids: - break - # pick the earlier fork - start_time = child.current_time - stop_time = max(time for time in self.paused if time < start_time) - child = self.paused[stop_time] - - # No missing_uids left: all uids are either already in - # self.active.printed_objects, or in the future. - future_uids = [uid for uid in uids if uid >= uid_limit] - if child is self.active: - assert not future_uids - else: - self._resume(stop_time) - if future_uids: - future_uids.sort() - pack_uids = [struct.pack('q', uid) for uid in future_uids] - pack_uids = ''.join(pack_uids) - #print '%d: from %d: CMD_FUTUREIDS %r' % ( - # self.active.pid, - # self.active.current_time, - # future_uids) - self.active.send(Message(CMD_FUTUREIDS, extra=pack_uids)) - self.active.expect_ready() - self.active.printed_objects = ( - self.active.printed_objects.union(future_uids)) - self.go_forward(initial_time - self.get_current_time(), - breakpoint_mode='i') - assert self.active.printed_objects.issuperset(uids) - - def nids_to_uids(self, nids, skip_futures=False): - uids = [] - for nid in set(nids): - try: - uid = self.all_printed_objects_lst[nid] - except IndexError: - continue - if skip_futures and uid >= self.get_currently_created_objects(): - #print >> sys.stderr, ( - # "note: '$%d' refers to an object that is " - # "only created later in time" % nid) - continue - uids.append(uid) - return uids - - def ensure_nids_to_uids(self, nids): - # Take the objects listed in nids which are alive at the - # current time, and return a list of uids of them. This - # might require some replaying. 
- uids = [] - if nids: - uids = self.nids_to_uids(nids, skip_futures=True) - self.ensure_printed_objects(uids) - return uids - - def attach_printed_objects(self, uids, watch_env): - for uid in uids: - nid = self.all_printed_objects[uid] - #print '%d: %s => %s (watch_env=%d)' % (self.active.pid, nid, uid, - # watch_env) - self.active.send(Message(CMD_ATTACHID, nid, uid, int(watch_env))) - self.active.expect_ready() - - def recreate_subprocess(self, target_time=None): - # recreate a subprocess at the given time, or by default the - # current time - if target_time is None: - target_time = self.get_current_time() - self.active = None - self.jump_in_time(target_time) - - def print_cmd(self, expression, nids=[]): - """Print an expression. - """ - uids = self.ensure_nids_to_uids(nids) - self.active.tainted = True - self.attach_printed_objects(uids, watch_env=False) - self.active.send(Message(CMD_PRINT, extra=expression)) - try: - self.active.print_text_answer(pgroup=self) - except RecreateSubprocess: - self.recreate_subprocess() - - def show_backtrace(self, complete=1): - """Show the backtrace. - """ - if complete: - self.active.tainted = True - self.active.send(Message(CMD_BACKTRACE, complete)) - try: - self.active.print_text_answer() - except RecreateSubprocess: - self.recreate_subprocess() - - def show_locals(self): - """Show the locals. 
- """ - self.active.tainted = True - self.active.send(Message(CMD_LOCALS)) - try: - self.active.print_text_answer() - except RecreateSubprocess: - self.recreate_subprocess() - - def edit_breakpoints(self): - return self.all_breakpoints - - def _stack_id(self, is_parent=0): - self.active.send(Message(CMD_STACKID, is_parent)) - msg = self.active.expect(ANSWER_STACKID, Ellipsis, Ellipsis) - self.active.expect_ready() - return msg - - def get_stack_id(self, is_parent): - return self._stack_id(is_parent).arg1 - - def get_hiddenpos_level(self): - return self._stack_id().arg2 diff --git a/rpython/translator/revdb/revdb.py b/rpython/translator/revdb/revdb.py deleted file mode 100755 --- a/rpython/translator/revdb/revdb.py +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env python2 - -import sys, os - - -if __name__ == '__main__': - import argparse - parser = argparse.ArgumentParser(description='Reverse debugger') - parser.add_argument('log', metavar='LOG', help='log file name') - parser.add_argument('-x', '--executable', dest='executable', - help='name of the executable file ' - 'that recorded the log') - parser.add_argument('-c', '--color', dest='color', - help='colorize source code (dark,light,off)') - options = parser.parse_args() - - sys.path.insert(0, os.path.abspath( - os.path.join(__file__, '..', '..', '..', '..'))) - - from rpython.translator.revdb.interact import RevDebugControl - ctrl = RevDebugControl(options.log, executable=options.executable, - pygments_background=options.color) - ctrl.interact() diff --git a/rpython/translator/revdb/src-revdb/fd_send.c b/rpython/translator/revdb/src-revdb/fd_send.c deleted file mode 100644 --- a/rpython/translator/revdb/src-revdb/fd_send.c +++ /dev/null @@ -1,92 +0,0 @@ -/*************************************************************************** - * libancillary - black magic on Unix domain sockets - * (C) Nicolas George - * fd_send.c - sending file descriptors - 
***************************************************************************/ - -/* - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO - * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; - * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR - * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#ifndef _XPG4_2 /* Solaris sucks */ -# define _XPG4_2 -#endif - -#include -#include -#include -#include -#include -#if defined(__FreeBSD__) -# include /* FreeBSD sucks */ -#endif - -#include "ancillary.h" - -int -ancil_send_fds_with_buffer(int sock, const int *fds, unsigned n_fds, void *buffer) -{ - struct msghdr msghdr; - char nothing = '!'; - struct iovec nothing_ptr; - struct cmsghdr *cmsg; - int i; - - nothing_ptr.iov_base = ¬hing; - nothing_ptr.iov_len = 1; - msghdr.msg_name = NULL; - msghdr.msg_namelen = 0; - msghdr.msg_iov = ¬hing_ptr; - msghdr.msg_iovlen = 1; - msghdr.msg_flags = 0; - msghdr.msg_control = buffer; - msghdr.msg_controllen = sizeof(struct cmsghdr) + sizeof(int) * n_fds; - cmsg = CMSG_FIRSTHDR(&msghdr); - cmsg->cmsg_len = msghdr.msg_controllen; - cmsg->cmsg_level = SOL_SOCKET; - cmsg->cmsg_type = SCM_RIGHTS; - for(i = 0; i < n_fds; i++) - ((int *)CMSG_DATA(cmsg))[i] = fds[i]; - return(sendmsg(sock, &msghdr, 0) >= 0 ? 0 : -1); -} - -#ifndef SPARE_SEND_FDS -int -ancil_send_fds(int sock, const int *fds, unsigned n_fds) -{ - ANCIL_FD_BUFFER(ANCIL_MAX_N_FDS) buffer; - - assert(n_fds <= ANCIL_MAX_N_FDS); - return(ancil_send_fds_with_buffer(sock, fds, n_fds, &buffer)); -} -#endif /* SPARE_SEND_FDS */ - -#ifndef SPARE_SEND_FD -int -ancil_send_fd(int sock, int fd) -{ - ANCIL_FD_BUFFER(1) buffer; - - return(ancil_send_fds_with_buffer(sock, &fd, 1, &buffer)); -} -#endif /* SPARE_SEND_FD */ From pypy.commits at gmail.com Thu Sep 8 18:13:55 2016 From: pypy.commits at gmail.com (rlamy) Date: Thu, 08 Sep 2016 15:13:55 -0700 (PDT) Subject: [pypy-commit] pypy test-cpyext: kill w_record_imported_module and unskip test_recursive_package_import() on -A Message-ID: <57d1e2a3.ccd11c0a.e2d2e.0e87@mx.google.com> Author: Ronan Lamy Branch: test-cpyext Changeset: r86968:9374f605480e Date: 2016-09-08 23:13 +0100 http://bitbucket.org/pypy/pypy/changeset/9374f605480e/ Log: kill w_record_imported_module and unskip test_recursive_package_import() on -A diff --git 
a/pypy/module/cpyext/bufferobject.py b/pypy/module/cpyext/bufferobject.py --- a/pypy/module/cpyext/bufferobject.py +++ b/pypy/module/cpyext/bufferobject.py @@ -61,6 +61,7 @@ py_buf.c_b_ptr = rffi.cast(rffi.VOIDP, buf.array._charbuf_start()) py_buf.c_b_size = buf.getlength() else: + raise RuntimeError raise oefmt(space.w_NotImplementedError, "buffer flavor not supported") diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -317,6 +317,15 @@ if not cls.runappdirect: cls.w_runappdirect = space.wrap(cls.runappdirect) + def record_imported_module(self, name): + """ + Record a module imported in a test so that it can be cleaned up in + teardown before the check for leaks is done. + + name gives the name of the module in the space's sys.modules. + """ + self.imported_module_names.append(name) + def setup_method(self, func): @gateway.unwrap_spec(name=str) def compile_module(space, name, @@ -337,6 +346,11 @@ name, source_files=source_files, source_strings=source_strings) + + # hackish, but tests calling compile_module() always end up + # importing the result + self.record_imported_module(name) + return space.wrap(pydname) @gateway.unwrap_spec(name=str, init='str_or_None', body=str, @@ -399,7 +413,7 @@ return imp.load_dynamic(name, mod) else: api.load_extension_module(space, mod, name) - self.imported_module_names.append(name) + self.record_imported_module(name) return space.getitem( space.sys.get('modules'), space.wrap(name)) @@ -429,16 +443,6 @@ w_include_dirs=w_include_dirs, PY_SSIZE_T_CLEAN=PY_SSIZE_T_CLEAN) - @gateway.unwrap_spec(name=str) - def record_imported_module(name): - """ - Record a module imported in a test so that it can be cleaned up in - teardown before the check for leaks is done. - - name gives the name of the module in the space's sys.modules. 
- """ - self.imported_module_names.append(name) - def debug_collect(space): rawrefcount._collect() @@ -468,7 +472,6 @@ self.w_import_module = wrap(interp2app(import_module)) self.w_reimport_module = wrap(interp2app(reimport_module)) self.w_import_extension = wrap(interp2app(import_extension)) - self.w_record_imported_module = wrap(interp2app(record_imported_module)) self.w_here = wrap(str(py.path.local(pypydir)) + '/module/cpyext/test/') self.w_debug_collect = wrap(interp2app(debug_collect)) @@ -621,15 +624,11 @@ If `cherry.date` is an extension module which imports `apple.banana`, the latter is added to `sys.modules` for the `"apple.banana"` key. """ - if self.runappdirect: - skip('record_imported_module not supported in runappdirect mode') # Build the extensions. banana = self.compile_module( "apple.banana", source_files=[self.here + 'banana.c']) - self.record_imported_module("apple.banana") date = self.compile_module( "cherry.date", source_files=[self.here + 'date.c']) - self.record_imported_module("cherry.date") # Set up some package state so that the extensions can actually be # imported. 
From pypy.commits at gmail.com Thu Sep 8 18:13:54 2016 From: pypy.commits at gmail.com (rlamy) Date: Thu, 08 Sep 2016 15:13:54 -0700 (PDT) Subject: [pypy-commit] pypy test-cpyext: Fix test_recursive_package_import to actually check what it claims to Message-ID: <57d1e2a2.262ec20a.37090.10ed@mx.google.com> Author: Ronan Lamy Branch: test-cpyext Changeset: r86967:3b5fd17a3cac Date: 2016-09-08 19:28 +0100 http://bitbucket.org/pypy/pypy/changeset/3b5fd17a3cac/ Log: Fix test_recursive_package_import to actually check what it claims to diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -641,7 +641,6 @@ apple.__path__ = [os.path.dirname(banana)] import cherry.date - import apple.banana assert sys.modules['apple.banana'].__name__ == 'apple.banana' assert sys.modules['cherry.date'].__name__ == 'cherry.date' From pypy.commits at gmail.com Fri Sep 9 04:54:49 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 09 Sep 2016 01:54:49 -0700 (PDT) Subject: [pypy-commit] pypy reverse-debugger: Move the tests to the external repo Message-ID: <57d278d9.2472c20a.5693d.b402@mx.google.com> Author: Armin Rigo Branch: reverse-debugger Changeset: r86969:6acec5a7e9ed Date: 2016-09-09 10:54 +0200 http://bitbucket.org/pypy/pypy/changeset/6acec5a7e9ed/ Log: Move the tests to the external repo diff --git a/rpython/rlib/src/boehm-rawrefcount.h b/rpython/rlib/src/boehm-rawrefcount.h --- a/rpython/rlib/src/boehm-rawrefcount.h +++ b/rpython/rlib/src/boehm-rawrefcount.h @@ -3,6 +3,7 @@ OP_GC_RAWREFCOUNT_INIT(callback, r): the callback is not supported here OP_GC_RAWREFCOUNT_CREATE_LINK_PYOBJ(): not implemented, maybe not needed */ +#define RPY_USES_RAWREFCOUNT #ifdef RPY_REVERSE_DEBUGGER /* these macros are defined in src-revdb/revdb_include.h */ diff --git a/rpython/translator/revdb/src-revdb/revdb.c b/rpython/translator/revdb/src-revdb/revdb.c --- 
a/rpython/translator/revdb/src-revdb/revdb.c +++ b/rpython/translator/revdb/src-revdb/revdb.c @@ -1783,6 +1783,8 @@ } +#ifdef RPY_USES_RAWREFCOUNT + static void *rawrefcount_tree; /* {pyobj: gcobj} */ struct rawrefcount_link2_s { @@ -1940,6 +1942,8 @@ } } +#endif /* RPY_USES_RAWREFCOUNT */ + /* ------------------------------------------------------------ */ diff --git a/rpython/translator/revdb/test/README b/rpython/translator/revdb/test/README new file mode 100644 --- /dev/null +++ b/rpython/translator/revdb/test/README @@ -0,0 +1,3 @@ +The tests are located in the external repository: + + https://bitbucket.org/pypy/revdb/ diff --git a/rpython/translator/revdb/test/__init__.py b/rpython/translator/revdb/test/__init__.py deleted file mode 100644 diff --git a/rpython/translator/revdb/test/ctrl_c.py b/rpython/translator/revdb/test/ctrl_c.py deleted file mode 100644 --- a/rpython/translator/revdb/test/ctrl_c.py +++ /dev/null @@ -1,43 +0,0 @@ -import sys, os, thread, time, signal - -os.setpgid(0, 0) -assert os.getpgrp() == os.getpid() - - -sys.path[:] = sys.argv[1].split('\x7f') -from rpython.translator.revdb.process import ReplayProcessGroup - -exename, rdbname = sys.argv[2:] -group = ReplayProcessGroup(exename, rdbname) - - -class MyInterrupt(Exception): - pass -def my_signal(*args): - raise MyInterrupt -prev_signal = signal.signal(signal.SIGINT, my_signal) - -def enable_timer(): - def my_kill(): - time.sleep(0.8) - print >> sys.stderr, "--<<< Sending CTRL-C >>>--" - os.killpg(os.getpid(), signal.SIGINT) - thread.start_new_thread(my_kill, ()) - -all_ok = False -try: - # this runs for ~9 seconds if uninterrupted - enable_timer() - group.print_cmd('very-long-loop') -except MyInterrupt: - print >> sys.stderr, "very-long-loop interrupted, trying again" - group.recreate_subprocess(1) - try: - enable_timer() - group.print_cmd('very-long-loop') - except MyInterrupt: - print >> sys.stderr, "second interruption ok" - all_ok = True - -assert all_ok, "expected very-long-loop to 
be killed by SIGINT" -print "all ok" diff --git a/rpython/translator/revdb/test/test_basic.py b/rpython/translator/revdb/test/test_basic.py deleted file mode 100644 --- a/rpython/translator/revdb/test/test_basic.py +++ /dev/null @@ -1,462 +0,0 @@ -import py -import os, sys, subprocess, socket -import re, array, struct -from rpython.tool.udir import udir -from rpython.translator.interactive import Translation -from rpython.rlib.rarithmetic import LONG_BIT, intmask -from rpython.rlib import objectmodel, revdb -from rpython.rlib.debug import debug_print -from rpython.rtyper.annlowlevel import cast_gcref_to_instance -from rpython.rtyper.lltypesystem import lltype, llmemory - -from rpython.translator.revdb.message import * -from rpython.translator.revdb.process import ReplayProcess - - -ASYNC_THREAD_SWITCH = 0xff54 - 2**16 - - -class RDB(object): - def __init__(self, filename, expected_argv): - with open(filename, 'rb') as f: - self.buffer = f.read() - self.cur = self.buffer.index('\x00') + 1 - header = self.buffer[:self.cur] - assert header == 'RevDB:\t' + '\t'.join(expected_argv) + '\n\x00' - # - x = self.read1('P'); assert x == 0x00FF0003 - x = self.read1('P'); self.main_thread_id = x - x = self.read1('P'); assert x == 0 - x = self.read1('P'); #assert x == &rpy_reverse_db_stop_point - x = self.read1('P'); #assert x == &rpy_revdb - x = self.read1('i'); assert x == 0 - self.argc = self.read1('i') - self.argv = self.read1('P') - self.current_packet_end = self.cur - self.read_check_argv(expected_argv) - - def read1(self, mode): - p = self.cur - self.cur = p + struct.calcsize(mode) - return struct.unpack_from(mode, self.buffer, p)[0] - - def next(self, mode='P'): - if self.current_packet_end == self.cur: - packet_size = self.read1('h') - assert packet_size > 0 - self.current_packet_end = self.cur + packet_size - result = self.read1(mode) - assert self.cur <= self.current_packet_end - return result - - def is_special_packet(self): - if self.current_packet_end != self.cur: 
- assert self.current_packet_end > self.cur - return False - next_header = struct.unpack_from('h', self.buffer, self.cur)[0] - return (next_header & 0xFF00) == 0xFF00 - - def special_packet(self, expected, fmt): - assert self.current_packet_end == self.cur - next_id = self.read1('h') - assert next_id == expected - p = self.cur - self.cur = self.current_packet_end = p + struct.calcsize(fmt) - return struct.unpack_from(fmt, self.buffer, p) - - def read_check_argv(self, expected): - assert self.argc == len(expected) - for i in range(self.argc): - self.next() # this is from "p = argv[i]" - s = [] - # first we determine the length of the "char *p" - while True: - c = self.next('c') - if c == '\x00': - break - s.append(c) - # then we really read the "char *" and copy it into a rpy string - # (that's why this time we don't read the final \0) - for c1 in s: - c2 = self.next('c') - assert c2 == c1 - assert ''.join(s) == expected[i] - - def number_of_stop_points(self): - return struct.unpack_from("q", self.buffer, len(self.buffer) - 8)[0] - - def done(self): - return self.cur == len(self.buffer) - - def write_call(self, expected_string): - x = self.next() # raw_malloc: the pointer we got - self.gil_release() - self.same_stack() # write - x = self.next(); assert x == len(expected_string) - self.same_stack() # errno - x = self.next('i'); assert x == 0 # errno - self.gil_acquire() - - def same_stack(self): - x = self.next('c'); assert x == '\xFC' - - def gil_acquire(self): - x = self.next('c'); assert x == '\xFD' - - def gil_release(self): - x = self.next('c'); assert x == '\xFE' - - def switch_thread(self, expected=None): - th, = self.special_packet(ASYNC_THREAD_SWITCH, 'q') - if expected is not None: - assert th == expected - return th - - -def compile(self, entry_point, backendopt=True, - withsmallfuncsets=None, shared=False, thread=False): - t = Translation(entry_point, None, gc="boehm") - self.t = t - t.set_backend_extra_options(c_debug_defines=True) - 
t.config.translation.reverse_debugger = True - t.config.translation.lldebug0 = True - t.config.translation.shared = shared - t.config.translation.thread = thread - if withsmallfuncsets is not None: - t.config.translation.withsmallfuncsets = withsmallfuncsets - if not backendopt: - t.disable(["backendopt_lltype"]) - t.annotate() - t.rtype() - if t.backendopt: - t.backendopt() - self.exename = t.compile_c() - self.rdbname = os.path.join(os.path.dirname(str(self.exename)), - 'log.rdb') - -def run(self, *argv): - env = os.environ.copy() - env['PYPYRDB'] = self.rdbname - t = self.t - stdout, stderr = t.driver.cbuilder.cmdexec(' '.join(argv), env=env, - expect_crash=9) - print >> sys.stderr, stderr - return stdout - -def fetch_rdb(self, expected_argv): - return RDB(self.rdbname, map(str, expected_argv)) - - -class BaseRecordingTests(object): - compile = compile - run = run - fetch_rdb = fetch_rdb - - -class TestRecording(BaseRecordingTests): - - def test_simple(self): - def main(argv): - print argv[1:] - return 9 - self.compile(main, backendopt=False) - assert self.run('abc d') == '[abc, d]\n' - rdb = self.fetch_rdb([self.exename, 'abc', 'd']) - rdb.write_call('[abc, d]\n') - x = rdb.next('q'); assert x == 0 # number of stop points - # that's all we should get from this simple example - assert rdb.done() - - def test_identityhash(self): - def main(argv): - print [objectmodel.compute_identity_hash(argv), - objectmodel.compute_identity_hash(argv), - objectmodel.compute_identity_hash(argv)] - return 9 - self.compile(main, backendopt=False) - out = self.run('Xx') - match = re.match(r'\[(-?\d+), \1, \1]\n', out) - assert match - hash_value = int(match.group(1)) - rdb = self.fetch_rdb([self.exename, 'Xx']) - # compute_identity_hash() doesn't record anything - rdb.write_call(out) - # done - x = rdb.next('q'); assert x == 0 # number of stop points - assert rdb.done() - - def test_dont_record_vtable_reads(self): - class A(object): - x = 42 - class B(A): - x = 43 - lst = [A(), 
B()] - def main(argv): - print lst[len(argv) & 1].x - return 9 - self.compile(main, backendopt=False) - out = self.run('Xx') - assert out == '42\n' - rdb = self.fetch_rdb([self.exename, 'Xx']) - # write() call (it used to be the case that vtable reads where - # recorded too; the single byte fetched from the vtable from - # the '.x' in main() would appear here) - rdb.write_call(out) - # done - x = rdb.next('q'); assert x == 0 # number of stop points - assert rdb.done() - - def test_dont_record_pbc_reads(self): - class MyPBC: - def _freeze_(self): - return True - pbc1 = MyPBC(); pbc1.x = 41 - pbc2 = MyPBC(); pbc2.x = 42 - lst = [pbc1, pbc2] - def main(argv): - print lst[len(argv) & 1].x - return 9 - self.compile(main, backendopt=False) - out = self.run('Xx') - assert out == '41\n' - rdb = self.fetch_rdb([self.exename, 'Xx']) - # write() call - rdb.write_call(out) - # done - x = rdb.next('q'); assert x == 0 # number of stop points - assert rdb.done() - - @py.test.mark.parametrize('limit', [3, 5]) - def test_dont_record_small_funcset_conversions(self, limit): - def f1(): - return 111 - def f2(): - return 222 - def f3(): - return 333 - def g(n): - if n & 1: - return f1 - else: - return f2 - def main(argv): - x = g(len(argv)) # can be f1 or f2 - if len(argv) > 5: - x = f3 # now can be f1 or f2 or f3 - print x() - return 9 - self.compile(main, backendopt=False, withsmallfuncsets=limit) - for input, expected_output in [ - ('2 3', '111\n'), - ('2 3 4', '222\n'), - ('2 3 4 5 6 7', '333\n'), - ]: - out = self.run(input) - assert out == expected_output - rdb = self.fetch_rdb([self.exename] + input.split()) - # write() call - rdb.write_call(out) - x = rdb.next('q'); assert x == 0 # number of stop points - assert rdb.done() - - -class InteractiveTests(object): - - def replay(self, **kwds): - s1, s2 = socket.socketpair() - subproc = subprocess.Popen( - [str(self.exename), '--revdb-replay', str(self.rdbname), - str(s2.fileno())], preexec_fn=s1.close, **kwds) - s2.close() - 
self.subproc = subproc - child = ReplayProcess(subproc.pid, s1) - child.expect(ANSWER_INIT, INIT_VERSION_NUMBER, - self.expected_stop_points) - child.expect(ANSWER_READY, 1, Ellipsis) - return child - - -class TestSimpleInterpreter(InteractiveTests): - expected_stop_points = 3 - - def setup_class(cls): - def main(argv): - lst = [argv[0], 'prebuilt'] - for op in argv[1:]: - revdb.stop_point() - print op - lst.append(op + '??') # create a new string here - for x in lst: - print revdb.get_unique_id(x) - return 9 - compile(cls, main, backendopt=False) - assert run(cls, 'abc d ef') == ('abc\nd\nef\n' - '3\n0\n12\n15\n17\n') - rdb = fetch_rdb(cls, [cls.exename, 'abc', 'd', 'ef']) - assert rdb.number_of_stop_points() == 3 - - def test_go(self): - child = self.replay() - child.send(Message(CMD_FORWARD, 2)) - child.expect(ANSWER_READY, 3, Ellipsis) - child.send(Message(CMD_FORWARD, 2)) - child.expect(ANSWER_AT_END) - - def test_quit(self): - child = self.replay() - child.send(Message(CMD_QUIT)) - assert self.subproc.wait() == 0 - - def test_fork(self): - child = self.replay() - child2 = child.clone() - child.send(Message(CMD_FORWARD, 2)) - child.expect(ANSWER_READY, 3, Ellipsis) - child2.send(Message(CMD_FORWARD, 1)) - child2.expect(ANSWER_READY, 2, Ellipsis) - # - child.close() - child2.close() - - -class TestDebugCommands(InteractiveTests): - expected_stop_points = 3 - - def setup_class(cls): - # - class Stuff: - pass - # - def g(cmdline): - if len(cmdline) > 5: - raise ValueError - g._dont_inline_ = True - # - def went_fw(): - revdb.send_answer(120, revdb.current_time()) - if revdb.current_time() != revdb.total_time(): - revdb.go_forward(1, went_fw) - # - def _nothing(arg): - pass - # - def callback_track_obj(gcref): - revdb.send_output("callback_track_obj\n") - dbstate.gcref = gcref - # - def blip(cmd, extra): - debug_print('<<<', cmd.c_cmd, cmd.c_arg1, - cmd.c_arg2, cmd.c_arg3, extra, '>>>') - if extra == 'oops': - print 42 # I/O not permitted - if extra == 
'raise-and-catch': - try: - g(extra) - except ValueError: - pass - if extra == 'crash': - raise ValueError - if extra == 'get-value': - revdb.send_answer(100, revdb.current_time(), - revdb.total_time()) - if extra == 'current-place': - revdb.send_answer(200, revdb.current_place()) - ## if extra == 'go-fw': - ## revdb.go_forward(1, went_fw) - ## if cmdline == 'set-break-after-0': - ## dbstate.break_after = 0 - ## if cmdline == 'print-id': - ## revdb.send_output('obj.x=%d %d %d\n' % ( - ## dbstate.stuff.x, - ## revdb.get_unique_id(dbstate.stuff), - ## revdb.currently_created_objects())) - ## if cmdline.startswith('track-object '): - ## uid = int(cmdline[len('track-object '):]) - ## dbstate.gcref = lltype.nullptr(llmemory.GCREF.TO) - ## revdb.track_object(uid, callback_track_obj) - ## if cmdline == 'get-tracked-object': - ## if dbstate.gcref: - ## revdb.send_output('got obj.x=%d\n' % ( - ## cast_gcref_to_instance(Stuff, dbstate.gcref).x,)) - ## else: - ## revdb.send_output('none\n') - ## if cmdline == 'first-created-uid': - ## revdb.send_output('first-created-uid=%d\n' % ( - ## revdb.first_created_object_uid(),)) - revdb.send_answer(42, cmd.c_cmd, -43, -44, extra) - lambda_blip = lambda: blip - # - class DBState: - pass - dbstate = DBState() - # - def main(argv): - revdb.register_debug_command(1, lambda_blip) - for i, op in enumerate(argv[1:]): - dbstate.stuff = Stuff() - dbstate.stuff.x = i + 1000 - revdb.stop_point(i * 10) - print op - if i == 1: - if os.fork() == 0: # child - os.write(2, "this line is from the fork child.\n") - return 0 - return 9 - compile(cls, main, backendopt=False) - assert run(cls, 'abc d ef') == 'abc\nd\nef\n' - - def test_run_blip(self): - child = self.replay() - child.send(Message(1, extra='foo')) - child.expect(42, 1, -43, -44, 'foo') - - def test_io_not_permitted(self): - child = self.replay(stderr=subprocess.PIPE) - child.send(Message(1, extra='oops')) - child.expect(ANSWER_ATTEMPT_IO) - child.close() - err = self.subproc.stderr.read() - 
assert err.endswith(': Attempted to do I/O or access raw memory\n') - - def test_interaction_with_forward(self): - child = self.replay() - child.send(Message(CMD_FORWARD, 50)) - child.expect(ANSWER_AT_END) - - def test_raise_and_catch(self): - child = self.replay() - child.send(Message(1, extra='raise-and-catch')) - child.expect(42, 1, -43, -44, 'raise-and-catch') - - def test_crash(self): - child = self.replay(stderr=subprocess.PIPE) - child.send(Message(1, extra='crash')) - child.close() - err = self.subproc.stderr.read() - assert err.endswith('Command crashed with ValueError\n') - - def test_get_value(self): - child = self.replay() - child.send(Message(1, extra='get-value')) - child.expect(100, 1, 3) - - def test_current_place(self): - child = self.replay() - child.send(Message(1, extra='current-place')) - child.expect(200, 0) - child.expect(42, 1, -43, -44, 'current-place') - child.expect(ANSWER_READY, 1, Ellipsis) - child.send(Message(CMD_FORWARD, 2)) - child.expect(ANSWER_READY, 3, Ellipsis) - child.send(Message(1, extra='current-place')) - child.expect(200, 20) - child.expect(42, 1, -43, -44, 'current-place') - - ## def test_go_fw(self): - ## child = self.replay() - ## child.send(Message(1, extra='go-fw')) - ## child.expect(42, 1, -43, -44, 'go-fw') - ## child.expect(120, 2) - ## child.expect(120, 3) - ## child.send(Message(CMD_FORWARD, 0)) - ## child.expect(ANSWER_READY, 3, Ellipsis) diff --git a/rpython/translator/revdb/test/test_callback.py b/rpython/translator/revdb/test/test_callback.py deleted file mode 100644 --- a/rpython/translator/revdb/test/test_callback.py +++ /dev/null @@ -1,123 +0,0 @@ -from rpython.translator.tool.cbuild import ExternalCompilationInfo -from rpython.rtyper.lltypesystem import lltype, rffi -from rpython.rlib.rarithmetic import intmask -from rpython.rlib import revdb -from rpython.translator.revdb.test.test_basic import BaseRecordingTests -from rpython.translator.revdb.test.test_basic import InteractiveTests - -from 
rpython.translator.revdb.message import * - - -def get_callback_demo(): - eci = ExternalCompilationInfo(separate_module_sources=[''' - int callme(int(*cb)(int)) { - return cb(40) * cb(3); - } - '''], post_include_bits=[''' - int callme(int(*)(int)); - ''']) - FUNCPTR = lltype.Ptr(lltype.FuncType([rffi.INT], rffi.INT)) - callme = rffi.llexternal('callme', [FUNCPTR], rffi.INT, - compilation_info=eci) - - def callback(n): - print intmask(n) - return n - - def main(argv): - revdb.stop_point() - print intmask(callme(callback)) - revdb.stop_point() - return 9 - - return main - - -class TestRecording(BaseRecordingTests): - - def test_callback_simple(self): - eci = ExternalCompilationInfo(separate_module_sources=[''' - int callme(int(*cb)(int)) { - return cb(40) * cb(3); - } - int callmesimple(void) { - return 55555; - } - '''], post_include_bits=[''' - int callme(int(*)(int)); - int callmesimple(void); - ''']) - FUNCPTR = lltype.Ptr(lltype.FuncType([rffi.INT], rffi.INT)) - callme = rffi.llexternal('callme', [FUNCPTR], rffi.INT, - compilation_info=eci) - callmesimple = rffi.llexternal('callmesimple', [], rffi.INT, - compilation_info=eci) - - def callback(n): - return intmask(n) * 100 - - def main(argv): - print intmask(callmesimple()) - print intmask(callme(callback)) - return 9 - self.compile(main, backendopt=False) - out = self.run('Xx') - rdb = self.fetch_rdb([self.exename, 'Xx']) - rdb.gil_release() - rdb.same_stack() # callmesimple() - x = rdb.next('i'); assert x == 55555 - rdb.gil_acquire() - rdb.write_call('55555\n') - rdb.gil_release() - b = rdb.next('!h'); assert 300 <= b < 310 # -> callback - x = rdb.next('i'); assert x == 40 # arg n - rdb.gil_acquire() - rdb.gil_release() - x = rdb.next('!h'); assert x == b # -> callback - x = rdb.next('i'); assert x == 3 # arg n - rdb.gil_acquire() - rdb.gil_release() - rdb.same_stack() # <- return in main thread - x = rdb.next('i'); assert x == 4000 * 300 # return from callme() - rdb.gil_acquire() - rdb.write_call('%s\n' % 
(4000 * 300,)) - x = rdb.next('q'); assert x == 0 # number of stop points - assert rdb.done() - - def test_callback_with_effects(self): - main = get_callback_demo() - self.compile(main, backendopt=False) - out = self.run('Xx') - rdb = self.fetch_rdb([self.exename, 'Xx']) - rdb.gil_release() - b = rdb.next('!h'); assert 300 <= b < 310 # -> callback - x = rdb.next('i'); assert x == 40 # arg n - rdb.gil_acquire() - rdb.write_call('40\n') - rdb.gil_release() - x = rdb.next('!h'); assert x == b # -> callback again - x = rdb.next('i'); assert x == 3 # arg n - rdb.gil_acquire() - rdb.write_call('3\n') - rdb.gil_release() - rdb.same_stack() # -> return in main thread - x = rdb.next('i'); assert x == 120 # <- return from callme() - rdb.gil_acquire() - rdb.write_call('120\n') - x = rdb.next('q'); assert x == 2 # number of stop points - assert rdb.done() - - -class TestReplayingCallback(InteractiveTests): - expected_stop_points = 2 - - def setup_class(cls): - from rpython.translator.revdb.test.test_basic import compile, run - main = get_callback_demo() - compile(cls, main, backendopt=False) - run(cls, '') - - def test_replaying_callback(self): - child = self.replay() - child.send(Message(CMD_FORWARD, 3)) - child.expect(ANSWER_AT_END) diff --git a/rpython/translator/revdb/test/test_process.py b/rpython/translator/revdb/test/test_process.py deleted file mode 100644 --- a/rpython/translator/revdb/test/test_process.py +++ /dev/null @@ -1,237 +0,0 @@ -import py, sys, math, os, subprocess, time -from cStringIO import StringIO -from rpython.rlib import revdb, rdtoa -from rpython.rlib.debug import debug_print, ll_assert -from rpython.rtyper.annlowlevel import cast_gcref_to_instance -from rpython.translator.revdb.message import * -from rpython.translator.revdb.process import ReplayProcessGroup, Breakpoint - -from hypothesis import given, strategies - - -class stdout_capture(object): - def __enter__(self): - self.old_stdout = sys.stdout - sys.stdout = self.buffer = StringIO() - return 
self.buffer - def __exit__(self, *args): - sys.stdout = self.old_stdout - - -class TestReplayProcessGroup: - - def setup_class(cls): - from rpython.translator.revdb.test.test_basic import compile, run - - class Stuff: - pass - - class DBState: - break_loop = -2 - stuff = None - metavar = None - printed_stuff = None - watch_future = -1 - dbstate = DBState() - - def blip(cmd, extra): - debug_print('<<<', cmd.c_cmd, cmd.c_arg1, - cmd.c_arg2, cmd.c_arg3, extra, '>>>') - if extra == 'set-breakpoint': - dbstate.break_loop = cmd.c_arg1 - revdb.send_answer(42, cmd.c_cmd, -43, -44, extra) - lambda_blip = lambda: blip - - def command_print(cmd, extra): - if extra == 'print-me': - stuff = dbstate.stuff - elif extra == '$0': - stuff = dbstate.metavar - elif extra == '2.35': - val = rdtoa.strtod('2.35') - valx, valy = math.modf(val) - revdb.send_output(rdtoa.dtoa(valx) + '\n') - revdb.send_output(rdtoa.dtoa(valy) + '\n') - xx, yy = math.frexp(val) - revdb.send_output(rdtoa.dtoa(xx) + '\n') - revdb.send_output('%d\n' % yy) - return - elif extra == 'very-long-loop': - i = 0 - total = 0 - while i < 2000000000: - total += revdb.flag_io_disabled() - i += 1 - revdb.send_output(str(total)) - return - else: - assert False - uid = revdb.get_unique_id(stuff) - ll_assert(uid > 0, "uid == 0") - revdb.send_nextnid(uid) # outputs '$NUM = ' - revdb.send_output('stuff\n') - dbstate.printed_stuff = stuff - lambda_print = lambda: command_print - - def command_attachid(cmd, extra): - index_metavar = cmd.c_arg1 - uid = cmd.c_arg2 - ll_assert(index_metavar == 0, "index_metavar != 0") # in this test - dbstate.metavar = dbstate.printed_stuff - if dbstate.metavar is None: - # uid not found, probably a future object - dbstate.watch_future = uid - lambda_attachid = lambda: command_attachid - - def command_allocating(uid, gcref): - stuff = cast_gcref_to_instance(Stuff, gcref) - # 'stuff' is just allocated; 'stuff.x' is not yet initialized - dbstate.printed_stuff = stuff - if dbstate.watch_future != -1: - 
ll_assert(dbstate.watch_future == uid, - "watch_future out of sync") - dbstate.watch_future = -1 - dbstate.metavar = stuff - lambda_allocating = lambda: command_allocating - - def command_compilewatch(cmd, expression): - revdb.send_watch("marshalled_code", ok_flag=1) - lambda_compilewatch = lambda: command_compilewatch - - def command_checkwatch(cmd, marshalled_code): - assert marshalled_code == "marshalled_code" - # check that $0 exists - if dbstate.metavar is not None: - revdb.send_watch("ok, stuff exists\n", ok_flag=1) - else: - revdb.send_watch("stuff does not exist!\n", ok_flag=0) - lambda_checkwatch = lambda: command_checkwatch - - def main(argv): - revdb.register_debug_command(100, lambda_blip) - revdb.register_debug_command(CMD_PRINT, lambda_print) - revdb.register_debug_command(CMD_ATTACHID, lambda_attachid) - revdb.register_debug_command("ALLOCATING", lambda_allocating) - revdb.register_debug_command(revdb.CMD_COMPILEWATCH, - lambda_compilewatch) - revdb.register_debug_command(revdb.CMD_CHECKWATCH, - lambda_checkwatch) - for i, op in enumerate(argv[1:]): - dbstate.stuff = Stuff() - dbstate.stuff.x = i + 1000 - if i == dbstate.break_loop or i == dbstate.break_loop + 1: - revdb.breakpoint(99) - revdb.stop_point() - print op - return 9 - compile(cls, main, backendopt=False) - assert run(cls, 'abc d ef g h i j k l m') == ( - 'abc\nd\nef\ng\nh\ni\nj\nk\nl\nm\n') - - - def test_init(self): - group = ReplayProcessGroup(str(self.exename), self.rdbname) - assert group.get_max_time() == 10 - assert group.get_next_clone_time() == 4 - - def test_forward(self): - group = ReplayProcessGroup(str(self.exename), self.rdbname) - group.go_forward(100) - assert group.get_current_time() == 10 - assert sorted(group.paused) == [1, 4, 6, 8, 9, 10] - assert group._check_current_time(10) - - @given(strategies.lists(strategies.integers(min_value=1, max_value=10))) - def test_jump_in_time(self, target_times): - group = ReplayProcessGroup(str(self.exename), self.rdbname) - for 
target_time in target_times: - group.jump_in_time(target_time) - group._check_current_time(target_time) - - def test_breakpoint_b(self): - group = ReplayProcessGroup(str(self.exename), self.rdbname) - group.active.send(Message(100, 6, extra='set-breakpoint')) - group.active.expect(42, 100, -43, -44, 'set-breakpoint') - group.active.expect(ANSWER_READY, 1, Ellipsis) - e = py.test.raises(Breakpoint, group.go_forward, 10, 'b') - assert e.value.time == 7 - assert e.value.nums == [99] - group._check_current_time(7) - - def test_breakpoint_r(self): - group = ReplayProcessGroup(str(self.exename), self.rdbname) - group.active.send(Message(100, 6, extra='set-breakpoint')) - group.active.expect(42, 100, -43, -44, 'set-breakpoint') - group.active.expect(ANSWER_READY, 1, Ellipsis) - e = py.test.raises(Breakpoint, group.go_forward, 10, 'r') - assert e.value.time == 7 - assert e.value.nums == [99] - group._check_current_time(10) - - def test_breakpoint_i(self): - group = ReplayProcessGroup(str(self.exename), self.rdbname) - group.active.send(Message(100, 6, extra='set-breakpoint')) - group.active.expect(42, 100, -43, -44, 'set-breakpoint') - group.active.expect(ANSWER_READY, 1, Ellipsis) - group.go_forward(10, 'i') # does not raise Breakpoint - - def test_print_cmd(self): - group = ReplayProcessGroup(str(self.exename), self.rdbname) - group.go_forward(1) - assert group.get_current_time() == 2 - with stdout_capture() as buf: - group.print_cmd('print-me') - assert buf.getvalue() == "$0 = stuff\n" - return group - - def _print_metavar(self, group): - with stdout_capture() as buf: - group.print_cmd('$0', nids=[0]) - assert buf.getvalue() == "$0 = stuff\n" - - def test_print_metavar(self): - group = self.test_print_cmd() - self._print_metavar(group) - - def test_jump_and_print_metavar(self): - group = self.test_print_cmd() - assert group.is_tainted() - group.jump_in_time(2) - self._print_metavar(group) - - def _check_watchpoint_expr(self, group, must_exist): - ok_flag, compiled_code 
= group.compile_watchpoint_expr("$0") - assert ok_flag == 1 - assert compiled_code == "marshalled_code" - nids = [0] - ok_flag, text = group.check_watchpoint_expr(compiled_code, nids) - print text - assert ok_flag == must_exist - - def test_check_watchpoint_expr(self): - group = self.test_print_cmd() - self._check_watchpoint_expr(group, must_exist=1) - - def test_jump_and_check_watchpoint_expr(self): - group = self.test_print_cmd() - group.jump_in_time(2) - self._check_watchpoint_expr(group, must_exist=1) - - def test_rdtoa(self): - group = ReplayProcessGroup(str(self.exename), self.rdbname) - with stdout_capture() as buf: - group.print_cmd('2.35') - assert buf.getvalue() == "0.35\n2.0\n0.5875\n2\n" - - def test_ctrl_c(self): - localdir = os.path.dirname(__file__) - args = [sys.executable, os.path.join(localdir, 'ctrl_c.py'), - '\x7f'.join(sys.path), - str(self.exename), self.rdbname] - t1 = time.time() - result = subprocess.check_output(args) - t2 = time.time() - print 'subprocess returned with captured stdout:\n%r' % (result,) - assert result == 'all ok\n' - # should take two times ~0.8 seconds if correctly interrupted - assert t2 - t1 < 3.0 diff --git a/rpython/translator/revdb/test/test_raw.py b/rpython/translator/revdb/test/test_raw.py deleted file mode 100644 --- a/rpython/translator/revdb/test/test_raw.py +++ /dev/null @@ -1,88 +0,0 @@ -import os, subprocess -from rpython.rlib import revdb -from rpython.rtyper.lltypesystem import lltype -from rpython.translator.revdb.test.test_basic import InteractiveTests - -from rpython.translator.revdb.message import * - - -class TestReplayingRaw(InteractiveTests): - expected_stop_points = 1 - - def setup_class(cls): - from rpython.translator.revdb.test.test_basic import compile, run - from rpython.translator.revdb.test.test_basic import fetch_rdb - - FOO = lltype.Struct('FOO') - foo = lltype.malloc(FOO, flavor='raw', immortal=True) - - BAR = lltype.Struct('BAR', ('p', lltype.Ptr(FOO))) - bar = lltype.malloc(BAR, 
flavor='raw', immortal=True) - bar.p = foo - - BAZ = lltype.Struct('BAZ', ('p', lltype.Ptr(FOO)), ('q', lltype.Signed), - hints={'union': True}) - baz = lltype.malloc(BAZ, flavor='raw', immortal=True) - baz.p = foo - - VBAR = lltype.Array(lltype.Ptr(FOO)) - vbar = lltype.malloc(VBAR, 3, flavor='raw', immortal=True) - vbar[0] = vbar[1] = vbar[2] = foo - - RECBAR = lltype.Struct('RECBAR', ('super', BAR), ('q', lltype.Ptr(FOO))) - recbar = lltype.malloc(RECBAR, flavor='raw', immortal=True) - recbar.q = foo - recbar.super.p = foo - - IBAR = lltype.Struct('IBAR', ('p', lltype.Ptr(FOO)), - hints={'static_immutable': True}) - ibar = lltype.malloc(IBAR, flavor='raw', immortal=True) - ibar.p = foo - - BARI = lltype.Struct('BARI', ('b', lltype.Ptr(IBAR))) - bari = lltype.malloc(BARI, flavor='raw', immortal=True) - bari.b = ibar - - class X: - pass - x = X() - x.foo = foo - x.ibar = ibar - x.bari = bari - - def main(argv): - assert bar.p == foo - assert baz.p == foo - for i in range(3): - assert vbar[i] == foo - assert recbar.q == foo - assert recbar.super.p == foo - assert ibar.p == foo - assert bari.b == ibar - assert x.foo == foo - assert x.ibar == ibar - assert x.bari == bari - revdb.stop_point() - return 9 - - compile(cls, main, backendopt=False, shared=True) - run(cls, '') - rdb = fetch_rdb(cls, [cls.exename]) - #assert len(rdb.rdb_struct) >= 4 - - def test_replaying_raw(self): - # This tiny test seems to always have foo at the same address - # in multiple runs. Here we recompile with different options - # just to change that address. - # - # NOTE: not supported right now! The executable must be - # exactly the same one with the same raw addresses. This - # might be fixed in the future. 
- #subprocess.check_call(["make", "clean"], - # cwd=os.path.dirname(str(self.exename))) - #subprocess.check_call(["make", "lldebug"], - # cwd=os.path.dirname(str(self.exename))) - # - child = self.replay() - child.send(Message(CMD_FORWARD, 2)) - child.expect(ANSWER_AT_END) diff --git a/rpython/translator/revdb/test/test_rawrefcount.py b/rpython/translator/revdb/test/test_rawrefcount.py deleted file mode 100644 --- a/rpython/translator/revdb/test/test_rawrefcount.py +++ /dev/null @@ -1,61 +0,0 @@ -from rpython.rlib import objectmodel, rgc, revdb -from rpython.rtyper.lltypesystem import lltype -from rpython.translator.revdb.test.test_basic import InteractiveTests -from rpython.translator.revdb.test.test_basic import compile, fetch_rdb, run -from rpython.translator.revdb.message import * - -from rpython.rlib import rawrefcount - - -class TestRawRefcount(InteractiveTests): - expected_stop_points = 27 - - def setup_class(cls): - class W_Root(object): - def __init__(self, n): - self.n = n - PyObjectS = lltype.Struct('PyObjectS', - ('c_ob_refcnt', lltype.Signed), - ('c_ob_pypy_link', lltype.Signed)) - PyObject = lltype.Ptr(PyObjectS) - w1 = W_Root(-42) - ob1 = lltype.malloc(PyObjectS, flavor='raw', zero=True, - immortal=True) - ob1.c_ob_refcnt = rawrefcount.REFCNT_FROM_PYPY - - def main(argv): - rawrefcount.create_link_pypy(w1, ob1) - w = None - ob = lltype.nullptr(PyObjectS) - oblist = [] - for op in argv[1:]: - revdb.stop_point() - w = W_Root(42) - ob = lltype.malloc(PyObjectS, flavor='raw', zero=True) - ob.c_ob_refcnt = rawrefcount.REFCNT_FROM_PYPY - rawrefcount.create_link_pypy(w, ob) - oblist.append(ob) - del oblist[-1] - # - rgc.collect() - assert rawrefcount.from_obj(PyObject, w) == ob - assert rawrefcount.to_obj(W_Root, ob) == w - while True: - ob = rawrefcount.next_dead(PyObject) - if not ob: - break - assert ob in oblist - oblist.remove(ob) - objectmodel.keepalive_until_here(w) - revdb.stop_point() - return 9 - compile(cls, main, backendopt=False) - ARGS26 = 'a 
b c d e f g h i j k l m n o p q r s t u v w x y z' - run(cls, ARGS26) - rdb = fetch_rdb(cls, [cls.exename] + ARGS26.split()) - assert rdb.number_of_stop_points() == cls.expected_stop_points - - def test_go(self): - child = self.replay() - child.send(Message(CMD_FORWARD, 50)) - child.expect(ANSWER_AT_END) diff --git a/rpython/translator/revdb/test/test_thread.py b/rpython/translator/revdb/test/test_thread.py deleted file mode 100644 --- a/rpython/translator/revdb/test/test_thread.py +++ /dev/null @@ -1,213 +0,0 @@ -from rpython.translator.revdb.test.test_basic import BaseRecordingTests -from rpython.translator.revdb.test.test_basic import InteractiveTests -from rpython.rtyper.lltypesystem import rffi -from rpython.rlib import rthread -from rpython.rlib import revdb - -from rpython.translator.revdb.message import * - - -_sleep = rffi.llexternal('sleep', [rffi.UINT], rffi.UINT) - - -class TestThreadRecording(BaseRecordingTests): - - def test_thread_simple(self): - def bootstrap(): - rthread.gc_thread_start() - _sleep(1) - print "BB" - _sleep(2) - print "BBB" - rthread.gc_thread_die() - - def main(argv): - print "A" - rthread.start_new_thread(bootstrap, ()) - for i in range(2): - _sleep(2) - print "AAAA" - return 9 - - self.compile(main, backendopt=False, thread=True) - out = self.run('Xx') - # should have printed A, BB, AAAA, BBB, AAAA - rdb = self.fetch_rdb([self.exename, 'Xx']) - th_A = rdb.main_thread_id - rdb.write_call("A\n") - rdb.same_stack() # RPyGilAllocate() - rdb.gil_release() - - th_B = rdb.switch_thread() - assert th_B != th_A - b = rdb.next('!h'); assert 300 <= b < 310 # "callback": start thread - rdb.gil_acquire() - rdb.gil_release() - - rdb.switch_thread(th_A) - rdb.same_stack() # start_new_thread returns - x = rdb.next(); assert x == th_B # result is the 'th_B' id - rdb.gil_acquire() - rdb.gil_release() - - rdb.switch_thread(th_B) - rdb.same_stack() # sleep() (finishes here) - rdb.next('i') # sleep() - rdb.gil_acquire() - rdb.write_call("BB\n") - 
rdb.gil_release() - - rdb.switch_thread(th_A) - rdb.same_stack() # sleep() - rdb.next('i') # sleep() - rdb.gil_acquire() - rdb.write_call("AAAA\n") - rdb.gil_release() - - rdb.switch_thread(th_B) - rdb.same_stack() # sleep() - rdb.next('i') # sleep() - rdb.gil_acquire() - rdb.write_call("BBB\n") - rdb.gil_release() - - rdb.switch_thread(th_A) - rdb.same_stack() # sleep() - rdb.next('i') # sleep() - rdb.gil_acquire() - rdb.write_call("AAAA\n") - rdb.done() - - def test_threadlocal(self): - class EC(object): - def __init__(self, value): - self.value = value - raw_thread_local = rthread.ThreadLocalReference(EC) - - def bootstrap(): - rthread.gc_thread_start() - _sleep(1) - ec = EC(4567) - raw_thread_local.set(ec) - print raw_thread_local.get().value - assert raw_thread_local.get() is ec - rthread.gc_thread_die() - - def main(argv): - ec = EC(12) - raw_thread_local.set(ec) - rthread.start_new_thread(bootstrap, ()) - _sleep(2) - print raw_thread_local.get().value - assert raw_thread_local.get() is ec - return 9 - - self.compile(main, backendopt=False, thread=True) - out = self.run('Xx') - # should have printed 4567 and 12 - rdb = self.fetch_rdb([self.exename, 'Xx']) - th_A = rdb.main_thread_id - rdb.same_stack() # RPyGilAllocate() - rdb.gil_release() - - th_B = rdb.switch_thread() - assert th_B != th_A - b = rdb.next('!h'); assert 300 <= b < 310 # "callback": start thread - rdb.gil_acquire() - rdb.gil_release() - - rdb.switch_thread(th_A) - rdb.same_stack() # start_new_thread returns - x = rdb.next(); assert x == th_B # result is the 'th_B' id - rdb.gil_acquire() - rdb.gil_release() - - rdb.switch_thread(th_B) - rdb.same_stack() # sleep() (finishes here) - rdb.next('i') # sleep() - rdb.gil_acquire() - rdb.write_call("4567\n") - rdb.gil_release() - - rdb.switch_thread(th_A) - rdb.same_stack() # sleep() - rdb.next('i') # sleep() - rdb.gil_acquire() - rdb.write_call("12\n") - rdb.done() - - -class TestThreadInteractive(InteractiveTests): - expected_stop_points = 5 - - def 
setup_class(cls): - from rpython.translator.revdb.test.test_basic import compile, run - def bootstrap(): - rthread.gc_thread_start() - _sleep(1) - revdb.stop_point() - _sleep(2) - revdb.stop_point() - rthread.gc_thread_die() - - def main(argv): - revdb.stop_point() - rthread.start_new_thread(bootstrap, ()) - for i in range(2): - _sleep(2) - revdb.stop_point() - print "ok" - return 9 - - compile(cls, main, backendopt=False, thread=True) - assert run(cls, '') == 'ok\n' - - def test_go(self): - child = self.replay() - for i in range(2, 6): - child.send(Message(CMD_FORWARD, 1)) - child.expect(ANSWER_READY, i, Ellipsis, - (i & 1) ^ 1) # thread number: either 0 or 1 here - child.send(Message(CMD_FORWARD, 1)) - child.expect(ANSWER_AT_END) - - -class TestThreadLocal(InteractiveTests): - expected_stop_points = 2 - - def setup_class(cls): - from rpython.translator.revdb.test.test_basic import compile, run - class EC(object): - def __init__(self, value): - self.value = value - raw_thread_local = rthread.ThreadLocalReference(EC) - - def bootstrap(): - rthread.gc_thread_start() - _sleep(1) - ec = EC(4567) - raw_thread_local.set(ec) - revdb.stop_point() - print raw_thread_local.get().value - assert raw_thread_local.get() is ec - rthread.gc_thread_die() - - def main(argv): - revdb.stop_point() - ec = EC(12) - raw_thread_local.set(ec) - rthread.start_new_thread(bootstrap, ()) - _sleep(2) - print raw_thread_local.get().value - assert raw_thread_local.get() is ec - return 9 - - compile(cls, main, backendopt=False, thread=True) - assert run(cls, '') == '4567\n12\n' - - def test_go_threadlocal(self): - child = self.replay() - child.send(Message(CMD_FORWARD, 1)) - child.expect(ANSWER_READY, 2, Ellipsis, 1) - child.send(Message(CMD_FORWARD, 1)) - child.expect(ANSWER_AT_END) diff --git a/rpython/translator/revdb/test/test_weak.py b/rpython/translator/revdb/test/test_weak.py deleted file mode 100644 --- a/rpython/translator/revdb/test/test_weak.py +++ /dev/null @@ -1,357 +0,0 @@ -import 
py, weakref -from rpython.rlib import revdb, rgc -from rpython.rlib.debug import debug_print -from rpython.rlib.objectmodel import keepalive_until_here -from rpython.rlib.rarithmetic import intmask -from rpython.translator.revdb.message import * -from rpython.translator.revdb.test.test_basic import BaseRecordingTests -from rpython.translator.revdb.test.test_basic import InteractiveTests - - -# Weakrefs: implemented so that the log file contains one byte -# "afterwards_alive" or "afterwards_dead" at the point where the -# weakref is created, and similarly one such byte at every point where -# the weakref is dereferenced. At every point, the weakref is _alive_ -# at the moment; but the byte tells whether it should stay alive until -# the _next_ point or not. Done by always emitting "afterwards_dead" -# in the log, and patching that to "afterwards_alive" if later we find -# a deref() where the weakref is still alive. (If a deref() finds the -# weakref dead, it doesn't do any recording or patching; it simply -# leaves the previous already-written "afterwards_dead" byte.) 
- - -WEAKREF_AFTERWARDS_DEAD = chr(0xf2) -WEAKREF_AFTERWARDS_ALIVE = chr(0xeb) - -ASYNC_FINALIZER_TRIGGER = 0xff46 - 2**16 - - -def get_finalizer_queue_main(): - from rpython.rtyper.lltypesystem import lltype, rffi - # - from rpython.translator.tool.cbuild import ExternalCompilationInfo - eci = ExternalCompilationInfo( - pre_include_bits=["#define foobar(x) x\n"]) - foobar = rffi.llexternal('foobar', [lltype.Signed], lltype.Signed, - compilation_info=eci) - class Glob: - pass - glob = Glob() - class X: - pass - class MyFinalizerQueue(rgc.FinalizerQueue): - Class = X - def finalizer_trigger(self): - glob.ping = True - fq = MyFinalizerQueue() - # - def main(argv): - glob.ping = False - lst1 = [X() for i in range(256)] - lst = [X() for i in range(3000)] - for i, x in enumerate(lst): - x.baz = i - fq.register_finalizer(x) - for i in range(3000): - lst[i] = None - if i % 300 == 150: - rgc.collect() - revdb.stop_point() - j = i + glob.ping * 1000000 - assert foobar(j) == j - if glob.ping: - glob.ping = False - total = 0 - while True: - x = fq.next_dead() - if x is None: - break - total = intmask(total * 3 + x.baz) - assert foobar(total) == total - keepalive_until_here(lst1) - return 9 - return main - -def get_old_style_finalizer_main(): - from rpython.rtyper.lltypesystem import lltype, rffi - from rpython.translator.tool.cbuild import ExternalCompilationInfo - # - eci = ExternalCompilationInfo( - pre_include_bits=["#define foobar(x) x\n"]) - foobar = rffi.llexternal('foobar', [lltype.Signed], lltype.Signed, - compilation_info=eci, _nowrapper=True) - class Glob: - pass - glob = Glob() - class X: - def __del__(self): - assert foobar(-7) == -7 - glob.count += 1 - def main(argv): - glob.count = 0 - lst = [X() for i in range(3000)] - x = -1 - for i in range(3000): - lst[i] = None - if i % 300 == 150: - rgc.collect() - revdb.stop_point() - x = glob.count - assert foobar(x) == x - print x - return 9 - return main - - -class TestRecording(BaseRecordingTests): - - def 
test_weakref_create(self): - class X: - pass - class Glob: - pass - glob = Glob() - def main(argv): - glob.r1 = weakref.ref(X()) - glob.r2 = weakref.ref(X()) - glob.r3 = weakref.ref(X()) - return 9 - self.compile(main, backendopt=False) - out = self.run('Xx') - rdb = self.fetch_rdb([self.exename, 'Xx']) - # find the extra WEAKREF_DEAD - x = rdb.next('c'); assert x == WEAKREF_AFTERWARDS_DEAD - x = rdb.next('c'); assert x == WEAKREF_AFTERWARDS_DEAD - x = rdb.next('c'); assert x == WEAKREF_AFTERWARDS_DEAD - x = rdb.next('q'); assert x == 0 # number of stop points - assert rdb.done() - - def test_weakref_deref_nondead(self): - class X: - pass - class Glob: - pass - glob = Glob() - def main(argv): - x1 = X(); x2 = X() - r1 = weakref.ref(x1) # (*) - r2 = weakref.ref(x2) # (*) - for i in range(8500): - assert r1() is x1 # (*) - assert r2() is x2 # (*) - return 9 - self.compile(main, backendopt=False) - out = self.run('Xx') - rdb = self.fetch_rdb([self.exename, 'Xx']) - # find the 2 + 16998 first WEAKREF_xxx (all "(*)" but the last two) - for i in range(2 + 16998): - x = rdb.next('c'); assert x == WEAKREF_AFTERWARDS_ALIVE - for i in range(2): - x = rdb.next('c'); assert x == WEAKREF_AFTERWARDS_DEAD - x = rdb.next('q'); assert x == 0 # number of stop points - assert rdb.done() - - def test_prebuilt_weakref(self): - class X: - pass - x1 = X() - x1.foobar = 9 - wr = weakref.ref(x1) - def main(argv): - X().foobar = 43 - return wr().foobar - self.compile(main, backendopt=False) - out = self.run('Xx') - rdb = self.fetch_rdb([self.exename, 'Xx']) - # the weakref is prebuilt, so doesn't generate any WEAKREF_xxx - x = rdb.next('q'); assert x == 0 # number of stop points - assert rdb.done() - - def test_finalizer_light_ignored(self): - py.test.skip("lightweight finalizers could be skipped, but that " - "requires also skipping (instead of recording) any " - "external call they do") - class X: - @rgc.must_be_light_finalizer - def __del__(self): - pass - def main(argv): - lst = [X() 
for i in range(3000)] - for i in range(3000): - lst[i] = None - if i % 300 == 150: - rgc.collect() - revdb.stop_point() - return 9 - self.compile(main, backendopt=False) - out = self.run('Xx') - rdb = self.fetch_rdb([self.exename, 'Xx']) - x = rdb.next('q'); assert x == 3000 # number of stop points - assert rdb.done() - - def test_finalizer_queue(self): - main = get_finalizer_queue_main() - self.compile(main, backendopt=False) - out = self.run('Xx') - rdb = self.fetch_rdb([self.exename, 'Xx']) - uid_seen = set() - totals = [] - for i in range(3000): - triggered = False - if rdb.is_special_packet(): - time, = rdb.special_packet(ASYNC_FINALIZER_TRIGGER, 'q') - assert time == i + 1 - y = intmask(rdb.next('q')); assert y == -1 - triggered = True - rdb.gil_release() - rdb.same_stack() # - j = rdb.next() # call to foobar() - rdb.gil_acquire() - assert j == i + 1000000 * triggered - if triggered: - lst = [] - while True: - uid = intmask(rdb.next()) - if uid == -1: - break - assert uid > 0 and uid not in uid_seen - uid_seen.add(uid) - lst.append(uid) - rdb.gil_release() - rdb.same_stack() # - totals.append((lst, intmask(rdb.next()))) # call to foobar() - rdb.gil_acquire() - x = rdb.next('q'); assert x == 3000 # number of stop points - # - assert 1500 <= len(uid_seen) <= 3000 - d = dict(zip(sorted(uid_seen), range(len(uid_seen)))) - for lst, expected in totals: - total = 0 - for uid in lst: - total = intmask(total * 3 + d[uid]) - assert total == expected - - def test_old_style_finalizer(self): - main = get_old_style_finalizer_main() - self.compile(main, backendopt=False) - out = self.run('Xx') - assert 1500 < int(out) <= 3000 - rdb = self.fetch_rdb([self.exename, 'Xx']) - seen_uids = set() - for i in range(3000): - triggered = False - if rdb.is_special_packet(): - time, = rdb.special_packet(ASYNC_FINALIZER_TRIGGER, 'q') - assert time == i + 1 - triggered = True - x = intmask(rdb.next()) - while True: - assert x != -1 - assert x not in seen_uids - seen_uids.add(x) - 
rdb.same_stack() - y = intmask(rdb.next()) - assert y == -7 # from the __del__ - x = intmask(rdb.next()) - if x == -1: - break - rdb.same_stack() - x = rdb.next() - assert x == len(seen_uids) - assert len(seen_uids) == int(out) - rdb.write_call(out) - x = rdb.next('q'); assert x == 3000 # number of stop points - - -class TestReplayingWeakref(InteractiveTests): - expected_stop_points = 1 - - def setup_class(cls): - from rpython.translator.revdb.test.test_basic import compile, run - - class X: - def __init__(self, s): - self.s = s - prebuilt = X('prebuilt') - - def make(s): - lst = [prebuilt] + [X(c) for c in s] - keepalive = lst[-1] - return [weakref.ref(x) for x in lst], keepalive - - def main(argv): - lst, keepalive = make(argv[0]) - expected = ['prebuilt'] + [c for c in argv[0]] - dead = [False] * len(lst) - for j in range(17000): - outp = [] - for i in range(len(lst)): - v = lst[i]() - debug_print(v) - if dead[i]: - assert v is None - elif v is None: - outp.append('') - dead[i] = True - else: - outp.append(v.s) - assert v.s == expected[i] - print ''.join(outp) - if (j % 1000) == 999: - debug_print('============= COLLECT ===========') - rgc.collect() - debug_print('------ done', j, '.') - assert not dead[0] - assert not dead[-1] - keepalive_until_here(keepalive) - revdb.stop_point() - return 9 - compile(cls, main, backendopt=False) - output = run(cls, '') - lines = output.splitlines() - assert lines[-1].startswith('prebuilt') and lines[-1].endswith( - str(cls.exename)[-1]) - assert (len(lines[-1]) + output.count('') == - len('prebuilt') + len(str(cls.exename))) - - def test_replaying_weakref(self): - child = self.replay() - # the asserts are replayed; if we get here it means they passed again - child.send(Message(CMD_FORWARD, 1)) - child.expect(ANSWER_AT_END) - - -class TestReplayingFinalizerQueue(InteractiveTests): - expected_stop_points = 3000 - - def setup_class(cls): - from rpython.translator.revdb.test.test_basic import compile, run - main = 
get_finalizer_queue_main() - compile(cls, main, backendopt=False) - run(cls, '') - - def test_replaying_finalizer_queue(self): - child = self.replay() - child.send(Message(CMD_FORWARD, 3001)) - child.expect(ANSWER_AT_END) - - -class TestReplayingOldStyleFinalizer(InteractiveTests): - expected_stop_points = 3000 - - def setup_class(cls): - from rpython.translator.revdb.test.test_basic import compile, run - main = get_old_style_finalizer_main() - compile(cls, main, backendopt=False) - run(cls, '') - - def test_replaying_old_style_finalizer(self): - child = self.replay() - child.send(Message(CMD_FORWARD, 3001)) - child.expect(ANSWER_AT_END) - - def test_bug1(self): - child = self.replay() - for i in range(50): - child.send(Message(CMD_FORWARD, i)) - child.expect_ready() From pypy.commits at gmail.com Fri Sep 9 05:01:59 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 09 Sep 2016 02:01:59 -0700 (PDT) Subject: [pypy-commit] pypy reverse-debugger: Disable micronumpy as well, for now Message-ID: <57d27a87.e440c20a.881ca.b489@mx.google.com> Author: Armin Rigo Branch: reverse-debugger Changeset: r86970:dcba9f2816ab Date: 2016-09-09 11:01 +0200 http://bitbucket.org/pypy/pypy/changeset/dcba9f2816ab/ Log: Disable micronumpy as well, for now diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -57,6 +57,7 @@ reverse_debugger_disable_modules = set([ "_continuation", "_vmprof", "_multiprocessing", + "micronumpy", ]) # XXX this should move somewhere else, maybe to platform ("is this posixish" From pypy.commits at gmail.com Fri Sep 9 05:04:13 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 09 Sep 2016 02:04:13 -0700 (PDT) Subject: [pypy-commit] pypy reverse-debugger: Rename the env var REVDB Message-ID: <57d27b0d.255ac20a.7ccce.b2fa@mx.google.com> Author: Armin Rigo Branch: reverse-debugger Changeset: r86971:4909c06daf41 Date: 2016-09-09 11:03 +0200 
http://bitbucket.org/pypy/pypy/changeset/4909c06daf41/ Log: Rename the env var REVDB diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -200,7 +200,7 @@ skip the caching logic inside getter methods or properties, to make them usable from - watchpoints. Note that you need to re-run ``PYPYRDB=.. pypy'' + watchpoints. Note that you need to re-run ``REVDB=.. pypy'' after changing the Python code. """ return space.wrap(space._side_effects_ok()) diff --git a/rpython/translator/revdb/src-revdb/revdb.c b/rpython/translator/revdb/src-revdb/revdb.c --- a/rpython/translator/revdb/src-revdb/revdb.c +++ b/rpython/translator/revdb/src-revdb/revdb.c @@ -91,7 +91,7 @@ Potentially buggy to use argv[0] here, but good enough I suppose. For this reason ensure_fixed_address_space() is - not called when running manually without any PYPYRDB + not called when running manually without any REVDB environment variable set. 
*/ execv(argv[0], argv); @@ -207,7 +207,7 @@ static void setup_record_mode(int argc, char *argv[]) { - char *filename = getenv("PYPYRDB"); + char *filename = getenv("REVDB"); rdb_header_t h; int i; @@ -216,11 +216,11 @@ if (filename && *filename) { ensure_fixed_address_space(argv); - putenv("PYPYRDB="); + putenv("REVDB="); rpy_rev_fileno = open(filename, O_RDWR | O_CLOEXEC | O_CREAT | O_NOCTTY | O_TRUNC, 0600); if (rpy_rev_fileno < 0) { - fprintf(stderr, "Fatal error: can't create PYPYRDB file '%s'\n", + fprintf(stderr, "Fatal error: can't create REVDB file '%s'\n", filename); abort(); } @@ -250,7 +250,7 @@ } else { fprintf(stderr, "PID %d starting, log file disabled " - "(use PYPYRDB=logfile)\n", (int)getpid()); + "(use REVDB=logfile)\n", (int)getpid()); } rpy_revdb.buf_p = rpy_rev_buffer + sizeof(int16_t); From pypy.commits at gmail.com Fri Sep 9 08:40:35 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 09 Sep 2016 05:40:35 -0700 (PDT) Subject: [pypy-commit] pypy default: test, fix Py_buffer format, which can be a string (issue #2396 and IRC discussion) Message-ID: <57d2adc3.a4fdc20a.9fe80.12fe@mx.google.com> Author: Matti Picus Branch: Changeset: r86972:fe0add22fd7e Date: 2016-09-09 12:37 +0300 http://bitbucket.org/pypy/pypy/changeset/fe0add22fd7e/ Log: test, fix Py_buffer format, which can be a string (issue #2396 and IRC discussion) diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -119,7 +119,7 @@ constant_names = """ Py_TPFLAGS_READY Py_TPFLAGS_READYING Py_TPFLAGS_HAVE_GETCHARBUFFER -METH_COEXIST METH_STATIC METH_CLASS Py_TPFLAGS_BASETYPE +METH_COEXIST METH_STATIC METH_CLASS Py_TPFLAGS_BASETYPE Py_MAX_FMT METH_NOARGS METH_VARARGS METH_KEYWORDS METH_O Py_TPFLAGS_HAVE_INPLACEOPS Py_TPFLAGS_HEAPTYPE Py_TPFLAGS_HAVE_CLASS Py_TPFLAGS_HAVE_NEWBUFFER Py_LT Py_LE Py_EQ Py_NE Py_GT Py_GE Py_TPFLAGS_CHECKTYPES Py_MAX_NDIMS @@ -645,7 +645,7 @@ ('format', rffi.CCHARP), ('shape', 
Py_ssize_tP), ('strides', Py_ssize_tP), - ('_format', rffi.UCHAR), + ('_format', rffi.CFixedArray(rffi.UCHAR, Py_MAX_FMT)), ('_shape', rffi.CFixedArray(Py_ssize_t, Py_MAX_NDIMS)), ('_strides', rffi.CFixedArray(Py_ssize_t, Py_MAX_NDIMS)), ('suboffsets', Py_ssize_tP), diff --git a/pypy/module/cpyext/include/object.h b/pypy/module/cpyext/include/object.h --- a/pypy/module/cpyext/include/object.h +++ b/pypy/module/cpyext/include/object.h @@ -144,6 +144,7 @@ /* Py3k buffer interface, adapted for PyPy */ #define Py_MAX_NDIMS 32 +#define Py_MAX_FMT 5 typedef struct bufferinfo { void *buf; PyObject *obj; /* owned reference */ @@ -158,7 +159,7 @@ Py_ssize_t *shape; Py_ssize_t *strides; Py_ssize_t *suboffsets; /* alway NULL for app-level objects*/ - unsigned char _format; + unsigned char _format[Py_MAX_FMT]; Py_ssize_t _strides[Py_MAX_NDIMS]; Py_ssize_t _shape[Py_MAX_NDIMS]; /* static store for shape and strides of diff --git a/pypy/module/cpyext/memoryobject.py b/pypy/module/cpyext/memoryobject.py --- a/pypy/module/cpyext/memoryobject.py +++ b/pypy/module/cpyext/memoryobject.py @@ -1,5 +1,5 @@ from pypy.module.cpyext.api import (cpython_api, Py_buffer, CANNOT_FAIL, - Py_MAX_NDIMS, build_type_checkers, Py_ssize_tP) + Py_MAX_FMT, Py_MAX_NDIMS, build_type_checkers, Py_ssize_tP) from pypy.module.cpyext.pyobject import PyObject, make_ref, incref from rpython.rtyper.lltypesystem import lltype, rffi from pypy.objspace.std.memoryobject import W_MemoryView @@ -41,10 +41,22 @@ view.c_len = w_obj.getlength() view.c_itemsize = w_obj.buf.getitemsize() rffi.setintfield(view, 'c_ndim', ndim) - view.c__format = rffi.cast(rffi.UCHAR, w_obj.buf.getformat()) view.c_format = rffi.cast(rffi.CCHARP, view.c__format) view.c_shape = rffi.cast(Py_ssize_tP, view.c__shape) view.c_strides = rffi.cast(Py_ssize_tP, view.c__strides) + fmt = w_obj.buf.getformat() + n = Py_MAX_FMT - 1 # NULL terminated buffer + if len(fmt) > n: + ### WARN? 
+ pass + else: + n = len(fmt) + for i in range(n): + if ord(fmt[i]) > 255: + view.c_format[i] = '*' + else: + view.c_format[i] = fmt[i] + view.c_format[n] = '\x00' shape = w_obj.buf.getshape() strides = w_obj.buf.getstrides() for i in range(ndim): diff --git a/pypy/module/cpyext/test/test_api.py b/pypy/module/cpyext/test/test_api.py --- a/pypy/module/cpyext/test/test_api.py +++ b/pypy/module/cpyext/test/test_api.py @@ -1,5 +1,5 @@ import py, pytest -from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rtyper.lltypesystem import lltype from pypy.interpreter.baseobjspace import W_Root from pypy.module.cpyext.state import State from pypy.module.cpyext import api diff --git a/pypy/module/cpyext/test/test_bufferobject.py b/pypy/module/cpyext/test/test_bufferobject.py --- a/pypy/module/cpyext/test/test_bufferobject.py +++ b/pypy/module/cpyext/test/test_bufferobject.py @@ -1,4 +1,4 @@ -from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rtyper.lltypesystem import lltype from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase from pypy.module.cpyext.api import PyObject diff --git a/pypy/module/cpyext/test/test_memoryobject.py b/pypy/module/cpyext/test/test_memoryobject.py --- a/pypy/module/cpyext/test/test_memoryobject.py +++ b/pypy/module/cpyext/test/test_memoryobject.py @@ -1,3 +1,4 @@ +from rpython.rtyper.lltypesystem import rffi from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase from rpython.rlib.buffer import StringBuffer @@ -16,8 +17,12 @@ w_buf = space.newbuffer(StringBuffer("hello")) w_memoryview = api.PyMemoryView_FromObject(w_buf) w_view = api.PyMemoryView_GET_BUFFER(w_memoryview) - ndim = w_view.c_ndim - assert ndim == 1 + assert w_view.c_ndim == 1 + f = rffi.charp2str(w_view.c_format) + assert f == 'B' + assert w_view.c_shape[0] == 5 + assert w_view.c_strides[0] == 1 + assert 
w_view.c_len == 5 class AppTestBufferProtocol(AppTestCpythonExtensionBase): def test_buffer_protocol(self): From pypy.commits at gmail.com Fri Sep 9 08:40:37 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 09 Sep 2016 05:40:37 -0700 (PDT) Subject: [pypy-commit] pypy default: move all Py_buffer to memoryobject, create helper function Message-ID: <57d2adc5.6937c20a.1be9a.14a2@mx.google.com> Author: Matti Picus Branch: Changeset: r86973:f8f2080418c8 Date: 2016-09-09 13:59 +0300 http://bitbucket.org/pypy/pypy/changeset/f8f2080418c8/ Log: move all Py_buffer to memoryobject, create helper function diff --git a/pypy/module/cpyext/buffer.py b/pypy/module/cpyext/buffer.py --- a/pypy/module/cpyext/buffer.py +++ b/pypy/module/cpyext/buffer.py @@ -1,9 +1,7 @@ -from pypy.interpreter.error import oefmt -from rpython.rtyper.lltypesystem import rffi, lltype -from rpython.rlib.rarithmetic import widen +from rpython.rtyper.lltypesystem import rffi from pypy.module.cpyext.api import ( - cpython_api, CANNOT_FAIL, Py_buffer, Py_TPFLAGS_HAVE_NEWBUFFER, Py_ssize_tP) -from pypy.module.cpyext.pyobject import PyObject, make_ref, incref + cpython_api, CANNOT_FAIL, Py_TPFLAGS_HAVE_NEWBUFFER) +from pypy.module.cpyext.pyobject import PyObject @cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) def PyObject_CheckBuffer(space, pyobj): @@ -14,102 +12,4 @@ return 1 return 0 - at cpython_api([PyObject, lltype.Ptr(Py_buffer), rffi.INT_real], - rffi.INT_real, error=-1) -def PyObject_GetBuffer(space, w_obj, view, flags): - """Export obj into a Py_buffer, view. These arguments must - never be NULL. The flags argument is a bit field indicating what - kind of buffer the caller is prepared to deal with and therefore what - kind of buffer the exporter is allowed to return. 
The buffer interface - allows for complicated memory sharing possibilities, but some caller may - not be able to handle all the complexity but may want to see if the - exporter will let them take a simpler view to its memory. - - Some exporters may not be able to share memory in every possible way and - may need to raise errors to signal to some consumers that something is - just not possible. These errors should be a BufferError unless - there is another error that is actually causing the problem. The - exporter can use flags information to simplify how much of the - Py_buffer structure is filled in with non-default values and/or - raise an error if the object can't support a simpler view of its memory. - - 0 is returned on success and -1 on error.""" - flags = widen(flags) - buf = space.buffer_w(w_obj, flags) - try: - view.c_buf = rffi.cast(rffi.VOIDP, buf.get_raw_address()) - except ValueError: - raise BufferError("could not create buffer from object") - view.c_len = buf.getlength() - view.c_obj = make_ref(space, w_obj) - ndim = buf.getndim() - view.c_itemsize = buf.getitemsize() - rffi.setintfield(view, 'c_readonly', int(buf.readonly)) - rffi.setintfield(view, 'c_ndim', ndim) - view.c_format = rffi.str2charp(buf.getformat()) - view.c_shape = lltype.malloc(Py_ssize_tP.TO, ndim, flavor='raw') - view.c_strides = lltype.malloc(Py_ssize_tP.TO, ndim, flavor='raw') - shape = buf.getshape() - strides = buf.getstrides() - for i in range(ndim): - view.c_shape[i] = shape[i] - view.c_strides[i] = strides[i] - view.c_suboffsets = lltype.nullptr(Py_ssize_tP.TO) - view.c_internal = lltype.nullptr(rffi.VOIDP.TO) - return 0 - -def _IsFortranContiguous(view): - ndim = widen(view.c_ndim) - if ndim == 0: - return 1 - if not view.c_strides: - return ndim == 1 - sd = view.c_itemsize - if ndim == 1: - return view.c_shape[0] == 1 or sd == view.c_strides[0] - for i in range(view.c_ndim): - dim = view.c_shape[i] - if dim == 0: - return 1 - if view.c_strides[i] != sd: - return 0 - sd *= 
dim - return 1 - -def _IsCContiguous(view): - ndim = widen(view.c_ndim) - if ndim == 0: - return 1 - if not view.c_strides: - return ndim == 1 - sd = view.c_itemsize - if ndim == 1: - return view.c_shape[0] == 1 or sd == view.c_strides[0] - for i in range(ndim - 1, -1, -1): - dim = view.c_shape[i] - if dim == 0: - return 1 - if view.c_strides[i] != sd: - return 0 - sd *= dim - return 1 - - - at cpython_api([lltype.Ptr(Py_buffer), lltype.Char], rffi.INT_real, error=CANNOT_FAIL) -def PyBuffer_IsContiguous(space, view, fort): - """Return 1 if the memory defined by the view is C-style (fortran is - 'C') or Fortran-style (fortran is 'F') contiguous or either one - (fortran is 'A'). Return 0 otherwise.""" - # traverse the strides, checking for consistent stride increases from - # right-to-left (c) or left-to-right (fortran). Copied from cpython - if not view.c_suboffsets: - return 0 - if (fort == 'C'): - return _IsCContiguous(view) - elif (fort == 'F'): - return _IsFortranContiguous(view) - elif (fort == 'A'): - return (_IsCContiguous(view) or _IsFortranContiguous(view)) - return 0 - diff --git a/pypy/module/cpyext/memoryobject.py b/pypy/module/cpyext/memoryobject.py --- a/pypy/module/cpyext/memoryobject.py +++ b/pypy/module/cpyext/memoryobject.py @@ -2,10 +2,124 @@ Py_MAX_FMT, Py_MAX_NDIMS, build_type_checkers, Py_ssize_tP) from pypy.module.cpyext.pyobject import PyObject, make_ref, incref from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rlib.rarithmetic import widen from pypy.objspace.std.memoryobject import W_MemoryView PyMemoryView_Check, PyMemoryView_CheckExact = build_type_checkers("MemoryView", "w_memoryview") + at cpython_api([PyObject, lltype.Ptr(Py_buffer), rffi.INT_real], + rffi.INT_real, error=-1) +def PyObject_GetBuffer(space, w_obj, view, flags): + """Export obj into a Py_buffer, view. These arguments must + never be NULL. 
The flags argument is a bit field indicating what + kind of buffer the caller is prepared to deal with and therefore what + kind of buffer the exporter is allowed to return. The buffer interface + allows for complicated memory sharing possibilities, but some caller may + not be able to handle all the complexity but may want to see if the + exporter will let them take a simpler view to its memory. + + Some exporters may not be able to share memory in every possible way and + may need to raise errors to signal to some consumers that something is + just not possible. These errors should be a BufferError unless + there is another error that is actually causing the problem. The + exporter can use flags information to simplify how much of the + Py_buffer structure is filled in with non-default values and/or + raise an error if the object can't support a simpler view of its memory. + + 0 is returned on success and -1 on error.""" + flags = widen(flags) + buf = space.buffer_w(w_obj, flags) + try: + view.c_buf = rffi.cast(rffi.VOIDP, buf.get_raw_address()) + except ValueError: + raise BufferError("could not create buffer from object") + return fill_Py_buffer(space, w_obj, view, flags) + view.c_obj = make_ref(space, w_obj) + +def fill_Py_buffer(space, buf, view): + # c_buf, c_obj have been filled in + ndim = buf.getndim() + view.c_len = buf.getlength() + view.c_itemsize = buf.getitemsize() + rffi.setintfield(view, 'c_ndim', ndim) + view.c_format = rffi.cast(rffi.CCHARP, view.c__format) + view.c_shape = rffi.cast(Py_ssize_tP, view.c__shape) + view.c_strides = rffi.cast(Py_ssize_tP, view.c__strides) + fmt = buf.getformat() + n = Py_MAX_FMT - 1 # NULL terminated buffer + if len(fmt) > n: + ### WARN? 
+ pass + else: + n = len(fmt) + for i in range(n): + if ord(fmt[i]) > 255: + view.c_format[i] = '*' + else: + view.c_format[i] = fmt[i] + view.c_format[n] = '\x00' + shape = buf.getshape() + strides = buf.getstrides() + for i in range(ndim): + view.c_shape[i] = shape[i] + view.c_strides[i] = strides[i] + view.c_suboffsets = lltype.nullptr(Py_ssize_tP.TO) + view.c_internal = lltype.nullptr(rffi.VOIDP.TO) + return 0 + +def _IsFortranContiguous(view): + ndim = widen(view.c_ndim) + if ndim == 0: + return 1 + if not view.c_strides: + return ndim == 1 + sd = view.c_itemsize + if ndim == 1: + return view.c_shape[0] == 1 or sd == view.c_strides[0] + for i in range(view.c_ndim): + dim = view.c_shape[i] + if dim == 0: + return 1 + if view.c_strides[i] != sd: + return 0 + sd *= dim + return 1 + +def _IsCContiguous(view): + ndim = widen(view.c_ndim) + if ndim == 0: + return 1 + if not view.c_strides: + return ndim == 1 + sd = view.c_itemsize + if ndim == 1: + return view.c_shape[0] == 1 or sd == view.c_strides[0] + for i in range(ndim - 1, -1, -1): + dim = view.c_shape[i] + if dim == 0: + return 1 + if view.c_strides[i] != sd: + return 0 + sd *= dim + return 1 + + at cpython_api([lltype.Ptr(Py_buffer), lltype.Char], rffi.INT_real, error=CANNOT_FAIL) +def PyBuffer_IsContiguous(space, view, fort): + """Return 1 if the memory defined by the view is C-style (fortran is + 'C') or Fortran-style (fortran is 'F') contiguous or either one + (fortran is 'A'). Return 0 otherwise.""" + # traverse the strides, checking for consistent stride increases from + # right-to-left (c) or left-to-right (fortran). 
Copied from cpython + if not view.c_suboffsets: + return 0 + if (fort == 'C'): + return _IsCContiguous(view) + elif (fort == 'F'): + return _IsFortranContiguous(view) + elif (fort == 'A'): + return (_IsCContiguous(view) or _IsFortranContiguous(view)) + return 0 + @cpython_api([PyObject], PyObject) def PyMemoryView_FromObject(space, w_obj): return space.call_method(space.builtin, "memoryview", w_obj) @@ -38,31 +152,6 @@ view.c_obj = make_ref(space, w_s) rffi.setintfield(view, 'c_readonly', 1) isstr = True - view.c_len = w_obj.getlength() - view.c_itemsize = w_obj.buf.getitemsize() - rffi.setintfield(view, 'c_ndim', ndim) - view.c_format = rffi.cast(rffi.CCHARP, view.c__format) - view.c_shape = rffi.cast(Py_ssize_tP, view.c__shape) - view.c_strides = rffi.cast(Py_ssize_tP, view.c__strides) - fmt = w_obj.buf.getformat() - n = Py_MAX_FMT - 1 # NULL terminated buffer - if len(fmt) > n: - ### WARN? - pass - else: - n = len(fmt) - for i in range(n): - if ord(fmt[i]) > 255: - view.c_format[i] = '*' - else: - view.c_format[i] = fmt[i] - view.c_format[n] = '\x00' - shape = w_obj.buf.getshape() - strides = w_obj.buf.getstrides() - for i in range(ndim): - view.c_shape[i] = shape[i] - view.c_strides[i] = strides[i] - view.c_suboffsets = lltype.nullptr(Py_ssize_tP.TO) - view.c_internal = lltype.nullptr(rffi.VOIDP.TO) + fill_Py_buffer(space, w_obj.buf, view) return view From pypy.commits at gmail.com Fri Sep 9 08:40:40 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 09 Sep 2016 05:40:40 -0700 (PDT) Subject: [pypy-commit] pypy default: make sure test of PyObject_GetBuffer runs to completion, fix implementation Message-ID: <57d2adc8.948e1c0a.a05af.d5e9@mx.google.com> Author: Matti Picus Branch: Changeset: r86975:dcd57351c263 Date: 2016-09-09 15:13 +0300 http://bitbucket.org/pypy/pypy/changeset/dcd57351c263/ Log: make sure test of PyObject_GetBuffer runs to completion, fix implementation diff --git a/pypy/module/cpyext/memoryobject.py b/pypy/module/cpyext/memoryobject.py 
--- a/pypy/module/cpyext/memoryobject.py +++ b/pypy/module/cpyext/memoryobject.py @@ -33,8 +33,8 @@ view.c_buf = rffi.cast(rffi.VOIDP, buf.get_raw_address()) except ValueError: raise BufferError("could not create buffer from object") - return fill_Py_buffer(space, w_obj, view, flags) view.c_obj = make_ref(space, w_obj) + return fill_Py_buffer(space, buf, view) def fill_Py_buffer(space, buf, view): # c_buf, c_obj have been filled in diff --git a/pypy/module/cpyext/test/test_memoryobject.py b/pypy/module/cpyext/test/test_memoryobject.py --- a/pypy/module/cpyext/test/test_memoryobject.py +++ b/pypy/module/cpyext/test/test_memoryobject.py @@ -43,14 +43,10 @@ from _numpypy import multiarray as np module = self.import_module(name='buffer_test') get_buffer_info = module.get_buffer_info - # test_export_flags from numpy test_multiarray raises(ValueError, get_buffer_info, np.arange(5)[::2], ('SIMPLE',)) - # test_relaxed_strides from numpy test_multiarray - arr = np.zeros((1, 10)) - if arr.flags.f_contiguous: - shape, strides = get_buffer_info(arr, ['F_CONTIGUOUS']) - assert strides[0] == 8 - arr = np.ones((10, 1), order='F') - shape, strides = get_buffer_info(arr, ['C_CONTIGUOUS']) - assert strides[-1] == 8 - + arr = np.zeros((1, 10), order='F') + shape, strides = get_buffer_info(arr, ['F_CONTIGUOUS']) + assert strides[0] == 8 + arr = np.zeros((10, 1), order='C') + shape, strides = get_buffer_info(arr, ['C_CONTIGUOUS']) + assert strides[-1] == 8 From pypy.commits at gmail.com Fri Sep 9 08:40:39 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 09 Sep 2016 05:40:39 -0700 (PDT) Subject: [pypy-commit] pypy default: fix unintentional skip of untranslated tests Message-ID: <57d2adc7.05d71c0a.ab11f.0efb@mx.google.com> Author: Matti Picus Branch: Changeset: r86974:0f729817ac87 Date: 2016-09-09 14:24 +0300 http://bitbucket.org/pypy/pypy/changeset/0f729817ac87/ Log: fix unintentional skip of untranslated tests diff --git a/pypy/module/cpyext/test/test_cpyext.py 
b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -18,6 +18,8 @@ from .support import c_compile +only_pypy ="config.option.runappdirect and '__pypy__' not in sys.builtin_module_names" + @api.cpython_api([], api.PyObject) def PyPy_Crash1(space): 1/0 @@ -275,11 +277,11 @@ "the test actually passed in the first place; if it failed " "it is likely to reach this place.") - @pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='pypy only test') + @pytest.mark.skipif(only_pypy, reason='pypy only test') def test_only_import(self): import cpyext - @pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='pypy only test') + @pytest.mark.skipif(only_pypy, reason='pypy only test') def test_load_error(self): import cpyext raises(ImportError, cpyext.load_module, "missing.file", "foo") @@ -894,7 +896,7 @@ ]) raises(SystemError, mod.newexc, "name", Exception, {}) - @pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='pypy specific test') + @pytest.mark.skipif(only_pypy, reason='pypy specific test') def test_hash_pointer(self): mod = self.import_extension('foo', [ ('get_hash', 'METH_NOARGS', @@ -945,7 +947,7 @@ print p assert 'py' in p - @pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='pypy only test') + @pytest.mark.skipif(only_pypy, reason='pypy only test') def test_get_version(self): mod = self.import_extension('foo', [ ('get_version', 'METH_NOARGS', diff --git a/pypy/module/cpyext/test/test_thread.py b/pypy/module/cpyext/test/test_thread.py --- a/pypy/module/cpyext/test/test_thread.py +++ b/pypy/module/cpyext/test/test_thread.py @@ -1,12 +1,13 @@ import sys -import py, pytest +import pytest from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase +only_pypy ="config.option.runappdirect and '__pypy__' not in sys.builtin_module_names" class AppTestThread(AppTestCpythonExtensionBase): - 
@pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='pypy only test') + @pytest.mark.skipif(only_pypy, reason='pypy only test') def test_get_thread_ident(self): module = self.import_extension('foo', [ ("get_thread_ident", "METH_NOARGS", @@ -33,7 +34,7 @@ assert results[0][0] != results[1][0] - @pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='pypy only test') + @pytest.mark.skipif(only_pypy, reason='pypy only test') def test_acquire_lock(self): module = self.import_extension('foo', [ ("test_acquire_lock", "METH_NOARGS", @@ -57,7 +58,7 @@ ]) module.test_acquire_lock() - @pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='pypy only test') + @pytest.mark.skipif(only_pypy, reason='pypy only test') def test_release_lock(self): module = self.import_extension('foo', [ ("test_release_lock", "METH_NOARGS", @@ -79,7 +80,7 @@ ]) module.test_release_lock() - @pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='pypy only test') + @pytest.mark.skipif(only_pypy, reason='pypy only test') def test_tls(self): module = self.import_extension('foo', [ ("create_key", "METH_NOARGS", diff --git a/pypy/module/cpyext/test/test_version.py b/pypy/module/cpyext/test/test_version.py --- a/pypy/module/cpyext/test/test_version.py +++ b/pypy/module/cpyext/test/test_version.py @@ -3,6 +3,7 @@ import py, pytest from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase +only_pypy ="config.option.runappdirect and '__pypy__' not in sys.builtin_module_names" def test_pragma_version(): from pypy.module.sys.version import CPYTHON_VERSION @@ -32,11 +33,9 @@ assert module.py_minor_version == sys.version_info.minor assert module.py_micro_version == sys.version_info.micro - #@pytest.mark.skipif('__pypy__' not in sys.builtin_module_names, reason='pypy only test') + @pytest.mark.skipif(only_pypy, reason='pypy only test') def test_pypy_versions(self): import sys - if '__pypy__' not in sys.builtin_module_names: - 
py.test.skip("pypy only test") init = """ if (Py_IsInitialized()) { PyObject *m = Py_InitModule("foo", NULL); From pypy.commits at gmail.com Fri Sep 9 09:47:15 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 09 Sep 2016 06:47:15 -0700 (PDT) Subject: [pypy-commit] pypy reverse-debugger: Added tag RevDB-pypy2.7-v5.4.1 for changeset 4909c06daf41 Message-ID: <57d2bd63.48811c0a.de509.278c@mx.google.com> Author: Armin Rigo Branch: reverse-debugger Changeset: r86976:bd755cdacd05 Date: 2016-09-09 15:46 +0200 http://bitbucket.org/pypy/pypy/changeset/bd755cdacd05/ Log: Added tag RevDB-pypy2.7-v5.4.1 for changeset 4909c06daf41 diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -33,3 +33,4 @@ 050d84dd78997f021acf0e133934275d63547cc0 release-pypy2.7-v5.4.1 050d84dd78997f021acf0e133934275d63547cc0 release-pypy2.7-v5.4.1 0e2d9a73f5a1818d0245d75daccdbe21b2d5c3ef release-pypy2.7-v5.4.1 +4909c06daf41ce88f87dc01c57959cadad4df4a8 RevDB-pypy2.7-v5.4.1 From pypy.commits at gmail.com Fri Sep 9 12:02:19 2016 From: pypy.commits at gmail.com (ntruessel) Date: Fri, 09 Sep 2016 09:02:19 -0700 (PDT) Subject: [pypy-commit] pypy quad-color-gc: Update qcgc codebase Message-ID: <57d2dd0b.915c1c0a.e70f.5c1b@mx.google.com> Author: Nicolas Truessel Branch: quad-color-gc Changeset: r86977:e2e9fdac5cf3 Date: 2016-09-09 18:01 +0200 http://bitbucket.org/pypy/pypy/changeset/e2e9fdac5cf3/ Log: Update qcgc codebase diff --git a/rpython/translator/c/src/qcgc/allocator.c b/rpython/translator/c/src/qcgc/allocator.c --- a/rpython/translator/c/src/qcgc/allocator.c +++ b/rpython/translator/c/src/qcgc/allocator.c @@ -29,6 +29,8 @@ qcgc_arena_bag_create(QCGC_ARENA_BAG_INIT_SIZE); qcgc_allocator_state.free_arenas = qcgc_arena_bag_create(4); // XXX + qcgc_allocator_state.use_bump_allocator = true; + // Bump Allocator qcgc_allocator_state.bump_state.bump_ptr = NULL; qcgc_allocator_state.bump_state.remaining_cells = 0; @@ -254,17 +256,22 @@ #endif cell_t *result = NULL; for ( ; index < 
QCGC_SMALL_FREE_LISTS; index++) { - linear_free_list_t *free_list = - qcgc_allocator_state.fit_state.small_free_list[index]; size_t list_cell_size = small_index_to_cells(index); - while (free_list->count > 0) { - result = free_list->items[free_list->count - 1]; - free_list = qcgc_linear_free_list_remove_index(free_list, - free_list->count - 1); + while (qcgc_allocator_state.fit_state.small_free_list[index]->count + > 0) { + result = qcgc_allocator_state.fit_state.small_free_list[index]-> + items[qcgc_allocator_state.fit_state.small_free_list[index] + ->count - 1]; + qcgc_allocator_state.fit_state.small_free_list[index] = + qcgc_linear_free_list_remove_index( + qcgc_allocator_state.fit_state.small_free_list[index], + qcgc_allocator_state.fit_state.small_free_list[index]-> + count - 1); // Check whether block is still valid if (valid_block(result, list_cell_size)) { + // The next call might invalidate free_list, reload! qcgc_fit_allocator_add(result + cells, list_cell_size - cells); break; } else { @@ -272,7 +279,6 @@ } } - qcgc_allocator_state.fit_state.small_free_list[index] = free_list; if (result != NULL) { return result; } @@ -285,24 +291,32 @@ assert(1u<<(index + QCGC_LARGE_FREE_LIST_FIRST_EXP) <= cells); assert(1u<<(index + QCGC_LARGE_FREE_LIST_FIRST_EXP + 1) > cells); #endif - exp_free_list_t *free_list = - qcgc_allocator_state.fit_state.large_free_list[index]; - size_t best_fit_index = free_list->count; + size_t best_fit_index = qcgc_allocator_state.fit_state. 
+ large_free_list[index]->count; cell_t *result = NULL; size_t best_fit_cells = SIZE_MAX; size_t i = 0; - while (i < free_list->count) { - if (valid_block(free_list->items[i].ptr, free_list->items[i].size)) { - if (free_list->items[i].size >= cells && - free_list->items[i].size < best_fit_cells) { - result = free_list->items[i].ptr; - best_fit_cells = free_list->items[i].size; + while (i < qcgc_allocator_state.fit_state.large_free_list[index]->count) { + if (valid_block(qcgc_allocator_state.fit_state.large_free_list[index] -> + items[i].ptr, + qcgc_allocator_state.fit_state.large_free_list[index]-> + items[i].size)) { + if (qcgc_allocator_state.fit_state.large_free_list[index]-> + items[i].size >= cells && + qcgc_allocator_state.fit_state.large_free_list[index]-> + items[i].size < best_fit_cells) { + result = qcgc_allocator_state.fit_state.large_free_list[index]-> + items[i].ptr; + best_fit_cells = qcgc_allocator_state.fit_state. + large_free_list[index]->items[i].size; best_fit_index = i; } i++; } else { - free_list = qcgc_exp_free_list_remove_index(free_list, i); + qcgc_allocator_state.fit_state.large_free_list[index] = + qcgc_exp_free_list_remove_index(qcgc_allocator_state.fit_state. + large_free_list[index], i); // NO i++ ! } @@ -313,14 +327,16 @@ if (result != NULL) { // Best fit was found - assert(best_fit_index < free_list->count); - free_list = qcgc_exp_free_list_remove_index(free_list, best_fit_index); + assert(best_fit_index < qcgc_allocator_state.fit_state. + large_free_list[index]->count); + qcgc_allocator_state.fit_state.large_free_list[index] = + qcgc_exp_free_list_remove_index(qcgc_allocator_state.fit_state. 
+ large_free_list[index], best_fit_index); qcgc_fit_allocator_add(result + cells, best_fit_cells - cells); } else { // No best fit, go for first fit result = fit_allocator_large_first_fit(index + 1, cells); } - qcgc_allocator_state.fit_state.large_free_list[index] = free_list; return result; } @@ -330,13 +346,17 @@ #endif cell_t *result = NULL; for ( ; index < QCGC_LARGE_FREE_LISTS; index++) { - exp_free_list_t *free_list = - qcgc_allocator_state.fit_state.large_free_list[index]; - while(free_list->count > 0) { + while(qcgc_allocator_state.fit_state.large_free_list[index]->count + > 0) { struct exp_free_list_item_s item = - free_list->items[free_list->count - 1]; - free_list = qcgc_exp_free_list_remove_index(free_list, - free_list->count - 1); + qcgc_allocator_state.fit_state.large_free_list[index]->items[ + qcgc_allocator_state.fit_state.large_free_list[index]->count - 1 + ]; + qcgc_allocator_state.fit_state.large_free_list[index] = + qcgc_exp_free_list_remove_index( + qcgc_allocator_state.fit_state.large_free_list[index], + qcgc_allocator_state.fit_state.large_free_list[index]-> + count - 1); // Check whether block is still valid if (valid_block(item.ptr, item.size)) { @@ -345,7 +365,6 @@ break; } } - qcgc_allocator_state.fit_state.large_free_list[index] = free_list; if (result != NULL) { return result; } diff --git a/rpython/translator/c/src/qcgc/allocator.h b/rpython/translator/c/src/qcgc/allocator.h --- a/rpython/translator/c/src/qcgc/allocator.h +++ b/rpython/translator/c/src/qcgc/allocator.h @@ -48,6 +48,7 @@ linear_free_list_t *small_free_list[QCGC_SMALL_FREE_LISTS]; exp_free_list_t *large_free_list[QCGC_LARGE_FREE_LISTS]; } fit_state; + bool use_bump_allocator; } qcgc_allocator_state; /** diff --git a/rpython/translator/c/src/qcgc/arena.c b/rpython/translator/c/src/qcgc/arena.c --- a/rpython/translator/c/src/qcgc/arena.c +++ b/rpython/translator/c/src/qcgc/arena.c @@ -11,6 +11,7 @@ #include "allocator.h" #include "event_logger.h" +#include "gc_state.h" /** 
* Internal functions @@ -178,85 +179,124 @@ // No coalescing, collector will do this } +bool qcgc_arena_pseudo_sweep(arena_t *arena) { +#if CHECKED + assert(arena != NULL); + assert(qcgc_arena_is_coalesced(arena)); + assert(qcgc_arena_addr(qcgc_allocator_state.bump_state.bump_ptr) == arena); +#endif + // XXX: Maybe ignore free cell / largest block counting here? + size_t last_free_cell = 0; + for (size_t cell = QCGC_ARENA_FIRST_CELL_INDEX; + cell < QCGC_ARENA_CELLS_COUNT; + cell++) { + switch (get_blocktype(arena, cell)) { + case BLOCK_FREE: + last_free_cell = cell; + case BLOCK_EXTENT: // Fall through + break; + case BLOCK_BLACK: + set_blocktype(arena, cell, BLOCK_WHITE); + case BLOCK_WHITE: // Fall through + if (last_free_cell != 0) { + qcgc_state.free_cells += cell - last_free_cell; + qcgc_state.largest_free_block = MAX( + qcgc_state.largest_free_block, + cell - last_free_cell); + last_free_cell = 0; + } + break; + } + } + if (last_free_cell != 0) { + qcgc_state.free_cells += QCGC_ARENA_CELLS_COUNT - last_free_cell; + qcgc_state.largest_free_block = MAX( + qcgc_state.largest_free_block, + QCGC_ARENA_CELLS_COUNT - last_free_cell); + last_free_cell = 0; + } +#if CHECKED + assert(qcgc_arena_is_coalesced(arena)); +#endif + return false; +} + bool qcgc_arena_sweep(arena_t *arena) { #if CHECKED assert(arena != NULL); assert(qcgc_arena_is_coalesced(arena)); + //assert(qcgc_arena_addr(qcgc_allocator_state.bump_state.bump_ptr) != arena); #endif -#if DEBUG_ZERO_ON_SWEEP - bool zero = true; -#endif - bool free = true; - bool coalesce = false; - bool add_to_free_list = false; - size_t last_free_cell = QCGC_ARENA_FIRST_CELL_INDEX; - if (qcgc_arena_addr(qcgc_allocator_state.bump_state.bump_ptr) == arena) { - for (size_t cell = QCGC_ARENA_FIRST_CELL_INDEX; - cell < QCGC_ARENA_CELLS_COUNT; - cell++) { - if (get_blocktype(arena, cell) == BLOCK_BLACK) { - set_blocktype(arena, cell, BLOCK_WHITE); - } - } - return false; + return qcgc_arena_pseudo_sweep(arena); } + size_t 
last_free_cell = 0; + bool register_free_block = false; + bool free = true; for (size_t cell = QCGC_ARENA_FIRST_CELL_INDEX; cell < QCGC_ARENA_CELLS_COUNT; cell++) { switch (get_blocktype(arena, cell)) { case BLOCK_EXTENT: -#if DEBUG_ZERO_ON_SWEEP - if (zero) { - memset(&arena->cells[cell], 0, sizeof(cell_t)); - } -#endif break; case BLOCK_FREE: - if (coalesce) { + if (last_free_cell != 0) { + // Coalesce set_blocktype(arena, cell, BLOCK_EXTENT); } else { last_free_cell = cell; } - coalesce = true; -#if DEBUG_ZERO_ON_SWEEP - zero = true; - memset(&arena->cells[cell], 0, sizeof(cell_t)); -#endif + // ==> last_free_cell != 0 break; case BLOCK_WHITE: - if (coalesce) { + if (last_free_cell != 0) { + // Coalesce set_blocktype(arena, cell, BLOCK_EXTENT); } else { set_blocktype(arena, cell, BLOCK_FREE); last_free_cell = cell; } - coalesce = true; - add_to_free_list = true; -#if DEBUG_ZERO_ON_SWEEP - zero = true; - memset(&arena->cells[cell], 0, sizeof(cell_t)); -#endif + // ==> last_free_cell != 0 + register_free_block = true; break; case BLOCK_BLACK: set_blocktype(arena, cell, BLOCK_WHITE); - if (add_to_free_list) { - qcgc_fit_allocator_add(arena->cells + last_free_cell, + if (last_free_cell != 0) { + if (register_free_block) { + qcgc_fit_allocator_add(arena->cells + last_free_cell, + cell - last_free_cell); +#if DEBUG_ZERO_ON_SWEEP + memset(arena->cells + last_free_cell, 0, + sizeof(cell_t) * (cell - last_free_cell)); +#endif + } + qcgc_state.free_cells += cell - last_free_cell; + qcgc_state.largest_free_block = MAX( + qcgc_state.largest_free_block, cell - last_free_cell); + last_free_cell = 0; } + register_free_block = false; free = false; - coalesce = false; - add_to_free_list = false; -#if DEBUG_ZERO_ON_SWEEP - zero = false; -#endif + // ==> last_free_cell == 0 break; } } - if (add_to_free_list && !free) { - qcgc_fit_allocator_add(arena->cells + last_free_cell, - QCGC_ARENA_CELLS_COUNT - last_free_cell); + if (last_free_cell != 0 && !free) { + if (register_free_block) 
{ + qcgc_fit_allocator_add(arena->cells + last_free_cell, + QCGC_ARENA_CELLS_COUNT - last_free_cell); +#if DEBUG_ZERO_ON_SWEEP + memset(arena->cells + last_free_cell, 0, + sizeof(cell_t) * (QCGC_ARENA_CELLS_COUNT - last_free_cell)); +#endif + } + qcgc_state.free_cells += QCGC_ARENA_CELLS_COUNT - last_free_cell; + qcgc_state.largest_free_block = MAX( + qcgc_state.largest_free_block, + QCGC_ARENA_CELLS_COUNT - last_free_cell); + last_free_cell = 0; } #if CHECKED assert(qcgc_arena_is_coalesced(arena)); @@ -284,6 +324,7 @@ return true; } + bool qcgc_arena_is_coalesced(arena_t *arena) { #if CHECKED assert(arena != NULL); diff --git a/rpython/translator/c/src/qcgc/arena.h b/rpython/translator/c/src/qcgc/arena.h --- a/rpython/translator/c/src/qcgc/arena.h +++ b/rpython/translator/c/src/qcgc/arena.h @@ -143,6 +143,15 @@ */ bool qcgc_arena_sweep(arena_t *arena); +/** + * Sweep given arena, but only reset black to white, no white to free + * + * @param arena Arena + * @return Whether arena is empty after sweeping, always false + */ +bool qcgc_arena_pseudo_sweep(arena_t *arena); + + /******************************************************************************* * Debug functions * ******************************************************************************/ diff --git a/rpython/translator/c/src/qcgc/config.h b/rpython/translator/c/src/qcgc/config.h --- a/rpython/translator/c/src/qcgc/config.h +++ b/rpython/translator/c/src/qcgc/config.h @@ -1,14 +1,14 @@ #pragma once #define CHECKED 0 // Enable runtime sanity checks -#define DEBUG_ZERO_ON_SWEEP 1 // Zero memory on sweep (debug only) +#define DEBUG_ZERO_ON_SWEEP 0 // Zero memory on sweep (debug only) #define QCGC_INIT_ZERO 1 // Init new objects with zero bytes /** * Event logger */ -#define EVENT_LOG 0 // Enable event log +#define EVENT_LOG 1 // Enable event log #define LOGFILE "./qcgc_events.log" // Default logfile #define LOG_ALLOCATION 0 // Enable allocation log (warning: // significant performance impact) diff --git 
a/rpython/translator/c/src/qcgc/event_logger.h b/rpython/translator/c/src/qcgc/event_logger.h --- a/rpython/translator/c/src/qcgc/event_logger.h +++ b/rpython/translator/c/src/qcgc/event_logger.h @@ -21,7 +21,6 @@ EVENT_NEW_ARENA, EVENT_MARK_START, - EVENT_INCMARK_START, EVENT_MARK_DONE, }; diff --git a/rpython/translator/c/src/qcgc/gc_state.h b/rpython/translator/c/src/qcgc/gc_state.h --- a/rpython/translator/c/src/qcgc/gc_state.h +++ b/rpython/translator/c/src/qcgc/gc_state.h @@ -33,4 +33,9 @@ gc_phase_t phase; size_t bytes_since_collection; size_t bytes_since_incmark; + size_t free_cells; // Overall amount of free cells without huge + // blocks and free areans. Valid right after sweep + size_t largest_free_block; // Size of the largest free block. + // (Free arenas don't count as free blocks) + // Valid right after sweep } qcgc_state; diff --git a/rpython/translator/c/src/qcgc/qcgc.c b/rpython/translator/c/src/qcgc/qcgc.c --- a/rpython/translator/c/src/qcgc/qcgc.c +++ b/rpython/translator/c/src/qcgc/qcgc.c @@ -38,6 +38,8 @@ qcgc_state.phase = GC_PAUSE; qcgc_state.bytes_since_collection = 0; qcgc_state.bytes_since_incmark = 0; + qcgc_state.free_cells = 0; + qcgc_state.largest_free_block = 0; qcgc_allocator_initialize(); qcgc_hbtable_initialize(); qcgc_event_logger_initialize(); @@ -145,7 +147,7 @@ if (size <= 1<count; - qcgc_event_logger_log(EVENT_SWEEP_START, sizeof(arena_count), - (uint8_t *) &arena_count); + { + unsigned long arena_count; + arena_count = qcgc_allocator_state.arenas->count; + qcgc_event_logger_log(EVENT_SWEEP_START, sizeof(arena_count), + (uint8_t *) &arena_count); + } qcgc_hbtable_sweep(); size_t i = 0; + qcgc_state.free_cells = 0; + qcgc_state.largest_free_block = 0; while (i < qcgc_allocator_state.arenas->count) { arena_t *arena = qcgc_allocator_state.arenas->items[i]; // The arena that contains the bump pointer is autmatically skipped @@ -355,7 +377,6 @@ qcgc_allocator_state.arenas, i); qcgc_allocator_state.free_arenas = qcgc_arena_bag_add( 
qcgc_allocator_state.free_arenas, arena); - // NO i++ } else { // Not free @@ -364,8 +385,26 @@ } qcgc_state.phase = GC_PAUSE; - qcgc_event_logger_log(EVENT_SWEEP_DONE, 0, NULL); + // Determine whether fragmentation is too high + // Fragmenation = 1 - (largest block / total free space) + // Use bump allocator when fragmentation < 50% + qcgc_allocator_state.use_bump_allocator = qcgc_state.free_cells < + 2 * qcgc_state.largest_free_block; + update_weakrefs(); + + { + struct log_info_s { + size_t free_cells; + size_t largest_free_block; + }; + struct log_info_s log_info = { + qcgc_state.free_cells, + qcgc_state.largest_free_block + }; + qcgc_event_logger_log(EVENT_SWEEP_DONE, sizeof(struct log_info_s), + (uint8_t *) &log_info); + } } void qcgc_collect(void) { From pypy.commits at gmail.com Fri Sep 9 13:28:37 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 09 Sep 2016 10:28:37 -0700 (PDT) Subject: [pypy-commit] pypy release-5.x: Probable fix for issue #2383: have 'list(S())' call 'S.__getitem__' if S Message-ID: <57d2f145.442d1c0a.36d62.82d1@mx.google.com> Author: Armin Rigo Branch: release-5.x Changeset: r86978:eacdeb58a9ec Date: 2016-08-29 20:32 +0200 http://bitbucket.org/pypy/pypy/changeset/eacdeb58a9ec/ Log: Probable fix for issue #2383: have 'list(S())' call 'S.__getitem__' if S is a subclass of str with a custom __getitem__. (grafted from 990f5b2322e124bbbfd7d9ab56d44a77f0085a8a) diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py --- a/pypy/objspace/descroperation.py +++ b/pypy/objspace/descroperation.py @@ -58,6 +58,20 @@ return w_iter tuple_iter._annspecialcase_ = 'specialize:memo' +def str_getitem(space): + "Utility that returns the app-level descriptor str.__getitem__." + w_src, w_iter = space.lookup_in_type_where(space.w_str, + '__getitem__') + return w_iter +str_getitem._annspecialcase_ = 'specialize:memo' + +def unicode_getitem(space): + "Utility that returns the app-level descriptor unicode.__getitem__." 
+ w_src, w_iter = space.lookup_in_type_where(space.w_unicode, + '__getitem__') + return w_iter +unicode_getitem._annspecialcase_ = 'specialize:memo' + def raiseattrerror(space, w_obj, name, w_descr=None): if w_descr is None: raise oefmt(space.w_AttributeError, diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -445,7 +445,7 @@ return w_obj.listview_bytes() if type(w_obj) is W_SetObject or type(w_obj) is W_FrozensetObject: return w_obj.listview_bytes() - if isinstance(w_obj, W_BytesObject) and self._uses_no_iter(w_obj): + if isinstance(w_obj, W_BytesObject) and self._str_uses_no_iter(w_obj): return w_obj.listview_bytes() if isinstance(w_obj, W_ListObject) and self._uses_list_iter(w_obj): return w_obj.getitems_bytes() @@ -460,7 +460,7 @@ return w_obj.listview_unicode() if type(w_obj) is W_SetObject or type(w_obj) is W_FrozensetObject: return w_obj.listview_unicode() - if isinstance(w_obj, W_UnicodeObject) and self._uses_no_iter(w_obj): + if isinstance(w_obj, W_UnicodeObject) and self._uni_uses_no_iter(w_obj): return w_obj.listview_unicode() if isinstance(w_obj, W_ListObject) and self._uses_list_iter(w_obj): return w_obj.getitems_unicode() @@ -504,8 +504,15 @@ from pypy.objspace.descroperation import tuple_iter return self.lookup(w_obj, '__iter__') is tuple_iter(self) - def _uses_no_iter(self, w_obj): - return self.lookup(w_obj, '__iter__') is None + def _str_uses_no_iter(self, w_obj): + from pypy.objspace.descroperation import str_getitem + return (self.lookup(w_obj, '__iter__') is None and + self.lookup(w_obj, '__getitem__') is str_getitem(self)) + + def _uni_uses_no_iter(self, w_obj): + from pypy.objspace.descroperation import unicode_getitem + return (self.lookup(w_obj, '__iter__') is None and + self.lookup(w_obj, '__getitem__') is unicode_getitem(self)) def sliceindices(self, w_slice, w_length): if isinstance(w_slice, W_SliceObject): diff --git 
a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -432,7 +432,7 @@ class AppTestListObject(object): - spaceconfig = {"objspace.std.withliststrategies": True} # it's the default + #spaceconfig = {"objspace.std.withliststrategies": True} # it's the default def setup_class(cls): import platform @@ -1518,6 +1518,16 @@ def __iter__(self): yield "ok" assert list(U(u"don't see me")) == ["ok"] + # + class S(str): + def __getitem__(self, index): + return str.__getitem__(self, index).upper() + assert list(S("abc")) == list("ABC") + # + class U(unicode): + def __getitem__(self, index): + return unicode.__getitem__(self, index).upper() + assert list(U(u"abc")) == list(u"ABC") def test_extend_from_nonempty_list_with_subclasses(self): l = ["hi!"] @@ -1543,6 +1553,20 @@ l.extend(U(u"don't see me")) # assert l == ["hi!", "okT", "okL", "okL", "okS", "okU"] + # + class S(str): + def __getitem__(self, index): + return str.__getitem__(self, index).upper() + l = [] + l.extend(S("abc")) + assert l == list("ABC") + # + class U(unicode): + def __getitem__(self, index): + return unicode.__getitem__(self, index).upper() + l = [] + l.extend(U(u"abc")) + assert l == list(u"ABC") def test_no_len_on_range_iter(self): iterable = range(10) From pypy.commits at gmail.com Fri Sep 9 15:26:44 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 09 Sep 2016 12:26:44 -0700 (PDT) Subject: [pypy-commit] pypy test-cpyext: Simplify import_module(): kill load_it param and remove duplication with reimport_module() Message-ID: <57d30cf4.010c1c0a.8043.abfb@mx.google.com> Author: Ronan Lamy Branch: test-cpyext Changeset: r86979:12888babfe97 Date: 2016-09-09 20:25 +0100 http://bitbucket.org/pypy/pypy/changeset/12888babfe97/ Log: Simplify import_module(): kill load_it param and remove duplication with reimport_module() diff --git a/pypy/module/cpyext/test/test_cpyext.py 
b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -53,19 +53,19 @@ self.extra_libs = extra_libs self.ext = ext - def compile_extension_module(self, modname, include_dirs=[], + def compile_extension_module(self, name, include_dirs=[], source_files=None, source_strings=None): """ Build an extension module and return the filename of the resulting native code file. - modname is the name of the module, possibly including dots if it is a + name is the name of the module, possibly including dots if it is a module inside a package. Any extra keyword arguments are passed on to ExternalCompilationInfo to build the module (so specify your source with one of those). """ - modname = modname.split('.')[-1] + modname = name.split('.')[-1] dirname = (udir/uniquemodulename('module')).ensure(dir=1) if source_strings: assert not source_files @@ -354,9 +354,8 @@ return space.wrap(pydname) @gateway.unwrap_spec(name=str, init='str_or_None', body=str, - load_it=bool, filename='str_or_None', - PY_SSIZE_T_CLEAN=bool) - def import_module(space, name, init=None, body='', load_it=True, + filename='str_or_None', PY_SSIZE_T_CLEAN=bool) + def import_module(space, name, init=None, body='', filename=None, w_include_dirs=None, PY_SSIZE_T_CLEAN=False): """ @@ -405,30 +404,21 @@ kwds = dict(source_files=[filename]) mod = self.sys_info.compile_extension_module( name, include_dirs=include_dirs, **kwds) + w_result = load_module(space, mod, name) + if not self.runappdirect: + self.record_imported_module(name) + return w_result - if not load_it: - return space.wrap(mod) + + @gateway.unwrap_spec(mod=str, name=str) + def load_module(space, mod, name): if self.runappdirect: import imp return imp.load_dynamic(name, mod) else: api.load_extension_module(space, mod, name) - self.record_imported_module(name) return space.getitem( - space.sys.get('modules'), - space.wrap(name)) - - - @gateway.unwrap_spec(mod=str, name=str) - def 
reimport_module(space, mod, name): - if self.runappdirect: - import imp - return imp.load_dynamic(name, mod) - else: - api.load_extension_module(space, mod, name) - return space.getitem( - space.sys.get('modules'), - space.wrap(name)) + space.sys.get('modules'), space.wrap(name)) @gateway.unwrap_spec(modname=str, prologue=str, more_init=str, PY_SSIZE_T_CLEAN=bool) @@ -470,7 +460,7 @@ self.sys_info = get_cpyext_info(self.space) self.w_compile_module = wrap(interp2app(compile_module)) self.w_import_module = wrap(interp2app(import_module)) - self.w_reimport_module = wrap(interp2app(reimport_module)) + self.w_load_module = wrap(interp2app(load_module)) self.w_import_extension = wrap(interp2app(import_extension)) self.w_here = wrap(str(py.path.local(pypydir)) + '/module/cpyext/test/') self.w_debug_collect = wrap(interp2app(debug_collect)) @@ -976,7 +966,7 @@ f.write('not again!\n') f.close() m1 = sys.modules['foo'] - m2 = self.reimport_module(m1.__file__, name='foo') + m2 = self.load_module(m1.__file__, name='foo') assert m1 is m2 assert m1 is sys.modules['foo'] diff --git a/pypy/module/cpyext/test/test_import.py b/pypy/module/cpyext/test/test_import.py --- a/pypy/module/cpyext/test/test_import.py +++ b/pypy/module/cpyext/test/test_import.py @@ -40,7 +40,8 @@ class AppTestImportLogic(AppTestCpythonExtensionBase): def test_import_logic(self): import sys, os - path = self.import_module(name='test_import_module', load_it=False) + path = self.compile_module('test_import_module', + source_files=[os.path.join(self.here, 'test_import_module.c')]) sys.path.append(os.path.dirname(path)) import test_import_module assert test_import_module.TEST is None From pypy.commits at gmail.com Sat Sep 10 03:36:27 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 10 Sep 2016 00:36:27 -0700 (PDT) Subject: [pypy-commit] pypy reverse-debugger: Print full precision doubles Message-ID: <57d3b7fb.81a2c20a.5e378.564e@mx.google.com> Author: Armin Rigo Branch: reverse-debugger Changeset: 
r86980:d7724c0a5700 Date: 2016-09-10 09:35 +0200 http://bitbucket.org/pypy/pypy/changeset/d7724c0a5700/ Log: Print full precision doubles diff --git a/rpython/translator/revdb/src-revdb/revdb.c b/rpython/translator/revdb/src-revdb/revdb.c --- a/rpython/translator/revdb/src-revdb/revdb.c +++ b/rpython/translator/revdb/src-revdb/revdb.c @@ -1768,7 +1768,7 @@ char buffer[128], *p; RPyString *result; int size; - size = snprintf(buffer, sizeof(buffer) - 3, "%g", d); + size = snprintf(buffer, sizeof(buffer) - 3, "%.17g", d); if (size < 0) size = 0; for (p = buffer; '0' <= *p && *p <= '9'; p++) { From pypy.commits at gmail.com Sat Sep 10 04:38:26 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 10 Sep 2016 01:38:26 -0700 (PDT) Subject: [pypy-commit] pypy reverse-debugger: Added tag RevDB-pypy2.7-v5.4.1 for changeset d7724c0a5700 Message-ID: <57d3c682.02d31c0a.ec84b.614a@mx.google.com> Author: Armin Rigo Branch: reverse-debugger Changeset: r86981:b6111ee37219 Date: 2016-09-10 10:37 +0200 http://bitbucket.org/pypy/pypy/changeset/b6111ee37219/ Log: Added tag RevDB-pypy2.7-v5.4.1 for changeset d7724c0a5700 diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -34,3 +34,5 @@ 050d84dd78997f021acf0e133934275d63547cc0 release-pypy2.7-v5.4.1 0e2d9a73f5a1818d0245d75daccdbe21b2d5c3ef release-pypy2.7-v5.4.1 4909c06daf41ce88f87dc01c57959cadad4df4a8 RevDB-pypy2.7-v5.4.1 +4909c06daf41ce88f87dc01c57959cadad4df4a8 RevDB-pypy2.7-v5.4.1 +d7724c0a5700b895a47de44074cdf5fd659a988f RevDB-pypy2.7-v5.4.1 From pypy.commits at gmail.com Sat Sep 10 08:57:59 2016 From: pypy.commits at gmail.com (ntruessel) Date: Sat, 10 Sep 2016 05:57:59 -0700 (PDT) Subject: [pypy-commit] pypy quad-color-gc: Update qcgc codebase Message-ID: <57d40357.c398c20a.dfeea.c1ab@mx.google.com> Author: Nicolas Truessel Branch: quad-color-gc Changeset: r86982:79998830344e Date: 2016-09-10 14:57 +0200 http://bitbucket.org/pypy/pypy/changeset/79998830344e/ Log: Update qcgc codebase diff --git 
a/rpython/translator/c/src/qcgc/allocator.c b/rpython/translator/c/src/qcgc/allocator.c --- a/rpython/translator/c/src/qcgc/allocator.c +++ b/rpython/translator/c/src/qcgc/allocator.c @@ -23,6 +23,7 @@ QCGC_STATIC cell_t *fit_allocator_large_first_fit(size_t index, size_t cells); QCGC_STATIC bool valid_block(cell_t *ptr, size_t cells); +QCGC_STATIC void free_list_consistency_check(void); void qcgc_allocator_initialize(void) { qcgc_allocator_state.arenas = @@ -77,8 +78,13 @@ if (cells > 0) { assert((((object_t *)ptr)->flags & QCGC_PREBUILT_OBJECT) == 0); assert((cell_t *) qcgc_arena_addr(ptr) != ptr); - assert(qcgc_arena_get_blocktype(ptr) == BLOCK_FREE || - qcgc_arena_get_blocktype(ptr) == BLOCK_EXTENT); + assert(qcgc_arena_get_blocktype(ptr) == BLOCK_FREE); + if (qcgc_arena_addr(ptr) == qcgc_arena_addr(ptr + cells)) { + assert(qcgc_arena_get_blocktype(ptr + cells) != BLOCK_EXTENT); + } + for (size_t i = 1; i < cells; i++) { + assert(qcgc_arena_get_blocktype(ptr + i) == BLOCK_EXTENT); + } } #endif if (cells > 0) { @@ -208,6 +214,9 @@ } object_t *qcgc_fit_allocate(size_t bytes) { +#if CHECKED + free_list_consistency_check(); +#endif size_t cells = bytes_to_cells(bytes); cell_t *mem; @@ -223,7 +232,6 @@ return NULL; } - qcgc_arena_mark_allocated(mem, cells); object_t *result = (object_t *) mem; #if QCGC_INIT_ZERO @@ -250,6 +258,16 @@ return result; } +void qcgc_fit_allocator_empty_lists(void) { + for (size_t i = 0; i < QCGC_SMALL_FREE_LISTS; i++) { + qcgc_allocator_state.fit_state.small_free_list[i]->count = 0; + } + + for (size_t i = 0; i < QCGC_LARGE_FREE_LISTS; i++) { + qcgc_allocator_state.fit_state.large_free_list[i]->count = 0; + } +} + QCGC_STATIC cell_t *fit_allocator_small_first_fit(size_t index, size_t cells) { #if CHECKED assert(small_index_to_cells(index) >= cells); @@ -272,16 +290,11 @@ // Check whether block is still valid if (valid_block(result, list_cell_size)) { // The next call might invalidate free_list, reload! 
+ qcgc_arena_mark_allocated(result, cells); qcgc_fit_allocator_add(result + cells, list_cell_size - cells); - break; - } else { - result = NULL; + return result; } } - - if (result != NULL) { - return result; - } } return fit_allocator_large_first_fit(0, cells); } @@ -332,6 +345,7 @@ qcgc_allocator_state.fit_state.large_free_list[index] = qcgc_exp_free_list_remove_index(qcgc_allocator_state.fit_state. large_free_list[index], best_fit_index); + qcgc_arena_mark_allocated(result, cells); qcgc_fit_allocator_add(result + cells, best_fit_cells - cells); } else { // No best fit, go for first fit @@ -344,7 +358,6 @@ #if CHECKED assert(1u<<(index + QCGC_LARGE_FREE_LIST_FIRST_EXP) >= cells); #endif - cell_t *result = NULL; for ( ; index < QCGC_LARGE_FREE_LISTS; index++) { while(qcgc_allocator_state.fit_state.large_free_list[index]->count > 0) { @@ -360,14 +373,11 @@ // Check whether block is still valid if (valid_block(item.ptr, item.size)) { + qcgc_arena_mark_allocated(item.ptr, cells); qcgc_fit_allocator_add(item.ptr + cells, item.size - cells); - result = item.ptr; - break; + return item.ptr; } } - if (result != NULL) { - return result; - } } return NULL; } @@ -415,3 +425,31 @@ ((qcgc_arena_addr(ptr + cells)) == (arena_t *) (ptr + cells)) || qcgc_arena_get_blocktype(ptr + cells) != BLOCK_EXTENT)); } + +QCGC_STATIC void free_list_consistency_check(void) { + for (size_t i = 0; i < QCGC_SMALL_FREE_LISTS; i++) { + linear_free_list_t *free_list = + qcgc_allocator_state.fit_state.small_free_list[i]; + for (size_t j = 0; j < free_list->count; j++) { + cell_t *item = free_list->items[j]; + if (qcgc_arena_get_blocktype(item) == BLOCK_FREE) { + for (size_t s = 1; s < small_index_to_cells(i); s++) { + assert(qcgc_arena_get_blocktype(item + s) == BLOCK_EXTENT); + } + } + } + } + + for (size_t i = 0; i < QCGC_LARGE_FREE_LISTS; i++) { + exp_free_list_t *free_list = + qcgc_allocator_state.fit_state.large_free_list[i]; + for (size_t j = 0; j < free_list->count; j++) { + struct 
exp_free_list_item_s item = free_list->items[j]; + if (qcgc_arena_get_blocktype(item.ptr) == BLOCK_FREE) { + for (size_t s = 1; s < item.size; s++) { + assert(qcgc_arena_get_blocktype(item.ptr + s) == BLOCK_EXTENT); + } + } + } + } +} diff --git a/rpython/translator/c/src/qcgc/allocator.h b/rpython/translator/c/src/qcgc/allocator.h --- a/rpython/translator/c/src/qcgc/allocator.h +++ b/rpython/translator/c/src/qcgc/allocator.h @@ -89,6 +89,11 @@ */ object_t *qcgc_large_allocate(size_t bytes); +/** + * Empty all free lists (used before sweep) + */ +void qcgc_fit_allocator_empty_lists(void); + /** * Add memory to free lists diff --git a/rpython/translator/c/src/qcgc/arena.c b/rpython/translator/c/src/qcgc/arena.c --- a/rpython/translator/c/src/qcgc/arena.c +++ b/rpython/translator/c/src/qcgc/arena.c @@ -185,36 +185,20 @@ assert(qcgc_arena_is_coalesced(arena)); assert(qcgc_arena_addr(qcgc_allocator_state.bump_state.bump_ptr) == arena); #endif - // XXX: Maybe ignore free cell / largest block counting here? 
- size_t last_free_cell = 0; + // Ignore free cell / largest block counting here, as blocks are not + // registerd in free lists as well for (size_t cell = QCGC_ARENA_FIRST_CELL_INDEX; cell < QCGC_ARENA_CELLS_COUNT; cell++) { switch (get_blocktype(arena, cell)) { - case BLOCK_FREE: - last_free_cell = cell; - case BLOCK_EXTENT: // Fall through - break; case BLOCK_BLACK: set_blocktype(arena, cell, BLOCK_WHITE); + case BLOCK_FREE: // Fall through + case BLOCK_EXTENT: // Fall through case BLOCK_WHITE: // Fall through - if (last_free_cell != 0) { - qcgc_state.free_cells += cell - last_free_cell; - qcgc_state.largest_free_block = MAX( - qcgc_state.largest_free_block, - cell - last_free_cell); - last_free_cell = 0; - } break; } } - if (last_free_cell != 0) { - qcgc_state.free_cells += QCGC_ARENA_CELLS_COUNT - last_free_cell; - qcgc_state.largest_free_block = MAX( - qcgc_state.largest_free_block, - QCGC_ARENA_CELLS_COUNT - last_free_cell); - last_free_cell = 0; - } #if CHECKED assert(qcgc_arena_is_coalesced(arena)); #endif @@ -232,7 +216,6 @@ } size_t last_free_cell = 0; - bool register_free_block = false; bool free = true; for (size_t cell = QCGC_ARENA_FIRST_CELL_INDEX; cell < QCGC_ARENA_CELLS_COUNT; @@ -258,40 +241,34 @@ last_free_cell = cell; } // ==> last_free_cell != 0 - register_free_block = true; break; case BLOCK_BLACK: set_blocktype(arena, cell, BLOCK_WHITE); if (last_free_cell != 0) { - if (register_free_block) { - qcgc_fit_allocator_add(arena->cells + last_free_cell, - cell - last_free_cell); + qcgc_fit_allocator_add(arena->cells + last_free_cell, + cell - last_free_cell); #if DEBUG_ZERO_ON_SWEEP - memset(arena->cells + last_free_cell, 0, - sizeof(cell_t) * (cell - last_free_cell)); + memset(arena->cells + last_free_cell, 0, + sizeof(cell_t) * (cell - last_free_cell)); #endif - } qcgc_state.free_cells += cell - last_free_cell; qcgc_state.largest_free_block = MAX( qcgc_state.largest_free_block, cell - last_free_cell); last_free_cell = 0; } - register_free_block = 
false; free = false; // ==> last_free_cell == 0 break; } } if (last_free_cell != 0 && !free) { - if (register_free_block) { - qcgc_fit_allocator_add(arena->cells + last_free_cell, - QCGC_ARENA_CELLS_COUNT - last_free_cell); + qcgc_fit_allocator_add(arena->cells + last_free_cell, + QCGC_ARENA_CELLS_COUNT - last_free_cell); #if DEBUG_ZERO_ON_SWEEP - memset(arena->cells + last_free_cell, 0, - sizeof(cell_t) * (QCGC_ARENA_CELLS_COUNT - last_free_cell)); + memset(arena->cells + last_free_cell, 0, + sizeof(cell_t) * (QCGC_ARENA_CELLS_COUNT - last_free_cell)); #endif - } qcgc_state.free_cells += QCGC_ARENA_CELLS_COUNT - last_free_cell; qcgc_state.largest_free_block = MAX( qcgc_state.largest_free_block, diff --git a/rpython/translator/c/src/qcgc/qcgc.c b/rpython/translator/c/src/qcgc/qcgc.c --- a/rpython/translator/c/src/qcgc/qcgc.c +++ b/rpython/translator/c/src/qcgc/qcgc.c @@ -147,7 +147,8 @@ if (size <= 1<count) { arena_t *arena = qcgc_allocator_state.arenas->items[i]; // The arena that contains the bump pointer is autmatically skipped From pypy.commits at gmail.com Sat Sep 10 09:36:23 2016 From: pypy.commits at gmail.com (ntruessel) Date: Sat, 10 Sep 2016 06:36:23 -0700 (PDT) Subject: [pypy-commit] pypy quad-color-gc: Add teardown method to codegen Message-ID: <57d40c57.217fc20a.479c9.0f83@mx.google.com> Author: Nicolas Truessel Branch: quad-color-gc Changeset: r86983:30fd102afbe9 Date: 2016-09-10 15:35 +0200 http://bitbucket.org/pypy/pypy/changeset/30fd102afbe9/ Log: Add teardown method to codegen diff --git a/rpython/translator/c/gc.py b/rpython/translator/c/gc.py --- a/rpython/translator/c/gc.py +++ b/rpython/translator/c/gc.py @@ -40,6 +40,9 @@ def gc_startup_code(self): return [] + def gc_teardown_code(self): + return [] + def struct_setup(self, structdefnode, rtti): return None @@ -477,6 +480,12 @@ for i in s: yield i + def gc_teardown_code(self): + yield 'qcgc_destroy();' + s = list(super(QcgcFrameworkGcPolicy, self).gc_teardown_code()) + for i in s: + yield i + 
name_to_gcpolicy = { 'boehm': BoehmGcPolicy, 'ref': RefcountingGcPolicy, diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -823,6 +823,15 @@ print >> f, '}' +def gen_teardowncode(f, database): + # generate tear-down code and put it into a function (empty except for qcgc) + print >> f, 'void RPython_TeardownCode(void) {' + + for line in database.gcpolicy.gc_teardown_code(): + print >> f, "\t" + line + + print >> f, '}' + def commondefs(defines): from rpython.rlib.rarithmetic import LONG_BIT, LONGLONG_BIT defines['PYPY_LONG_BIT'] = LONG_BIT @@ -887,6 +896,7 @@ headers_to_precompile.insert(0, incfilename) gen_startupcode(f, database) + gen_teardowncode(f, database) f.close() if 'PYPY_INSTRUMENT' in defines: diff --git a/rpython/translator/c/src/entrypoint.c b/rpython/translator/c/src/entrypoint.c --- a/rpython/translator/c/src/entrypoint.c +++ b/rpython/translator/c/src/entrypoint.c @@ -109,6 +109,7 @@ RPyGilRelease(); #endif + RPython_TeardownCode(); return exitcode; memory_out: diff --git a/rpython/translator/c/src/entrypoint.h b/rpython/translator/c/src/entrypoint.h --- a/rpython/translator/c/src/entrypoint.h +++ b/rpython/translator/c/src/entrypoint.h @@ -9,5 +9,6 @@ #endif RPY_EXTERN void RPython_StartupCode(void); +RPY_EXTERN void RPython_TeardownCode(void); RPY_EXPORTED int PYPY_MAIN_FUNCTION(int argc, char *argv[]); #endif /* PYPY_STANDALONE */ From pypy.commits at gmail.com Sat Sep 10 13:12:29 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 10 Sep 2016 10:12:29 -0700 (PDT) Subject: [pypy-commit] pypy reverse-debugger: emit the byte "release gil" before actually releasing the GIL Message-ID: <57d43efd.85b81c0a.422a8.dbff@mx.google.com> Author: Armin Rigo Branch: reverse-debugger Changeset: r86985:a82acaa0d692 Date: 2016-09-10 19:03 +0200 http://bitbucket.org/pypy/pypy/changeset/a82acaa0d692/ Log: emit the byte "release gil" before actually releasing the GIL 
diff --git a/rpython/translator/revdb/gencsupp.py b/rpython/translator/revdb/gencsupp.py --- a/rpython/translator/revdb/gencsupp.py +++ b/rpython/translator/revdb/gencsupp.py @@ -92,10 +92,9 @@ # reading the 0xFD or 0xFE, we switch to a different thread if needed # (actually implemented with stacklets). if call_code == 'RPyGilAcquire();': - byte = '0xFD' + return 'RPY_REVDB_CALL_GIL_ACQUIRE();' else: - byte = '0xFE' - return 'RPY_REVDB_CALL_GIL(%s, %s);' % (call_code, byte) + return 'RPY_REVDB_CALL_GIL_RELEASE();' # tp = funcgen.lltypename(v_result) if tp == 'void @': diff --git a/rpython/translator/revdb/src-revdb/revdb.c b/rpython/translator/revdb/src-revdb/revdb.c --- a/rpython/translator/revdb/src-revdb/revdb.c +++ b/rpython/translator/revdb/src-revdb/revdb.c @@ -1730,10 +1730,10 @@ } RPY_EXTERN -void rpy_reverse_db_bad_acquire_gil(void) +void rpy_reverse_db_bad_acquire_gil(const char *name) { fprintf(stderr, "out of sync: unexpected byte in log " - " (at acquire_gil or release_gil)\n"); + " (at %s_gil)\n", name); exit(1); } diff --git a/rpython/translator/revdb/src-revdb/revdb_include.h b/rpython/translator/revdb/src-revdb/revdb_include.h --- a/rpython/translator/revdb/src-revdb/revdb_include.h +++ b/rpython/translator/revdb/src-revdb/revdb_include.h @@ -151,23 +151,32 @@ rpy_reverse_db_invoke_callback(_re); \ } -#define RPY_REVDB_CALL_GIL(call_code, byte) \ +#define RPY_REVDB_CALL_GIL_ACQUIRE() \ if (!RPY_RDB_REPLAY) { \ - call_code \ + RPyGilAcquire(); \ _RPY_REVDB_LOCK(); \ - _RPY_REVDB_EMIT_RECORD_L(unsigned char _e, byte) \ + _RPY_REVDB_EMIT_RECORD_L(unsigned char _e, 0xFD) \ _RPY_REVDB_UNLOCK(); \ } \ else { \ unsigned char _re; \ _RPY_REVDB_EMIT_REPLAY(unsigned char _e, _re) \ - if (_re != byte) \ - rpy_reverse_db_bad_acquire_gil(); \ + if (_re != 0xFD) \ + rpy_reverse_db_bad_acquire_gil("acquire"); \ } -#define RPY_REVDB_CALL_GILCTRL(call_code) \ +#define RPY_REVDB_CALL_GIL_RELEASE() \ if (!RPY_RDB_REPLAY) { \ - call_code \ + _RPY_REVDB_LOCK(); \ + 
_RPY_REVDB_EMIT_RECORD_L(unsigned char _e, 0xFE) \ + _RPY_REVDB_UNLOCK(); \ + RPyGilRelease(); \ + } \ + else { \ + unsigned char _re; \ + _RPY_REVDB_EMIT_REPLAY(unsigned char _e, _re) \ + if (_re != 0xFE) \ + rpy_reverse_db_bad_acquire_gil("release"); \ } #define RPY_REVDB_CALLBACKLOC(locnum) \ @@ -284,7 +293,7 @@ RPY_EXTERN void rpy_reverse_db_invoke_callback(unsigned char); RPY_EXTERN void rpy_reverse_db_callback_loc(int); RPY_EXTERN void rpy_reverse_db_lock_acquire(bool_t lock_contention); -RPY_EXTERN void rpy_reverse_db_bad_acquire_gil(void); +RPY_EXTERN void rpy_reverse_db_bad_acquire_gil(const char *name); RPY_EXTERN void rpy_reverse_db_set_thread_breakpoint(int64_t tnum); RPY_EXTERN double rpy_reverse_db_strtod(RPyString *s); RPY_EXTERN RPyString *rpy_reverse_db_dtoa(double d); From pypy.commits at gmail.com Sat Sep 10 13:12:27 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 10 Sep 2016 10:12:27 -0700 (PDT) Subject: [pypy-commit] pypy reverse-debugger: Fix for Linux 32-bit Message-ID: <57d43efb.03121c0a.b279a.0d96@mx.google.com> Author: Armin Rigo Branch: reverse-debugger Changeset: r86984:9d7562248cdf Date: 2016-09-10 17:07 +0200 http://bitbucket.org/pypy/pypy/changeset/9d7562248cdf/ Log: Fix for Linux 32-bit diff --git a/rpython/translator/revdb/src-revdb/revdb.c b/rpython/translator/revdb/src-revdb/revdb.c --- a/rpython/translator/revdb/src-revdb/revdb.c +++ b/rpython/translator/revdb/src-revdb/revdb.c @@ -848,7 +848,9 @@ ssize_t rsize = read(rpy_rev_fileno, buf + result, count_max - result); if (rsize <= 0) { if (rsize == 0) - fprintf(stderr, "RevDB file appears truncated\n"); + fprintf(stderr, "RevDB file appears truncated (cannot read " + "more after offset %lld)\n", + (long long)lseek(rpy_rev_fileno, 0, SEEK_CUR)); else fprintf(stderr, "RevDB file read error: %m\n"); exit(1); @@ -997,11 +999,11 @@ count = lseek(rpy_rev_fileno, 0, SEEK_CUR); if (count < 0 || - lseek(rpy_rev_fileno, -sizeof(uint64_t), SEEK_END) < 0 || - read(rpy_rev_fileno, 
&total_stop_points, - sizeof(uint64_t)) != sizeof(uint64_t) || - lseek(rpy_rev_fileno, count, SEEK_SET) != count) { - fprintf(stderr, "%s: %m\n", rpy_rev_filename); + lseek(rpy_rev_fileno, -(off_t)sizeof(uint64_t), SEEK_END) < 0 || + (read_all(&total_stop_points, sizeof(uint64_t)), + lseek(rpy_rev_fileno, count, SEEK_SET)) != count) { + fprintf(stderr, "%s: invalid total_stop_points (%m)\n", + rpy_rev_filename); exit(1); } From pypy.commits at gmail.com Sat Sep 10 15:26:59 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 10 Sep 2016 12:26:59 -0700 (PDT) Subject: [pypy-commit] pypy.org extradoc: PyPy on FreeBSD is maintained externally on FreshPorts. Thanks DragonSA! Message-ID: <57d45e83.058a1c0a.d2b9e.fae4@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r790:1d67e372fd65 Date: 2016-09-10 21:26 +0200 http://bitbucket.org/pypy/pypy.org/changeset/1d67e372fd65/ Log: PyPy on FreeBSD is maintained externally on FreshPorts. Thanks DragonSA! diff --git a/download.html b/download.html --- a/download.html +++ b/download.html @@ -124,7 +124,7 @@
  • ARM Hardfloat Linux binary (ARMHF/gnueabihf, tar.bz2, Ubuntu Raring) (see [1] below)
  • ARM Softfloat Linux binary (ARMEL/gnueabi, tar.bz2, Ubuntu Precise) (see [1] below)
  • Mac OS/X binary (64bit)
  • -
  • FreeBSD 9.2 x86 64 bit (hopefully availabe soon) (see [1] below)
  • +
  • FreeBSD x86 and x86_64: see FreshPorts
  • Windows binary (32bit) (you might need the VS 2008 runtime library installer vcredist_x86.exe.)
  • PowerPC PPC64 Linux binary (64bit big-endian, Fedora 20) (see [1] below)
  • diff --git a/source/download.txt b/source/download.txt --- a/source/download.txt +++ b/source/download.txt @@ -86,7 +86,7 @@ * `ARM Hardfloat Linux binary (ARMHF/gnueabihf, tar.bz2, Ubuntu Raring)`__ (see ``[1]`` below) * `ARM Softfloat Linux binary (ARMEL/gnueabi, tar.bz2, Ubuntu Precise)`__ (see ``[1]`` below) * `Mac OS/X binary (64bit)`__ -* FreeBSD 9.2 x86 64 bit *(hopefully availabe soon)* (see ``[1]`` below) +* FreeBSD x86 and x86_64: see FreshPorts_ * `Windows binary (32bit)`__ (you might need the VS 2008 runtime library installer `vcredist_x86.exe`_.) * `PowerPC PPC64 Linux binary (64bit big-endian, Fedora 20)`__ (see ``[1]`` below) @@ -111,6 +111,7 @@ .. _`vcredist_x86.exe`: http://www.microsoft.com/en-us/download/details.aspx?id=5582 .. __: https://bitbucket.org/pypy/pypy/downloads .. _mirror: http://buildbot.pypy.org/mirror/ +.. _FreshPorts: http://www.freshports.org/lang/pypy Python 3.3.5 compatible PyPy3.3 v5.2 ------------------------------------- From pypy.commits at gmail.com Sat Sep 10 15:42:19 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 10 Sep 2016 12:42:19 -0700 (PDT) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <57d4621b.4cc51c0a.57b5f.358d@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r791:91c9256c0b81 Date: 2016-09-10 21:42 +0200 http://bitbucket.org/pypy/pypy.org/changeset/91c9256c0b81/ Log: update the values diff --git a/don4.html b/don4.html --- a/don4.html +++ b/don4.html @@ -17,7 +17,7 @@ 2nd call: - $58969 of $80000 (73.7%) + $58979 of $80000 (73.7%)
    @@ -25,7 +25,7 @@
  • From pypy.commits at gmail.com Sat Sep 10 15:52:15 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 10 Sep 2016 12:52:15 -0700 (PDT) Subject: [pypy-commit] pypy.org extradoc: Close the STM donation button, as per the January 22 mail on pypy-z. Message-ID: <57d4646f.eeb8c20a.ba0e2.3d04@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r792:bdb3d0de7c45 Date: 2016-09-10 21:52 +0200 http://bitbucket.org/pypy/pypy.org/changeset/bdb3d0de7c45/ Log: Close the STM donation button, as per the January 22 mail on pypy-z. diff --git a/don4.html b/don4.html --- a/don4.html +++ b/don4.html @@ -23,7 +23,11 @@
  • -
  • diff --git a/js/script2.js b/js/script2.js --- a/js/script2.js +++ b/js/script2.js @@ -25,5 +25,5 @@ if (location.href.indexOf("py3donate.html") >= 0) f = py3k_donate; else - f = stm_donate; + f = general_donate; $(document).ready(f); From pypy.commits at gmail.com Sat Sep 10 16:04:54 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 10 Sep 2016 13:04:54 -0700 (PDT) Subject: [pypy-commit] pypy.org extradoc: Close the call for donation Message-ID: <57d46766.11331c0a.74120.4666@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r793:c51b7e6f6b85 Date: 2016-09-10 22:04 +0200 http://bitbucket.org/pypy/pypy.org/changeset/c51b7e6f6b85/ Log: Close the call for donation diff --git a/source/tmdonate2.txt b/source/tmdonate2.txt --- a/source/tmdonate2.txt +++ b/source/tmdonate2.txt @@ -7,6 +7,24 @@ Transactional Memory, 2nd Call ============================== +UPDATE (September 2016): + + **This call for donation is closed. Thank you everybody for + contributing!** + + *We have actually more money in the pot that we can use in the near + future. As it turns out, STM is a hard researchy topic. Remi Meier + is still actively working on this topic as part of his Ph.D. thesis. + Armin Rigo thinks more fundamental work is needed, which will + eventually lead us to a next generation attempt.* + +======================== + +.. raw:: html + +

    + + .. class:: download_menu * `Preamble`_ diff --git a/tmdonate2.html b/tmdonate2.html --- a/tmdonate2.html +++ b/tmdonate2.html @@ -69,7 +69,18 @@

    2nd Call for donations - Transactional Memory in PyPy

    -